-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathftrace_helper.h
255 lines (215 loc) · 5.77 KB
/
ftrace_helper.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
/*
* Hooking kernel functions using ftrace framework
*
* Copyright (c) 2018 ilammy
*/
#pragma once
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/version.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/container_of.h>
#include "stdlib.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)
/*
 * lookup_name() - resolve a kernel symbol to its address.
 *
 * kallsyms_lookup_name() is no longer exported since 5.7.0, so plant a
 * temporary kprobe on the symbol and read back the address it resolved
 * to, then immediately remove the probe.  Returns 0 if the symbol
 * cannot be resolved.
 */
static unsigned long lookup_name(const char *name)
{
	unsigned long addr = 0;
	struct kprobe probe = {
		.symbol_name = name
	};

	if (register_kprobe(&probe) >= 0) {
		addr = (unsigned long) probe.addr;
		unregister_kprobe(&probe);
	}
	return addr;
}
#else
/* Before 5.7.0 the kernel still exports kallsyms_lookup_name() directly. */
static unsigned long lookup_name(const char *name)
{
	return kallsyms_lookup_name(name);
}
#endif
/* FTRACE_OPS_FL_RECURSION was named FTRACE_OPS_FL_RECURSION_SAFE before 5.11. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
#define FTRACE_OPS_FL_RECURSION FTRACE_OPS_FL_RECURSION_SAFE
#endif
/*
 * Before 5.11 ftrace callbacks received a struct pt_regs * directly;
 * emulate the newer struct ftrace_regs type and its ftrace_get_regs()
 * accessor so the callback below compiles on both sides of the change.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
#define ftrace_regs pt_regs
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
return fregs;
}
#endif
/*
 * There are two ways of preventing vicious recursive loops when hooking:
 * - detect recursion using function return address (USE_FENTRY_OFFSET = 0)
 * - avoid recursion by jumping over the ftrace call (USE_FENTRY_OFFSET = 1)
 */
#define USE_FENTRY_OFFSET 0
/**
 * struct ftrace_hook - describes a single hook to install
 *
 * @name: name of the function to hook
 *
 * @function: pointer to the function to execute instead
 *
 * @original: pointer to the location where to save a pointer
 * to the original function
 *
 * @address: kernel address of the function entry
 *
 * @ops: ftrace_ops state for this function hook
 *
 * The user should fill in only &name, &function, &original fields.
 * Other fields are considered implementation details.
 */
struct ftrace_hook {
const char *name;
void *function;
void *original;
unsigned long address;
struct ftrace_ops ops;
};
/*
 * fh_resolve_hook_address() - resolve hook->name and publish the result.
 *
 * Stores the resolved entry address in hook->address and writes the
 * address callers should use to reach the original function through
 * *hook->original.  Returns 0 on success, -ENOENT if the symbol cannot
 * be resolved.
 */
static int fh_resolve_hook_address(struct ftrace_hook *hook)
{
	unsigned long addr = lookup_name(hook->name);

	hook->address = addr;
	if (!addr)
		return -ENOENT;

#if USE_FENTRY_OFFSET
	/* Skip over the fentry call so invoking the original never re-enters ftrace. */
	addr += MCOUNT_INSN_SIZE;
#endif
	*((unsigned long *) hook->original) = addr;
	return 0;
}
/*
 * fh_ftrace_thunk() - ftrace callback that redirects execution.
 *
 * Invoked on entry to the traced function.  Rewrites the saved
 * instruction pointer so that control lands in hook->function instead
 * of the original.  Marked notrace so this callback can never itself
 * be traced and recurse.
 */
static void notrace fh_ftrace_thunk(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
/* Recover the owning hook from the embedded ops member. */
struct ftrace_hook *hook = container_of(ops, struct ftrace_hook, ops);
#if USE_FENTRY_OFFSET
/* Recursion is already avoided by hooking past the fentry call, so always redirect. */
regs->ip = (unsigned long)hook->function;
#else
/*
 * Only redirect calls originating outside this module; calls made via
 * hook->original come from our own code and must reach the real
 * function, otherwise we would loop forever.
 */
if (!within_module(parent_ip, THIS_MODULE))
regs->ip = (unsigned long)hook->function;
#endif
}
/**
 * fh_install_hook() - register and enable a single hook
 * @hook: a hook to install
 *
 * Resolves the target symbol, fills in *hook->original, then arms an
 * ftrace_ops that redirects execution to hook->function.
 *
 * Returns: zero on success, negative error code otherwise.
 */
int fh_install_hook(struct ftrace_hook *hook)
{
int err;
err = fh_resolve_hook_address(hook);
if (err)
return err;
/*
 * We're going to modify %rip register so we'll need IPMODIFY flag
 * and SAVE_REGS as its prerequisite. ftrace's anti-recursion guard
 * is useless if we change %rip so disable it with RECURSION.
 * We'll perform our own checks for trace function reentry.
 */
hook->ops.func = fh_ftrace_thunk;
hook->ops.flags = FTRACE_OPS_FL_SAVE_REGS
| FTRACE_OPS_FL_RECURSION
| FTRACE_OPS_FL_IPMODIFY;
/* Trace only the one resolved address, not every kernel function. */
err = ftrace_set_filter_ip(&hook->ops, hook->address, 0, 0);
if (err) {
//rk_info("ftrace_set_filter_ip() failed: %d\n", err);
return err;
}
err = register_ftrace_function(&hook->ops);
if (err) {
//rk_info("register_ftrace_function() failed: %d\n", err);
/* Roll back the filter set above (remove = 1). */
ftrace_set_filter_ip(&hook->ops, hook->address, 1, 0);
return err;
}
printk(KERN_DEBUG "installed hook on %s\n", hook->name);
return 0;
}
/**
 * fh_remove_hook() - disable and unregister a single hook
 * @hook: a hook to remove
 *
 * Unregisters the ftrace callback first, then drops the address
 * filter.  Failures are ignored on both steps; there is no useful
 * recovery during teardown.
 */
void fh_remove_hook(struct ftrace_hook *hook)
{
int err;
err = unregister_ftrace_function(&hook->ops);
if (err) {
//rk_info("unregister_ftrace_function() failed: %d\n", err);
}
err = ftrace_set_filter_ip(&hook->ops, hook->address, 1, 0);
if (err) {
//rk_info("ftrace_set_filter_ip() failed: %d\n", err);
}
}
/**
 * fh_install_hooks() - register and enable multiple hooks
 * @hooks: array of hooks to install
 * @count: number of hooks to install
 *
 * Installs hooks in order.  If any hook fails to install, every hook
 * installed so far is removed again, leaving the system unchanged.
 *
 * Returns: zero on success, negative error code otherwise.
 */
int fh_install_hooks(struct ftrace_hook *hooks, size_t count)
{
	size_t installed;

	for (installed = 0; installed < count; installed++) {
		int err = fh_install_hook(&hooks[installed]);

		if (err) {
			/* Unwind in reverse order of installation. */
			while (installed--)
				fh_remove_hook(&hooks[installed]);
			return err;
		}
	}
	return 0;
}
/**
 * fh_remove_hooks() - disable and unregister multiple hooks
 * @hooks: array of hooks to remove
 * @count: number of hooks to remove
 */
void fh_remove_hooks(struct ftrace_hook *hooks, size_t count)
{
	struct ftrace_hook *hook = hooks;
	struct ftrace_hook *const end = hooks + count;

	for (; hook != end; hook++)
		fh_remove_hook(hook);
}
/* The %rip-rewriting technique in fh_ftrace_thunk() is x86_64-specific. */
#ifndef CONFIG_X86_64
#error Currently only x86_64 architecture is supported
#endif
/* Since 4.17, x86_64 syscall handlers take a single struct pt_regs * argument. */
#if defined(CONFIG_X86_64) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
#define PTREGS_SYSCALL_STUBS 1
#endif
/*
 * Tail call optimization can interfere with recursion detection based on
 * return address on the stack. Disable it to avoid machine hangups.
 */
#if !USE_FENTRY_OFFSET
#pragma GCC optimize("-fno-optimize-sibling-calls")
#endif
/* pt_regs syscall stubs prefix symbol names, e.g. sys_kill -> __x64_sys_kill. */
#ifdef PTREGS_SYSCALL_STUBS
#define SYSCALL_NAME(name) ("__x64_" name)
#else
#define SYSCALL_NAME(name) (name)
#endif
/* Initializer for a syscall hook: maps _name through SYSCALL_NAME(). */
#define HOOK(_name, _function, _original) \
{ \
.name = SYSCALL_NAME(_name), \
.function = (_function), \
.original = (_original), \
}
/* Initializer for a non-syscall hook: the symbol name is used verbatim. */
#define HOOK_NOSYS(_name, _function, _original) \
{ \
.name = (_name), \
.function = (_function), \
.original = (_original), \
}