profile.bpf.c
// SPDX-License-Identifier: GPL-2.0-only
#include "vmlinux.h"
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#include "profile.bpf.h"
#include "pid.h"
#include "ume.h"

// PF_KTHREAD is the task_struct->flags bit that marks kernel threads.
#define PF_KTHREAD 0x00200000

struct global_config_t {
    uint64_t ns_pid_ino; // inode of the PID namespace used to resolve PIDs, set by userspace at load time
};

const volatile struct global_config_t global_config;
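
// do_perf_event handles one sampled perf event: it resolves the current tgid,
// skips kernel threads, and depending on the per-pid config either asks
// userspace for process info, tail-calls the Python unwinder, or records a
// frame-pointer stack sample into the counts map.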
SEC("perf_event")
int do_perf_event(struct bpf_perf_event_data *ctx) {
u32 tgid = 0;
current_pid(global_config.ns_pid_ino, &tgid);
struct sample_key key = {};
u32 *val, one = 1;
struct task_struct *task = (struct task_struct *)bpf_get_current_task();
if (tgid == 0 || task == 0) {
return 0;
}
int flags = 0;
if (pyro_bpf_core_read(&flags, sizeof(flags), &task->flags)) {
bpf_dbg_printk("failed to read task->flags\n");
return 0;
}
if (flags & PF_KTHREAD) {
bpf_dbg_printk("skipping kthread %d\n", tgid);
return 0;
}
struct pid_config *config = bpf_map_lookup_elem(&pids, &tgid);
if (config == NULL) {
struct pid_config unknown = {
.type = PROFILING_TYPE_UNKNOWN,
.collect_kernel = 0,
.collect_user = 0,
.padding_ = 0
};
if (bpf_map_update_elem(&pids, &tgid, &unknown, BPF_NOEXIST)) {
bpf_dbg_printk("failed to update pids map. probably concurrent update\n");
return 0;
}
struct pid_event event = {
.op = OP_REQUEST_UNKNOWN_PROCESS_INFO,
.pid = tgid
};
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
return 0;
}
if (config->type == PROFILING_TYPE_ERROR || config->type == PROFILING_TYPE_UNKNOWN) {
return 0;
}
if (config->type == PROFILING_TYPE_PYTHON) {
bpf_tail_call(ctx, &progs, PROG_IDX_PYTHON);
return 0;
}
if (config->type == PROFILING_TYPE_FRAMEPOINTERS) {
key.pid = tgid;
key.kern_stack = -1;
key.user_stack = -1;
if (config->collect_kernel) {
key.kern_stack = bpf_get_stackid(ctx, &stacks, KERN_STACKID_FLAGS);
}
if (config->collect_user) {
key.user_stack = bpf_get_stackid(ctx, &stacks, USER_STACKID_FLAGS);
}
val = bpf_map_lookup_elem(&counts, &key);
if (val)
(*val)++;
else
bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);
}
return 0;
}
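
// disassociate_ctty is called with on_exit == 1 when a process exits, so this
// kprobe doubles as a process-exit hook: drop the pid from the pids map and
// tell userspace it is dead.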
SEC("kprobe/disassociate_ctty")
int BPF_KPROBE(disassociate_ctty, int on_exit) {
if (!on_exit) {
return 0;
}
u32 pid = 0;
current_pid(global_config.ns_pid_ino, &pid);
if (pid == 0) {
return 0;
}
bpf_map_delete_elem(&pids, &pid);
struct pid_event event = {
.op = OP_PID_DEAD,
.pid = pid
};
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
return 0;
}

// execve/execveat: the process image was replaced, so previously collected
// process info is stale; ask userspace to refresh it.
SEC("kprobe/exec")
int BPF_KPROBE(exec, void *_) {
    u32 pid = 0;
    current_pid(global_config.ns_pid_ino, &pid);
    if (pid == 0) {
        return 0;
    }
    struct pid_event event = {
        .op = OP_REQUEST_EXEC_PROCESS_INFO,
        .pid = pid
    };
    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
    return 0;
}

char _license[] SEC("license") = "GPL";