Skip to content

Commit

Permalink
Rework known process detection
Browse files Browse the repository at this point in the history
We don't need the separate `process_info` hash map anymore: whether a process
is known can be derived from the presence of its pid entry in the
`exec_mappings` map. As BPF tracing programs require hash maps to be
preallocated, dropping the extra map will save some memory.

Something we might want to do in the future is tune the maximum entries
in the mappings BPF map.

Additionally, this also cleans up some of the implementations for the
BPF bindings. I love bindgen!!

Test Plan
=========

Ran locally without any issues or errors.
  • Loading branch information
javierhonduco committed Apr 15, 2024
1 parent 0a13d07 commit b956408
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 100 deletions.
1 change: 1 addition & 0 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ fn main() {
println!("cargo:rerun-if-changed={PROFILER_BPF_SOURCE}");

let bindings = bindgen::Builder::default()
.derive_default(true)
.header(PROFILER_BPF_HEADER)
.generate()
.expect("Unable to generate bindings");
Expand Down
57 changes: 18 additions & 39 deletions src/bpf/profiler.bpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -54,13 +54,6 @@ struct {
__uint(max_entries, 0);
} events SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_PROCESSES);
__type(key, int);
__type(value, process_info_t);
} process_info SEC(".maps");

// Mapping of executable ID to unwind info chunks.
struct {
__uint(type, BPF_MAP_TYPE_HASH);
Expand Down Expand Up @@ -271,6 +264,15 @@ find_unwind_table(chunk_info_t **chunk_info, pid_t per_process_id, u64 pc, u64 *
return FIND_UNWIND_CHUNK_NOT_FOUND;
}

// Returns true iff userspace has already registered this process: when
// unwind info is added, a sentinel entry keyed only on the pid (address 0)
// is inserted into exec_mappings, so a lookup on that key acts as a
// membership test.
static __always_inline bool process_is_known(int per_process_id) {
  // Pid is stored byte-swapped so the longest-prefix match operates on the
  // most significant bits first — presumably an LPM-trie keyed map; confirm
  // against the exec_mappings definition.
  u32 big_endian_pid = __builtin_bswap32((u32) per_process_id);

  struct exec_mappings_key key = {};
  // NOTE: PREFIX_LEN is a macro with a trailing ';', so it must stay in
  // statement position (it cannot be used inside an initializer list).
  key.prefix_len = PREFIX_LEN;
  key.data = 0;
  key.pid = big_endian_pid;

  return bpf_map_lookup_elem(&exec_mappings, &key) != NULL;
}

// Kernel addresses have the top bits set.
static __always_inline bool in_kernel(u64 ip) { return ip & (1UL << 63); }

Expand Down Expand Up @@ -577,22 +579,14 @@ int dwarf_unwind(struct bpf_perf_event_data *ctx) {
bpf_probe_read_user(&previous_rip, 8, (void *)(previous_rip_addr));

if (previous_rip == 0) {
process_info_t *proc_info = bpf_map_lookup_elem(&process_info, &per_process_id);
if (proc_info == NULL) {
LOG("[error] should never happen");
return 1;
}

if (proc_info->is_jit_compiler) {
LOG("[info] rip=0, Section not added, yet");
bump_unwind_error_jit();
return 1;
if (err == 0) {
LOG("[warn] previous_rip=0, maybe this is a JIT segment?");
} else {
LOG("[error] previous_rip should not be zero. This can mean that the "
"read failed, ret=%d while reading @ %llx.",
err, previous_rip_addr);
bump_unwind_error_catchall();
}

LOG("[error] previous_rip should not be zero. This can mean that the "
"read failed, ret=%d while reading @ %llx.",
err, previous_rip_addr);
bump_unwind_error_catchall();
return 1;
}

Expand Down Expand Up @@ -644,20 +638,7 @@ int dwarf_unwind(struct bpf_perf_event_data *ctx) {
add_stack(ctx, pid_tgid, unwind_state);
bump_unwind_success_dwarf();
} else {
process_info_t *proc_info =
bpf_map_lookup_elem(&process_info, &per_process_id);
if (proc_info == NULL) {
LOG("[error] should never happen 610");
return 1;
}

if (proc_info->is_jit_compiler) {
LOG("[info] Section not added, yet");
bump_unwind_error_jit();
return 1;
}

LOG("[error] Could not find unwind table and rbp != 0 (%llx). New "
LOG("[error] Could not find unwind table and rbp != 0 (%llx). Unknown "
"mapping?",
unwind_state->bp);
// request_refresh_process_info(ctx, user_pid);
Expand Down Expand Up @@ -748,9 +729,7 @@ int on_event(struct bpf_perf_event_data *ctx) {
profiler_state->stack_key.user_stack_id = 0;
profiler_state->stack_key.kernel_stack_id = 0;

process_info_t *proc_info =
bpf_map_lookup_elem(&process_info, &per_process_id);
if (proc_info) {
if (process_is_known(per_process_id)) {
bpf_printk("== unwinding with per_process_id: %d", per_process_id);
bump_samples();

Expand Down
7 changes: 0 additions & 7 deletions src/bpf/profiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -144,13 +144,6 @@ struct exec_mappings_key {
// Prefix size in bits, excluding the prefix length.
#define PREFIX_LEN (sizeof(struct exec_mappings_key) - sizeof(u32)) * 8;

// Executable mappings for a process.
typedef struct {
u32 is_jit_compiler;
//u32 len;
// mapping_t mappings[MAX_MAPPINGS_PER_PROCESS];
} process_info_t;

// A row in the stack unwinding table for x86_64.
typedef struct __attribute__((packed)) {
u64 pc;
Expand Down
33 changes: 1 addition & 32 deletions src/bpf/profiler_bindings.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ include!(concat!(env!("OUT_DIR"), "/profiler_bindings.rs"));
unsafe impl Plain for stack_count_key_t {}
unsafe impl Plain for native_stack_t {}
unsafe impl Plain for Event {}
unsafe impl Plain for process_info_t {}
unsafe impl Plain for unwind_info_chunks_t {}
unsafe impl Plain for unwinder_stats_t {}
unsafe impl Plain for exec_mappings_key {}
Expand Down Expand Up @@ -47,34 +46,4 @@ impl Add for unwinder_stats_t {
error_jit: self.error_jit + other.error_jit,
}
}
}

// Clippy notes this impl is derivable, but the struct is emitted by bindgen
// from the C header, so we cannot attach a `#[derive(Default)]` to its
// definition here — spell out the all-zero counters by hand instead.
#[allow(clippy::derivable_impls)]
impl Default for unwinder_stats_t {
    fn default() -> Self {
        Self {
            total: 0,
            success_dwarf: 0,
            error_catchall: 0,
            error_jit: 0,
            error_pc_not_covered: 0,
            error_should_never_happen: 0,
            error_truncated: 0,
            error_unsupported_cfa_register: 0,
            error_unsupported_expression: 0,
            error_unsupported_frame_pointer_action: 0,
        }
    }
}

// Derivable in principle, but the type comes from generated bindings where
// we can't add derives — provide the zeroed key manually.
#[allow(clippy::derivable_impls)]
impl Default for stack_count_key_t {
    fn default() -> Self {
        Self {
            pid: 0,
            task_id: 0,
            kernel_stack_id: 0,
            user_stack_id: 0,
        }
    }
}
}
44 changes: 22 additions & 22 deletions src/profiler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -465,6 +465,18 @@ impl Profiler<'_> {
.expect("update"); // error with value: System(7)', src/main.rs:663:26
}

/// Inserts (or overwrites, via `MapFlags::ANY`) one entry in the
/// `exec_mappings` BPF map, serializing both key and value as raw bytes.
fn add_bpf_mapping(
    &mut self,
    key: &exec_mappings_key,
    value: &mapping_t,
) -> Result<(), libbpf_rs::Error> {
    // SAFETY: both types implement `Plain` (plain-old-data), so viewing
    // them as byte slices is sound.
    let key_bytes = unsafe { plain::as_bytes(key) };
    let value_bytes = unsafe { plain::as_bytes(value) };

    let maps = self.bpf.maps();
    maps.exec_mappings()
        .update(key_bytes, value_bytes, MapFlags::ANY)
}

fn add_unwind_info(&mut self, pid: i32) {
if !self.process_is_known(pid) {
panic!("add_unwind_info -- expected process to be known");
Expand Down Expand Up @@ -759,6 +771,14 @@ impl Profiler<'_> {
if got_some_unwind_info {
self.native_unwind_state.dirty = true;

// Add entry just with the pid to signal processes that we already know about.
let key = exec_mappings_key::new(
pid.try_into().unwrap(),
0x0,
32, // pid bits
);
self.add_bpf_mapping(&key, &mapping_t::default()).unwrap();

// Add process info
for mapping in mappings {
for address_range in summarize_address_range(mapping.begin, mapping.end - 1) {
Expand All @@ -767,30 +787,10 @@ impl Profiler<'_> {
address_range.addr,
address_range.range,
);
let value = mapping;

self.bpf
.maps()
.exec_mappings()
.update(
unsafe { plain::as_bytes(&key) },
unsafe { plain::as_bytes(&value) },
MapFlags::ANY,
)
.unwrap();

self.add_bpf_mapping(&key, &mapping).unwrap();
}
}

let proc_info = process_info_t { is_jit_compiler: 0 };
self.bpf
.maps()
.process_info()
.update(
&pid.to_ne_bytes(),
unsafe { plain::as_bytes(&proc_info) },
MapFlags::ANY,
)
.unwrap();
}
}

Expand Down

0 comments on commit b956408

Please sign in to comment.