diff --git a/Cargo.lock b/Cargo.lock
index 1ad7167..8a01d56 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -848,6 +848,7 @@ dependencies = [
  "data-encoding",
  "errno",
  "gimli",
+ "glob",
  "inferno",
  "insta",
  "lazy_static",
diff --git a/Cargo.toml b/Cargo.toml
index cc428f2..188c2cd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,6 +36,7 @@ rstest = "0.18.2"
 [build-dependencies]
 bindgen = "0.69.4"
 libbpf-cargo = "0.22.1"
+glob = "0.3.1"
 
 [profile.dev.package]
 insta.opt-level = 3
diff --git a/build.rs b/build.rs
index 4816bf5..a67a18d 100644
--- a/build.rs
+++ b/build.rs
@@ -3,6 +3,7 @@ extern crate bindgen;
 
 use std::env;
 use std::path::PathBuf;
+use glob::glob;
 
 use libbpf_cargo::SkeletonBuilder;
 use std::path::Path;
@@ -10,11 +11,17 @@ const PROFILER_BPF_HEADER: &str = "./src/bpf/profiler.h";
 const PROFILER_BPF_SOURCE: &str = "./src/bpf/profiler.bpf.c";
 const PROFILER_SKELETON: &str = "./src/bpf/profiler_skel.rs";
 
+const TRACERS_BPF_HEADER: &str = "./src/bpf/tracers.h";
+const TRACERS_BPF_SOURCE: &str = "./src/bpf/tracers.bpf.c";
+const TRACERS_SKELETON: &str = "./src/bpf/tracers_skel.rs";
+
 fn main() {
     // Inform cargo of when to re build
-    println!("cargo:rerun-if-changed={PROFILER_BPF_HEADER}");
-    println!("cargo:rerun-if-changed={PROFILER_BPF_SOURCE}");
+    for path in glob("src/bpf/*[hc]").unwrap().flatten() {
+        println!("cargo:rerun-if-changed={}", path.display());
+    }
 
+    // Main native profiler.
     let bindings = bindgen::Builder::default()
         .derive_default(true)
         .header(PROFILER_BPF_HEADER)
@@ -27,10 +34,30 @@ fn main() {
         .write_to_file(bindings_out_file)
         .expect("Couldn't write bindings!");
 
+    // Tracers.
+    let bindings = bindgen::Builder::default()
+        .derive_default(true)
+        .header(TRACERS_BPF_HEADER)
+        .generate()
+        .expect("Unable to generate bindings");
+
+    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
+    let bindings_out_file = out_path.join("tracers_bindings.rs");
+    bindings
+        .write_to_file(bindings_out_file)
+        .expect("Couldn't write bindings!");
+
     let skel = Path::new(PROFILER_SKELETON);
     SkeletonBuilder::new()
         .source(PROFILER_BPF_SOURCE)
         .clang_args("-Wextra -Wall")
         .build_and_generate(skel)
         .expect("run skeleton builder");
+
+    let skel = Path::new(TRACERS_SKELETON);
+    SkeletonBuilder::new()
+        .source(TRACERS_BPF_SOURCE)
+        .clang_args("-Wextra -Wall")
+        .build_and_generate(skel)
+        .expect("run skeleton builder");
 }
diff --git a/src/bpf/common.h b/src/bpf/constants.h
similarity index 61%
rename from src/bpf/common.h
rename to src/bpf/constants.h
index 4551519..0a77bcb 100644
--- a/src/bpf/common.h
+++ b/src/bpf/constants.h
@@ -1,5 +1,5 @@
-#ifndef __LINUX_PAGE_CONSTANTS_HACK__
-#define __LINUX_PAGE_CONSTANTS_HACK__
+#ifndef __LIGHTSWITCH_LINUX_PAGE_CONSTANTS__
+#define __LIGHTSWITCH_LINUX_PAGE_CONSTANTS__
 
 // Values for x86_64 as of 6.0.18-200.
 #define TOP_OF_KERNEL_STACK_PADDING 0
@@ -10,9 +10,10 @@
 #endif
 
-#ifndef __ERROR_CONSTANTS_HACK__
-#define __ERROR_CONSTANTS_HACK__
+#ifndef __LIGHTSWITCH_ERROR_CONSTANTS__
+#define __LIGHTSWITCH_ERROR_CONSTANTS__
 
 #define EFAULT 14
 #define EEXIST 17
+
 #endif
diff --git a/src/bpf/mod.rs b/src/bpf/mod.rs
index d82ba9a..1e12629 100644
--- a/src/bpf/mod.rs
+++ b/src/bpf/mod.rs
@@ -1,2 +1,4 @@
 pub mod profiler_bindings;
 pub mod profiler_skel;
+pub mod tracers_bindings;
+pub mod tracers_skel;
diff --git a/src/bpf/profiler.bpf.c b/src/bpf/profiler.bpf.c
index 2c2974d..a7be1e3 100644
--- a/src/bpf/profiler.bpf.c
+++ b/src/bpf/profiler.bpf.c
@@ -2,23 +2,17 @@
 // Copyright 2022 The Parca Authors
 // Copyright 2024 The Lightswitch Authors
 
-#include "common.h"
+#include "constants.h"
 #include "vmlinux.h"
 #include "profiler.h"
+#include "shared_maps.h"
+#include "shared_helpers.h"
 
 #include <bpf/bpf_core_read.h>
 #include <bpf/bpf_endian.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct {
-  __uint(type, BPF_MAP_TYPE_LPM_TRIE);
-  __type(key, struct exec_mappings_key);
-  __type(value, mapping_t);
-  __uint(map_flags, BPF_F_NO_PREALLOC);
-  __uint(max_entries, MAX_PROCESSES * 200);
-} exec_mappings SEC(".maps");
-
 struct {
   __uint(type, BPF_MAP_TYPE_HASH);
   __uint(max_entries, 100000);
@@ -69,14 +63,6 @@ struct {
   __type(value, stack_unwind_table_t);
 } unwind_tables SEC(".maps");
 
-struct {
-  __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-  __uint(max_entries, 1);
-  __type(key, u32);
-  __type(value, struct unwinder_stats_t);
-} percpu_stats SEC(".maps");
-
-
 struct {
   __uint(type, BPF_MAP_TYPE_HASH);
   __uint(max_entries, MAX_PROCESSES);
@@ -84,33 +70,6 @@ struct {
   __type(value, bool);
 } rate_limits SEC(".maps");
 
-/*=========================== HELPER FUNCTIONS ==============================*/
-
-#define DEFINE_COUNTER(__func__name)                                          \
-  static void bump_unwind_##__func__name() {                                  \
-    u32 zero = 0;                                                             \
-    struct unwinder_stats_t *unwinder_stats =                                 \
-        bpf_map_lookup_elem(&percpu_stats, &zero);                            \
-    if (unwinder_stats != NULL) {                                             \
-      unwinder_stats->__func__name++;                                         \
-    }                                                                         \
-  }
-
-DEFINE_COUNTER(total);
-DEFINE_COUNTER(success_dwarf);
-DEFINE_COUNTER(error_truncated);
-DEFINE_COUNTER(error_unsupported_expression);
-DEFINE_COUNTER(error_unsupported_frame_pointer_action);
-DEFINE_COUNTER(error_unsupported_cfa_register);
-DEFINE_COUNTER(error_catchall);
-DEFINE_COUNTER(error_should_never_happen);
-DEFINE_COUNTER(error_pc_not_covered);
-DEFINE_COUNTER(error_mapping_not_found);
-DEFINE_COUNTER(error_mapping_does_not_contain_pc);
-DEFINE_COUNTER(error_chunk_not_found);
-DEFINE_COUNTER(error_binary_search_exausted_iterations);
-DEFINE_COUNTER(error_sending_new_process_event);
-DEFINE_COUNTER(error_cfa_offset_did_not_fit);
 
 // Binary search the unwind table to find the row index containing the unwind
 // information for a given program counter (pc) relative to the object file.
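The counters removed here move into `shared_maps.h` below, so the profiler and the new tracers bump the same per-CPU `unwinder_stats_t` values through `percpu_stats`. For context, reading them back from userspace means summing one copy per CPU; a minimal sketch, assuming libbpf-rs's per-CPU lookup API and that `total` is the first `u64` field of `unwinder_stats_t` (the helper name is hypothetical):

```rust
use libbpf_rs::{Map, MapFlags};

// Hypothetical helper: sum the `total` counter across all CPUs.
// `lookup_percpu` returns one value buffer per CPU for the single
// slot (key 0) of the BPF_MAP_TYPE_PERCPU_ARRAY map.
fn total_unwound_samples(percpu_stats: &Map) -> u64 {
    let key = 0u32.to_ne_bytes();
    percpu_stats
        .lookup_percpu(&key, MapFlags::ANY)
        .expect("lookup_percpu")
        .unwrap_or_default()
        .iter()
        // `total` is assumed to be the first u64 field, i.e. bytes 0..8.
        .map(|cpu_value| u64::from_ne_bytes(cpu_value[..8].try_into().unwrap()))
        .sum()
}
```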
@@ -146,29 +105,6 @@ static __always_inline u64 find_offset_for_pc(stack_unwind_table_t *table, u64 p
   return BINARY_SEARCH_EXHAUSTED_ITERATIONS;
 }
 
-static __always_inline mapping_t* find_mapping(int per_process_id, u64 pc) {
-  struct exec_mappings_key key = {};
-  key.prefix_len = PREFIX_LEN;
-  key.pid = __builtin_bswap32((u32) per_process_id);
-  key.data = __builtin_bswap64(pc);
-
-  mapping_t *mapping = bpf_map_lookup_elem(&exec_mappings, &key);
-
-  if (mapping == NULL) {
-    LOG("[error] no mapping found for pc %llx", pc);
-    bump_unwind_error_mapping_not_found();
-    return NULL;
-  }
-
-  if (pc < mapping->begin || pc >= mapping->end) {
-    LOG("[error] pc %llx not contained within begin: %llx end: %llx", pc, mapping->begin, mapping->end);
-    bump_unwind_error_mapping_does_not_contain_pc();
-    return NULL;
-  }
-
-  return mapping;
-}
-
 // Finds the shard information for a given pid and program counter. Optionally,
 // and offset can be passed that will be filled in with the mapping's load
 // address.
@@ -207,14 +143,6 @@ find_chunk(mapping_t *mapping, u64 object_relative_pc) {
   return NULL;
 }
 
-static __always_inline bool process_is_known(int per_process_id) {
-  struct exec_mappings_key key = {};
-  key.prefix_len = PREFIX_LEN;
-  key.pid = __builtin_bswap32((u32) per_process_id);
-  key.data = 0;
-
-  return bpf_map_lookup_elem(&exec_mappings, &key) != NULL;
-}
 
 static __always_inline void event_new_process(struct bpf_perf_event_data *ctx, int per_process_id) {
   Event event = {
diff --git a/src/bpf/shared_helpers.h b/src/bpf/shared_helpers.h
new file mode 100644
index 0000000..801ce4f
--- /dev/null
+++ b/src/bpf/shared_helpers.h
@@ -0,0 +1,32 @@
+
+static __always_inline mapping_t* find_mapping(int per_process_id, u64 pc) {
+  struct exec_mappings_key key = {};
+  key.prefix_len = PREFIX_LEN;
+  key.pid = __builtin_bswap32((u32) per_process_id);
+  key.data = __builtin_bswap64(pc);
+
+  mapping_t *mapping = bpf_map_lookup_elem(&exec_mappings, &key);
+
+  if (mapping == NULL) {
+    LOG("[error] no mapping found for pc %llx", pc);
+    bump_unwind_error_mapping_not_found();
+    return NULL;
+  }
+
+  if (pc < mapping->begin || pc >= mapping->end) {
+    LOG("[error] pc %llx not contained within begin: %llx end: %llx", pc, mapping->begin, mapping->end);
+    bump_unwind_error_mapping_does_not_contain_pc();
+    return NULL;
+  }
+
+  return mapping;
+}
+
+static __always_inline bool process_is_known(int per_process_id) {
+  struct exec_mappings_key key = {};
+  key.prefix_len = PREFIX_LEN;
+  key.pid = __builtin_bswap32((u32) per_process_id);
+  key.data = 0;
+
+  return bpf_map_lookup_elem(&exec_mappings, &key) != NULL;
+}
\ No newline at end of file
diff --git a/src/bpf/shared_maps.h b/src/bpf/shared_maps.h
new file mode 100644
index 0000000..c213d32
--- /dev/null
+++ b/src/bpf/shared_maps.h
@@ -0,0 +1,48 @@
+#ifndef __LIGHTSWITCH_SHARED_BPF_MAPS__
+#define __LIGHTSWITCH_SHARED_BPF_MAPS__
+
+#include <bpf/bpf_helpers.h>
+
+struct {
+  __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+  __type(key, struct exec_mappings_key);
+  __type(value, mapping_t);
+  __uint(map_flags, BPF_F_NO_PREALLOC);
+  __uint(max_entries, MAX_PROCESSES * 200);
+} exec_mappings SEC(".maps");
+
+struct {
+  __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+  __uint(max_entries, 1);
+  __type(key, u32);
+  __type(value, struct unwinder_stats_t);
+} percpu_stats SEC(".maps");
+
+
+#define DEFINE_COUNTER(__func__name)                                          \
+  static void bump_unwind_##__func__name() {                                  \
+    u32 zero = 0;                                                             \
+    struct unwinder_stats_t *unwinder_stats =                                 \
+        bpf_map_lookup_elem(&percpu_stats, &zero);                            \
+    if (unwinder_stats != NULL) {                                             \
+      unwinder_stats->__func__name++;                                        \
+    }                                                                         \
+  }
+
+DEFINE_COUNTER(total);
+DEFINE_COUNTER(success_dwarf);
+DEFINE_COUNTER(error_truncated);
+DEFINE_COUNTER(error_unsupported_expression);
+DEFINE_COUNTER(error_unsupported_frame_pointer_action);
+DEFINE_COUNTER(error_unsupported_cfa_register);
+DEFINE_COUNTER(error_catchall);
+DEFINE_COUNTER(error_should_never_happen);
+DEFINE_COUNTER(error_pc_not_covered);
+DEFINE_COUNTER(error_mapping_not_found);
+DEFINE_COUNTER(error_mapping_does_not_contain_pc);
+DEFINE_COUNTER(error_chunk_not_found);
+DEFINE_COUNTER(error_binary_search_exausted_iterations);
+DEFINE_COUNTER(error_sending_new_process_event);
+DEFINE_COUNTER(error_cfa_offset_did_not_fit);
+
+#endif
\ No newline at end of file
diff --git a/src/bpf/tracers.bpf.c b/src/bpf/tracers.bpf.c
new file mode 100644
index 0000000..4ec892f
--- /dev/null
+++ b/src/bpf/tracers.bpf.c
@@ -0,0 +1,112 @@
+#include "vmlinux.h"
+#include "profiler.h"
+#include "shared_maps.h"
+#include "shared_helpers.h"
+#include "tracers.h"
+
+#include <bpf/bpf_helpers.h>
+
+typedef struct {
+  u64 pid_tgid;
+} mmap_data_key_t;
+
+struct {
+  __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+  __uint(key_size, sizeof(u32));
+  __uint(value_size, sizeof(u32));
+  __uint(max_entries, 0);
+} tracer_events SEC(".maps");
+
+struct {
+  __uint(type, BPF_MAP_TYPE_HASH);
+  __uint(max_entries, 500);
+  __type(key, mmap_data_key_t);
+  __type(value, u64);
+} tracked_munmap SEC(".maps");
+
+// Arguments from
+// /sys/kernel/debug/tracing/events/syscalls/sys_enter_munmap/format
+struct munmap_entry_args {
+  unsigned short common_type;
+  unsigned char common_flags;
+  unsigned char common_preempt_count;
+  int common_pid;
+  int __syscall_nr;
+  unsigned long addr;
+  size_t len;
+};
+
+
+SEC("tracepoint/sched/sched_process_exit")
+int tracer_process_exit(void *ctx) {
+  u64 pid_tgid = bpf_get_current_pid_tgid();
+  int per_process_id = pid_tgid >> 32;
+
+  if (!process_is_known(per_process_id)) {
+    return 0;
+  }
+
+  tracer_event_t event = {
+      .type = TRACER_EVENT_TYPE_PROCESS_EXIT,
+      .pid = bpf_get_current_pid_tgid() >> 32,
+      .start_address = 0,
+  };
+
+  if (bpf_perf_event_output(ctx, &tracer_events, BPF_F_CURRENT_CPU, &event, sizeof(tracer_event_t)) < 0) {
+    LOG("[error] failed to send process exit tracer event");
+  }
+  return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_munmap")
+int tracer_enter_munmap(struct munmap_entry_args *args) {
+  u64 pid_tgid = bpf_get_current_pid_tgid();
+  int per_process_id = pid_tgid >> 32;
+  u64 start_address = args->addr;
+
+  // We might not know about some mappings, and we definitely don't want to
+  // notify about non-executable mappings being unmapped.
+  if (find_mapping(per_process_id, start_address) == NULL) {
+    return 0;
+  }
+
+  mmap_data_key_t key = {
+      .pid_tgid = bpf_get_current_pid_tgid(),
+  };
+  bpf_map_update_elem(&tracked_munmap, &key, &start_address, BPF_ANY);
+
+  return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_munmap")
+int tracer_exit_munmap(struct trace_event_raw_sys_exit *ctx) {
+  mmap_data_key_t key = {
+      .pid_tgid = bpf_get_current_pid_tgid(),
+  };
+
+  u64 *start_address = bpf_map_lookup_elem(&tracked_munmap, &key);
+  if (start_address == NULL) {
+    return 0;
+  }
+
+  int ret = ctx->ret;
+  if (ret != 0) {
+    return 0;
+  }
+
+  LOG("[debug] sending munmap event");
+
+  tracer_event_t event = {
+      .type = TRACER_EVENT_TYPE_MUNMAP,
+      .pid = bpf_get_current_pid_tgid() >> 32,
+      .start_address = *start_address,
+  };
+
+  if (bpf_perf_event_output(ctx, &tracer_events, BPF_F_CURRENT_CPU, &event, sizeof(tracer_event_t)) < 0) {
+    LOG("[error] failed to send munmap tracer event");
+  }
+
+  return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual MIT/GPL";
diff --git a/src/bpf/tracers.h b/src/bpf/tracers.h
new file mode 100644
index 0000000..e57ac02
--- /dev/null
+++ b/src/bpf/tracers.h
@@ -0,0 +1,12 @@
+#include "basic_types.h"
+
+enum tracer_event_type {
+  TRACER_EVENT_TYPE_PROCESS_EXIT = 1,
+  TRACER_EVENT_TYPE_MUNMAP = 2,
+};
+
+typedef struct {
+  u32 type;
+  int pid;
+  u64 start_address;
+} tracer_event_t;
\ No newline at end of file
diff --git a/src/bpf/tracers_bindings.rs b/src/bpf/tracers_bindings.rs
new file mode 100644
index 0000000..55e9deb
--- /dev/null
+++ b/src/bpf/tracers_bindings.rs
@@ -0,0 +1,22 @@
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+
+use crate::profiler::TracerEvent;
+use plain::Plain;
+include!(concat!(env!("OUT_DIR"), "/tracers_bindings.rs"));
+
+unsafe impl Plain for tracer_event_t {}
+
+impl From<tracer_event_t> for TracerEvent {
+    fn from(event: tracer_event_t) -> Self {
+        match event.type_ {
+            tracer_event_type_TRACER_EVENT_TYPE_PROCESS_EXIT => TracerEvent::ProcessExit(event.pid),
+            tracer_event_type_TRACER_EVENT_TYPE_MUNMAP => {
+                TracerEvent::Munmap(event.pid, event.start_address)
+            }
+            _ => {
+                panic!("invalid event type {}, should never happen", event.type_);
+            }
+        }
+    }
+}
diff --git a/src/profiler.rs b/src/profiler.rs
index 34a0b06..72db401 100644
--- a/src/profiler.rs
+++ b/src/profiler.rs
@@ -16,12 +16,19 @@ use tracing::{debug, error, info, span, warn, Level};
 
 use crate::bpf::profiler_bindings::*;
 use crate::bpf::profiler_skel::{ProfilerSkel, ProfilerSkelBuilder};
+use crate::bpf::tracers_bindings::*;
+use crate::bpf::tracers_skel::{TracersSkel, TracersSkelBuilder};
 use crate::collector::*;
 use crate::object::{BuildId, ObjectFile};
 use crate::perf_events::setup_perf_event;
 use crate::unwind_info::{in_memory_unwind_info, remove_redundant, remove_unnecesary_markers};
 use crate::util::summarize_address_range;
 
+pub enum TracerEvent {
+    ProcessExit(i32),
+    Munmap(i32, u64),
+}
+
 // Some temporary data structures to get things going, this could use lots of
 // improvements
 #[derive(Debug, Clone)]
@@ -31,8 +38,15 @@ pub enum MappingType {
     Vdso,
 }
 
+#[derive(Clone)]
+pub enum ProcessStatus {
+    Running,
+    Exited,
+}
+
 #[derive(Clone)]
 pub struct ProcessInfo {
+    pub status: ProcessStatus,
     pub mappings: Vec<ExecutableMapping>,
 }
@@ -78,12 +92,16 @@ pub struct Profiler<'bpf> {
     // Prevent the links from being removed
     _links: Vec<Link>,
     bpf: ProfilerSkel<'bpf>,
+    tracers: TracersSkel<'bpf>,
     // Profiler state
     procs: Arc<Mutex<HashMap<i32, ProcessInfo>>>,
     object_files: Arc<Mutex<HashMap<u64, ObjectFileInfo>>>,
-    // Channel for bpf events,.
+    // Channel for new process events.
     chan_send: Arc<Mutex<mpsc::Sender<Event>>>,
     chan_receive: Arc<Mutex<mpsc::Receiver<Event>>>,
+    // Channel for munmap events.
+    tracers_chan_send: Arc<Mutex<mpsc::Sender<TracerEvent>>>,
+    tracers_chan_receive: Arc<Mutex<mpsc::Receiver<TracerEvent>>>,
     // Native unwinding state
     native_unwind_state: NativeUnwindState,
     // Debug options
@@ -139,6 +157,18 @@ impl Profiler<'_> {
         skel_builder.obj_builder.debug(bpf_debug);
         let open_skel = skel_builder.open().expect("open skel");
         let bpf = open_skel.load().expect("load skel");
+        let exec_mappings_fd = bpf.maps().exec_mappings().as_fd().as_raw_fd();
+
+        let mut tracers_builder = TracersSkelBuilder::default();
+        tracers_builder.obj_builder.debug(bpf_debug);
+        let open_tracers = tracers_builder.open().expect("open skel");
+        open_tracers
+            .maps()
+            .exec_mappings()
+            .reuse_fd(exec_mappings_fd)
+            .expect("reuse exec_mappings");
+
+        let tracers = open_tracers.load().expect("load skel");
 
         let procs = Arc::new(Mutex::new(HashMap::new()));
         let object_files = Arc::new(Mutex::new(HashMap::new()));
@@ -147,6 +177,10 @@ impl Profiler<'_> {
         let chan_send = Arc::new(Mutex::new(sender));
         let chan_receive = Arc::new(Mutex::new(receiver));
 
+        let (sender, receiver) = mpsc::channel();
+        let tracers_chan_send = Arc::new(Mutex::new(sender));
+        let tracers_chan_receive = Arc::new(Mutex::new(receiver));
+
         let live_shard = Vec::with_capacity(SHARD_CAPACITY);
         let build_id_to_executable_id = HashMap::new();
         let shard_index = 0;
@@ -168,10 +202,13 @@ impl Profiler<'_> {
         Profiler {
             _links: Vec::new(),
             bpf,
+            tracers,
             procs,
             object_files,
             chan_send,
             chan_receive,
+            tracers_chan_send,
+            tracers_chan_receive,
             native_unwind_state,
             filter_pids,
             profile_send,
@@ -207,11 +244,14 @@ impl Profiler<'_> {
         self.setup_perf_events();
         self.set_bpf_map_info();
 
+        self.tracers.attach().expect("attach tracers");
+
+        // New process events.
         let chan_send = self.chan_send.clone();
         let perf_buffer = PerfBufferBuilder::new(self.bpf.maps().events())
             .pages(PERF_BUFFER_BYTES / page_size::get())
-            .sample_cb(move |cpu: i32, data: &[u8]| {
-                Self::handle_event(&chan_send, cpu, data);
+            .sample_cb(move |_cpu: i32, data: &[u8]| {
+                Self::handle_event(&chan_send, data);
             })
             .lost_cb(Self::handle_lost_events)
             .build()
@@ -221,6 +261,32 @@ impl Profiler<'_> {
             perf_buffer.poll(Duration::from_millis(100)).expect("poll");
         });
 
+        // Trace events are received here, such as memory unmaps.
+        let tracers_send = self.tracers_chan_send.clone();
+        let tracers_events_perf_buffer =
+            PerfBufferBuilder::new(self.tracers.maps().tracer_events())
+                .pages(PERF_BUFFER_BYTES / page_size::get())
+                .sample_cb(move |_cpu: i32, data: &[u8]| {
+                    let mut event = tracer_event_t::default();
+                    plain::copy_from_bytes(&mut event, data).expect("serde tracers event");
+                    tracers_send
+                        .lock()
+                        .expect("sender lock")
+                        .send(TracerEvent::from(event))
+                        .expect("handle event send");
+                })
+                .lost_cb(|_cpu, lost_count| {
+                    warn!("lost {} events from the tracers", lost_count);
+                })
+                .build()
+                .unwrap();
+
+        let _tracers_poll_thread = thread::spawn(move || loop {
+            tracers_events_perf_buffer
+                .poll(Duration::from_millis(100))
+                .expect("poll");
+        });
+
         let profile_receive = self.profile_receive.clone();
         let procs = self.procs.clone();
         let object_files = self.object_files.clone();
@@ -259,41 +325,84 @@ impl Profiler<'_> {
                 time_since_last_scheduled_collection = Instant::now();
             }
 
-            let read = self.chan_receive.lock().expect("receive lock").try_recv();
+            let read = self
+                .tracers_chan_receive
+                .lock()
+                .expect("receive lock")
+                .recv_timeout(Duration::from_millis(50));
 
             match read {
-                Ok(event) => {
-                    let pid = event.pid;
+                Ok(TracerEvent::Munmap(pid, start_address)) => {
+                    self.handle_unmap(pid, start_address);
+                }
+                Ok(TracerEvent::ProcessExit(pid)) => {
+                    self.handle_process_exit(pid);
+                }
+                Err(_) => {}
+            }
 
-                    if event.type_ == event_type_EVENT_NEW_PROCESS {
-                        // let span = span!(Level::DEBUG, "calling event_new_proc").entered();
-                        self.event_new_proc(pid);
+            let read = self
+                .chan_receive
+                .lock()
+                .expect("receive lock")
+                .recv_timeout(Duration::from_millis(150));
 
-                        //let mut pname = "".to_string();
-                        /* if let Ok(proc) = procfs::process::Process::new(pid) {
-                            if let Ok(name) = proc.cmdline() {
-                                pname = name.join("").to_string();
-                            }
-                        } */
-                    } else {
-                        error!("unknown event {}", event.type_);
-                    }
-                }
-                Err(_) => {
-                    // todo
+            if let Ok(event) = read {
+                if event.type_ == event_type_EVENT_NEW_PROCESS {
+                    self.event_new_proc(event.pid);
+                } else {
+                    error!("unknown event type {}", event.type_);
                 }
             }
 
             if self.native_unwind_state.dirty
                 && self.native_unwind_state.last_persisted.elapsed() > Duration::from_millis(100)
             {
-                self.persist_unwind_info(&self.native_unwind_state.live_shard);
-                self.native_unwind_state.dirty = false;
+                if self.persist_unwind_info(&self.native_unwind_state.live_shard) {
+                    self.native_unwind_state.dirty = false;
+                }
                 self.native_unwind_state.last_persisted = Instant::now();
             }
         }
     }
 
+    pub fn handle_process_exit(&self, pid: i32) {
+        let mut procs = self.procs.lock().expect("lock");
+
+        match procs.get_mut(&pid) {
+            Some(proc_info) => {
+                debug!("marking process {} as exited", pid);
+                proc_info.status = ProcessStatus::Exited;
+            }
+            None => {
+                debug!("could not find process {} while marking as exited", pid);
+            }
+        }
+    }
+
+    pub fn handle_unmap(&self, pid: i32, start_address: u64) {
+        let procs = self.procs.lock().expect("lock");
+
+        match procs.get(&pid) {
+            Some(proc_info) => {
+                for mapping in &proc_info.mappings {
+                    if mapping.start_addr <= start_address && start_address <= mapping.end_addr {
+                        debug!("found memory mapping {:x} for {} while handling munmap, not doing anything", start_address, pid);
+                        return;
+                    }
+                }
+
+                debug!(
+                    "could not find memory mapping {:x} for {} while handling munmap",
+                    start_address, pid
+                );
+            }
+            None => {
+                debug!("could not find {} while handling munmap", pid);
+            }
+        }
+    }
+
+    /// Clears a BPF map in an iterator-stable way.
     pub fn clear_map(&mut self, name: &str) {
         let map = self.bpf.object().map(name).expect("map exists");
@@ -448,7 +557,7 @@ impl Profiler<'_> {
         self.procs.lock().expect("lock").get(&pid).is_some()
     }
 
-    fn persist_unwind_info(&self, live_shard: &Vec<stack_unwind_row_t>) {
+    fn persist_unwind_info(&self, live_shard: &Vec<stack_unwind_row_t>) -> bool {
         let _span = span!(Level::DEBUG, "persist_unwind_info").entered();
 
         let key = self.native_unwind_state.shard_index.to_ne_bytes();
@@ -460,11 +569,21 @@ impl Profiler<'_> {
             )
         };
 
-        self.bpf
+        match self
+            .bpf
             .maps()
             .unwind_tables()
             .update(&key, val, MapFlags::ANY)
-            .expect("update"); // error with value: System(7)', src/main.rs:663:26
+        {
+            Ok(_) => {
+                debug!("unwind info persisted successfully");
+                true
+            }
+            Err(e) => {
+                warn!("failed to persist unwind info with {:?}", e);
+                false
+            }
+        }
     }
 
     fn add_bpf_mapping(
@@ -486,9 +605,7 @@ impl Profiler<'_> {
 
         // Local unwind info state
         let mut mappings = Vec::with_capacity(MAX_MAPPINGS_PER_PROCESS as usize);
-
-        // hack for kworkers and such
-        let mut got_some_unwind_info: bool = true;
+        let mut have_unwind_info = false;
 
         // Get unwind info
         for mapping in self
@@ -638,11 +755,7 @@ impl Profiler<'_> {
                 self.native_unwind_state.build_id_to_executable_id.len(),
            );
 
-            // no unwind info / errors
-            if found_unwind_info.is_empty() {
-                got_some_unwind_info = false;
-                break;
-            }
+            have_unwind_info = !found_unwind_info.is_empty();
 
             let first_pc = found_unwind_info[0].pc;
             let last_pc = found_unwind_info[found_unwind_info.len() - 1].pc;
@@ -697,7 +810,9 @@ impl Profiler<'_> {
 
             if available_space == 0 {
                 info!("no space in live shard, allocating a new one");
-                self.persist_unwind_info(&self.native_unwind_state.live_shard);
+                if self.persist_unwind_info(&self.native_unwind_state.live_shard) {
+                    self.native_unwind_state.dirty = false;
+                }
                 self.native_unwind_state.live_shard.truncate(0);
                 self.native_unwind_state.shard_index += 1;
                 continue;
@@ -775,7 +890,7 @@ impl Profiler<'_> {
                 .insert(build_id, executable_id as u32);
         }
         // Added all mappings
-        if got_some_unwind_info {
+        if have_unwind_info {
            self.native_unwind_state.dirty = true;
 
            // Add entry just with the pid to signal processes that we already know about.
@@ -938,7 +1053,10 @@ impl Profiler<'_> {
         }
 
         mappings.sort_by_key(|k| k.start_addr.cmp(&k.start_addr));
-        let proc_info = ProcessInfo { mappings };
+        let proc_info = ProcessInfo {
+            status: ProcessStatus::Running,
+            mappings,
+        };
         self.procs
             .clone()
             .lock()
@@ -948,7 +1066,7 @@ impl Profiler<'_> {
         Ok(())
     }
 
-    fn handle_event(sender: &Arc<Mutex<mpsc::Sender<Event>>>, _cpu: i32, data: &[u8]) {
+    fn handle_event(sender: &Arc<Mutex<mpsc::Sender<Event>>>, data: &[u8]) {
         let event = plain::from_bytes(data).expect("handle event serde");
         sender
             .lock()
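Both perf-buffer callbacks above turn raw bytes into C structs with the `plain` crate. A self-contained sketch of the tracer path, with a struct mirroring `tracer_event_t` from `tracers.h` (the standalone type and helper names here are illustrative, not part of the patch):

```rust
use plain::Plain;

// Mirrors tracer_event_t from tracers.h: u32 type, int pid, u64 start_address.
// Field offsets are 0, 4 and 8, so the layout has no internal padding.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
struct TracerEventRaw {
    type_: u32,
    pid: i32,
    start_address: u64,
}

// Safety: any byte pattern is a valid value for these field types.
unsafe impl Plain for TracerEventRaw {}

// What the tracers sample_cb does with each perf buffer record.
fn parse_tracer_event(data: &[u8]) -> TracerEventRaw {
    let mut event = TracerEventRaw::default();
    // copy_from_bytes fails if `data` is shorter than the struct.
    plain::copy_from_bytes(&mut event, data).expect("buffer too small");
    event
}
```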