diff --git a/hbt/src/perf_event/BPerfEventsGroup.cpp b/hbt/src/perf_event/BPerfEventsGroup.cpp
index e803f1eb..52a6c89d 100644
--- a/hbt/src/perf_event/BPerfEventsGroup.cpp
+++ b/hbt/src/perf_event/BPerfEventsGroup.cpp
@@ -263,7 +263,7 @@ int BPerfEventsGroup::syncCpu_(__u32 cpu, int leader_fd) {
   return ::bpf_prog_test_run_opts(leader_fd, &opts);
 }
 
-void BPerfEventsGroup::syncGlobal_() {
+void BPerfEventsGroup::syncGlobal_() const {
   int err;
 
   for (int cpu = 0; cpu < cpu_cnt_; cpu++) {
@@ -497,7 +497,7 @@ int BPerfEventsGroup::preparePerThreadBPerf_(bperf_leader_cgroup* skel) {
 
 std::vector<struct bpf_perf_event_value> BPerfEventsGroup::readFromBpf_(
     int fd,
-    __u64 id) {
+    __u64 id) const {
   std::vector<struct bpf_perf_event_value> values(
       (size_t)cpu_cnt_ * BPERF_MAX_GROUP_SIZE);
   if (!enabled_) {
@@ -518,7 +518,7 @@ int BPerfEventsGroup::read(
     struct bpf_perf_event_value* output,
     int fd,
     __u64 id,
-    bool skip_offset) {
+    bool skip_offset) const {
   auto event_cnt = confs_.size();
   auto values = readFromBpf_(fd, id);
 
@@ -550,7 +550,7 @@ int BPerfEventsGroup::readPerCpu(
         int,
         std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>& output,
     int fd,
-    __u64 id) {
+    __u64 id) const {
   auto event_cnt = confs_.size();
   auto values = readFromBpf_(fd, id);
 
@@ -569,11 +569,11 @@ int BPerfEventsGroup::readPerCpu(
 
 int BPerfEventsGroup::readGlobal(
     struct bpf_perf_event_value* output,
-    bool skip_offset) {
+    bool skip_offset) const {
   return read(output, global_output_fd_, 0, skip_offset);
 }
 
-bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) {
+bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) const {
   const auto num_events = rv.getNumEvents();
 
   HBT_ARG_CHECK_EQ(confs_.size(), rv.getNumEvents());
@@ -589,11 +589,11 @@ bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) {
 int BPerfEventsGroup::readGlobalPerCpu(
     std::
         map<int, std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
-        output) {
+        output) const {
   return readPerCpu(output, global_output_fd_, 0);
 }
 
-bool BPerfEventsGroup::readGlobalPerCpu(std::map<int, ReadValues>& rv) {
+bool BPerfEventsGroup::readGlobalPerCpu(std::map<int, ReadValues>& rv) const {
   std::map<int, std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>
       output;
   int numEvents = readGlobalPerCpu(output);
@@ -604,13 +604,12 @@ bool BPerfEventsGroup::readGlobalPerCpu(std::map<int, ReadValues>& rv) {
   return numEvents > 0;
 }
 
-int BPerfEventsGroup::readCgroup(
-    struct bpf_perf_event_value* output,
-    __u64 id) {
+int BPerfEventsGroup::readCgroup(struct bpf_perf_event_value* output, __u64 id)
+    const {
   return read(output, cgroup_output_fd_, id, true /* skip_offset */);
 }
 
-bool BPerfEventsGroup::readCgroup(ReadValues& rv, __u64 id) {
+bool BPerfEventsGroup::readCgroup(ReadValues& rv, __u64 id) const {
   const auto num_events = rv.getNumEvents();
 
   HBT_ARG_CHECK_EQ(confs_.size(), rv.getNumEvents());
diff --git a/hbt/src/perf_event/BPerfEventsGroup.h b/hbt/src/perf_event/BPerfEventsGroup.h
index 0f4530eb..85500d8e 100644
--- a/hbt/src/perf_event/BPerfEventsGroup.h
+++ b/hbt/src/perf_event/BPerfEventsGroup.h
@@ -70,17 +70,18 @@ class BPerfEventsGroup {
   bool removeCgroup(__u64 id);
 
   // eBPF like interface to read counters from all CPUs and accumulate them.
-  int readGlobal(struct bpf_perf_event_value* output, bool skip_offset = false);
-  bool readGlobal(ReadValues& rv, bool skip_offset = false);
+  int readGlobal(struct bpf_perf_event_value* output, bool skip_offset = false)
+      const;
+  bool readGlobal(ReadValues& rv, bool skip_offset = false) const;
   int readGlobalPerCpu(
       std::map<
           int,
          std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
-          output);
-  bool readGlobalPerCpu(std::map<int, ReadValues>& rv);
+          output) const;
+  bool readGlobalPerCpu(std::map<int, ReadValues>& rv) const;
 
-  int readCgroup(struct bpf_perf_event_value* output, __u64 id);
-  bool readCgroup(ReadValues& rv, __u64 id);
+  int readCgroup(struct bpf_perf_event_value* output, __u64 id) const;
+  bool readCgroup(ReadValues& rv, __u64 id) const;
 
  protected:
   const EventConfs confs_;
@@ -128,13 +129,13 @@ class BPerfEventsGroup {
   // read(&offset_);
   struct bpf_perf_event_value offsets_[BPERF_MAX_GROUP_SIZE];
 
-  std::vector<struct bpf_perf_event_value> readFromBpf_(int fd, __u64 id);
+  std::vector<struct bpf_perf_event_value> readFromBpf_(int fd, __u64 id) const;
 
   int read(
       struct bpf_perf_event_value* output,
       int fd,
       __u64 id,
-      bool skip_offset = false);
+      bool skip_offset = false) const;
 
   int readPerCpu(
       std::map<
          int,
          std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
          output,
       int fd,
-      __u64 id);
+      __u64 id) const;
 
   int reloadSkel_(struct bperf_attr_map_elem* entry);
   int loadPerfEvent_(struct bperf_leader_cgroup* skel);
   static int syncCpu_(__u32 cpu, int leader_pd);
   static void toReadValues(ReadValues& rv, struct bpf_perf_event_value*);
-  void syncGlobal_();
+  void syncGlobal_() const;
 
   // For per thread monitoring
   bool per_thread_;
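Note: the net effect of this diff is that every read path (readGlobal, readGlobalPerCpu, readCgroup, plus the internal readFromBpf_/read/readPerCpu/syncGlobal_ helpers) is now const-qualified, so read-only consumers can hold the events group by const reference. A minimal sketch of what that enables follows; the include path mirrors the header touched above, the helper name snapshotGlobal is hypothetical, namespace qualifiers are omitted, and constructing the BPerfEventsGroup and ReadValues instances is assumed to happen elsewhere.

#include "hbt/src/perf_event/BPerfEventsGroup.h"

// Hypothetical read-only consumer: after this change it can accept the events
// group by const reference, because readGlobal(ReadValues&, bool) is const.
bool snapshotGlobal(const BPerfEventsGroup& group, ReadValues& rv) {
  // skip_offset keeps its default value of false.
  return group.readGlobal(rv);
}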