Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

define BPerfEventsManager for managing shared global perf events #296

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
85 changes: 70 additions & 15 deletions hbt/src/perf_event/BPerfEventsGroup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -246,8 +246,8 @@ bool BPerfEventsGroup::isOpen() const {
HBT_DCHECK(leader_link_fd_ >= 0);
// set proper offset_
::memset(offsets_, 0, sizeof(offsets_));
readGlobal(offsets_, true);
enabled_ = true;
readGlobal(offsets_, true);
return true;
}

Expand All @@ -265,7 +265,7 @@ int BPerfEventsGroup::syncCpu_(__u32 cpu, int leader_fd) {
return ::bpf_prog_test_run_opts(leader_fd, &opts);
}

void BPerfEventsGroup::syncGlobal_() {
void BPerfEventsGroup::syncGlobal_() const {
int err;

for (int cpu = 0; cpu < cpu_cnt_; cpu++) {
Expand Down Expand Up @@ -504,23 +504,35 @@ int BPerfEventsGroup::preparePerThreadBPerf_(bperf_leader_cgroup* skel) {
return err;
}

int BPerfEventsGroup::read(
struct bpf_perf_event_value* output,
std::vector<struct bpf_perf_event_value> BPerfEventsGroup::readFromBpf_(
int fd,
__u64 id,
bool skip_offset) {
auto event_cnt = confs_.size();
__u64 id) const {
std::vector<struct bpf_perf_event_value> values(
(size_t)cpu_cnt_ * BPERF_MAX_GROUP_SIZE);

if (!enabled_) {
return -1;
return {};
}

syncGlobal_();
if (int ret = ::bpf_map_lookup_elem(fd, &id, values.data()); ret) {
HBT_LOG_ERROR() << "cannot look up key " << id
<< " from output map. Return value: " << ret;
return {};
}

return values;
}

int BPerfEventsGroup::read(
struct bpf_perf_event_value* output,
int fd,
__u64 id,
bool skip_offset) const {
auto event_cnt = confs_.size();

auto values = readFromBpf_(fd, id);

if (values.empty()) {
return -1;
}

Expand All @@ -542,13 +554,35 @@ int BPerfEventsGroup::read(
return event_cnt;
}

// Read one bpf_perf_event_value per (cpu, event) from the output map `fd`
// under key `id`, into `output` keyed by CPU index.
//
// Returns the number of events on success, -1 on failure (group disabled or
// map lookup error) — same convention as read().
int BPerfEventsGroup::readPerCpu(
    std::map<
        int,
        std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>& output,
    int fd,
    __u64 id) const {
  auto event_cnt = confs_.size();

  auto values = readFromBpf_(fd, id);

  // readFromBpf_ returns an empty vector when the group is disabled or the
  // map lookup fails; indexing into it below would be out-of-bounds.
  if (values.empty()) {
    return -1;
  }

  for (size_t e = 0; e < event_cnt; e++) {
    for (int c = 0; c < cpu_cnt_; c++) {
      // Map values are laid out cpu-major, BPERF_MAX_GROUP_SIZE slots per CPU.
      size_t idx = (size_t)c * BPERF_MAX_GROUP_SIZE + e;
      output[c][e].counter = values[idx].counter;
      output[c][e].enabled = values[idx].enabled;
      output[c][e].running = values[idx].running;
    }
  }

  return event_cnt;
}

int BPerfEventsGroup::readGlobal(
struct bpf_perf_event_value* output,
bool skip_offset) {
bool skip_offset) const {
return read(output, global_output_fd_, 0, skip_offset);
}

bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) {
bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) const {
const auto num_events = rv.getNumEvents();
HBT_ARG_CHECK_EQ(confs_.size(), rv.getNumEvents());

Expand All @@ -561,13 +595,30 @@ bool BPerfEventsGroup::readGlobal(ReadValues& rv, bool skip_offset) {
}
}

int BPerfEventsGroup::readCgroup(
struct bpf_perf_event_value* output,
__u64 id) {
int BPerfEventsGroup::readGlobalPerCpu(
std::
map<int, std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
output) const {
return readPerCpu(output, global_output_fd_, 0);
}

// Read system-wide per-CPU counters and convert each CPU's raw slot array
// into a ReadValues entry in `rv`, keyed by CPU index.
// Returns true iff at least one event was read.
bool BPerfEventsGroup::readGlobalPerCpu(std::map<int, ReadValues>& rv) const {
  std::map<int, std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>
      raw;
  const int numEvents = readGlobalPerCpu(raw);
  for (auto& [cpu, slots] : raw) {
    // insert() returns the existing entry if the key is already present,
    // so a pre-populated rv entry is overwritten by toReadValues below.
    auto it = rv.insert({cpu, ReadValues(numEvents)}).first;
    toReadValues(it->second, slots.data());
  }
  return numEvents > 0;
}

int BPerfEventsGroup::readCgroup(struct bpf_perf_event_value* output, __u64 id)
const {
return read(output, cgroup_output_fd_, id, true /* skip_offset */);
}

bool BPerfEventsGroup::readCgroup(ReadValues& rv, __u64 id) {
bool BPerfEventsGroup::readCgroup(ReadValues& rv, __u64 id) const {
const auto num_events = rv.getNumEvents();
HBT_ARG_CHECK_EQ(confs_.size(), rv.getNumEvents());

Expand All @@ -580,6 +631,10 @@ bool BPerfEventsGroup::readCgroup(ReadValues& rv, __u64 id) {
}
}

// Return a copy of the event configurations this group was created with.
EventConfs BPerfEventsGroup::getEventConfs() const {
  return this->confs_;
}

void BPerfEventsGroup::toReadValues(
ReadValues& rv,
struct bpf_perf_event_value* values) {
Expand Down
31 changes: 25 additions & 6 deletions hbt/src/perf_event/BPerfEventsGroup.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,20 @@ class BPerfEventsGroup {
bool removeCgroup(__u64 id);

// eBPF like interface to read counters from all CPUs and accumulate them.
int readGlobal(struct bpf_perf_event_value* output, bool skip_offset = false);
bool readGlobal(ReadValues& rv, bool skip_offset = false);
int readGlobal(struct bpf_perf_event_value* output, bool skip_offset = false)
const;
bool readGlobal(ReadValues& rv, bool skip_offset = false) const;
int readGlobalPerCpu(
std::map<
int,
std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
output) const;
bool readGlobalPerCpu(std::map<int, ReadValues>& rv) const;

int readCgroup(struct bpf_perf_event_value* output, __u64 id);
bool readCgroup(ReadValues& rv, __u64 id);
int readCgroup(struct bpf_perf_event_value* output, __u64 id) const;
bool readCgroup(ReadValues& rv, __u64 id) const;

EventConfs getEventConfs() const;

protected:
const EventConfs confs_;
Expand Down Expand Up @@ -122,18 +131,28 @@ class BPerfEventsGroup {
// read(&offset_);
struct bpf_perf_event_value offsets_[BPERF_MAX_GROUP_SIZE];

std::vector<struct bpf_perf_event_value> readFromBpf_(int fd, __u64 id) const;

int read(
struct bpf_perf_event_value* output,
int fd,
__u64 id,
bool skip_offset = false);
bool skip_offset = false) const;

int readPerCpu(
std::map<
int,
std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>&
output,
int fd,
__u64 id) const;

int reloadSkel_(struct bperf_attr_map_elem* entry);
int loadPerfEvent_(struct bperf_leader_cgroup* skel);

static int syncCpu_(__u32 cpu, int leader_pd);
static void toReadValues(ReadValues& rv, struct bpf_perf_event_value*);
void syncGlobal_();
void syncGlobal_() const;

// For per thread monitoring
bool per_thread_;
Expand Down
9 changes: 9 additions & 0 deletions hbt/src/perf_event/tests/BPerfEventsGroupTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,15 @@ TEST(BPerfEventsGroupTest, RunSystemWide) {
checkReading(val, prev, n);
::memcpy(prev, val, sizeof(prev));
}

std::map<int, std::array<struct bpf_perf_event_value, BPERF_MAX_GROUP_SIZE>>
perCpuValues;
auto n = system.readGlobalPerCpu(perCpuValues);
EXPECT_GT(n, 0);
EXPECT_EQ(perCpuValues.size(), CpuSet::makeAllOnline().numCpus());
for (const auto& [cpu, values] : perCpuValues) {
EXPECT_GE(values[0].counter, 0);
}
}

TEST(BPerfEventsGroupTest, RunCgroup) {
Expand Down
Loading