BACKPORT: perf/smmuv3: Validate groups for global filtering
With global filtering, it becomes possible for users to construct
self-contradictory groups with conflicting filters. Make sure we
cover that when initially validating events.

This patch belongs to a series of patches supporting the SMMUv3 PMU.

Backport from: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/drivers/perf?id=3c9347351a6ea1234aa647b36f89052de050d2a2
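
For illustration only (not part of the upstream patch): with global
filtering, the self-contradictory case this change guards against can be
reproduced from user space by grouping two PMCG events that request
different stream-ID filters. The sketch below is a minimal, hypothetical
reproducer in C; the PMU type id (24) is an assumption and must be read
from /sys/bus/event_source/devices/<pmcg>/type on a real system, and the
config1 bit layout follows the driver's format attributes
(filter_stream_id = config1:0-31, filter_span = config1:32,
filter_enable = config1:33).

  /* Hypothetical reproducer, not part of this patch. */
  #include <linux/perf_event.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags)
  {
          return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int leader, sibling;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = 24;         /* assumed PMCG type id, read from sysfs */
          attr.config = 0x1;      /* "transaction" event */
          /* filter_enable (config1 bit 33) | filter_stream_id 0x42 */
          attr.config1 = (1ULL << 33) | 0x42;

          leader = perf_event_open(&attr, -1, 0, -1, 0);
          if (leader < 0) {
                  perror("leader");
                  return 1;
          }

          /* Same event, conflicting stream ID: a global filter cannot
           * satisfy both group members at once. */
          attr.config1 = (1ULL << 33) | 0x43;
          sibling = perf_event_open(&attr, -1, 0, leader, 0);
          if (sibling < 0)
                  perror("sibling");      /* fails with EINVAL after this patch */

          return 0;
  }

Previously only the PMU type of the leader and siblings was compared, so
such a group passed event_init and the conflict surfaced only later, as
an -EAGAIN from smmu_pmu_apply_event_filter(); with this change the
sibling's perf_event_open() is rejected with EINVAL up front.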

Signed-off-by: Robin Murphy <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
tphan-ampere committed Jun 30, 2020
commit 9482e6e (1 parent: 236a28b)

 drivers/perf/arm_smmuv3_pmu.c | 47 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 13 deletions(-)

--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -109,8 +109,6 @@ struct smmu_pmu {
 	void __iomem *reloc_base;
 	u64 counter_mask;
 	bool global_filter;
-	u32 global_filter_span;
-	u32 global_filter_sid;
 };
 
 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -244,6 +242,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
 	smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }
 
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+					 struct perf_event *new)
+{
+	if (get_filter_enable(new) != get_filter_enable(curr))
+		return false;
+
+	if (!get_filter_enable(new))
+		return true;
+
+	return get_filter_span(new) == get_filter_span(curr) &&
+	       get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 				       struct perf_event *event, int idx)
 {
@@ -263,17 +274,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 	}
 
 	/* Requested settings same as current global settings*/
-	if (span == smmu_pmu->global_filter_span &&
-	    sid == smmu_pmu->global_filter_sid)
+	idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+	if (idx == num_ctrs ||
+	    smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+		smmu_pmu_set_event_filter(event, 0, span, sid);
 		return 0;
+	}
 
-	if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-		return -EAGAIN;
-
-	smmu_pmu_set_event_filter(event, 0, span, sid);
-	smmu_pmu->global_filter_span = span;
-	smmu_pmu->global_filter_sid = sid;
-	return 0;
+	return -EAGAIN;
 }
 
 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -296,6 +304,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
 	return idx;
 }
 
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+				       struct perf_event *new)
+{
+	if (new->pmu != curr->pmu)
+		return false;
+
+	if (to_smmu_pmu(new->pmu)->global_filter &&
+	    !smmu_pmu_check_global_filter(curr, new))
+		return false;
+
+	return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.
@@ -333,7 +354,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 
 	/* Don't allow groups with mixed PMUs, except for s/w events */
 	if (!is_software_event(event->group_leader)) {
-		if (event->group_leader->pmu != event->pmu)
+		if (!smmu_pmu_events_compatible(event->group_leader, event))
 			return -EINVAL;
 
 		if (++group_num_events > smmu_pmu->num_counters)
@@ -344,7 +365,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 		if (is_software_event(sibling))
 			continue;
 
-		if (sibling->pmu != event->pmu)
+		if (!smmu_pmu_events_compatible(sibling, event))
 			return -EINVAL;
 
 		if (++group_num_events > smmu_pmu->num_counters)
