[PATCH v10 1/5] x86: perf: Move RDPMC event flag to a common definition
Rob Herring
robh at kernel.org
Tue Sep 14 13:47:56 PDT 2021
In preparation to enable user counter access on arm64 and to move some
of the user access handling to perf core, create a common event flag for
user counter access and convert x86 to use it.
Since the architecture-specific flags start at the LSB, the common
flags start at the MSB.
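
For context, this flag ultimately feeds userpg->cap_user_rdpmc, which
tells userspace it may read the counter directly with RDPMC. A minimal
sketch of the documented self-monitoring read sequence (see the
perf_event_mmap_page comments in include/uapi/linux/perf_event.h) might
look like the following; "pc" is assumed to point at the first page of
an already-opened, mmap'ed event on x86:

    #include <stdint.h>
    #include <x86intrin.h>          /* __rdpmc() */
    #include <linux/perf_event.h>   /* struct perf_event_mmap_page */

    static uint64_t read_counter(volatile struct perf_event_mmap_page *pc)
    {
            uint64_t count, pmc;
            uint32_t seq, idx, width;

            do {
                    seq = pc->lock;
                    __sync_synchronize();   /* read barrier */

                    idx = pc->index;        /* 0 means RDPMC unavailable */
                    count = pc->offset;
                    if (pc->cap_user_rdpmc && idx) {
                            width = pc->pmc_width;
                            /* hardware counter index is idx - 1 */
                            pmc = __rdpmc(idx - 1);
                            /* sign-extend from pmc_width bits */
                            pmc <<= 64 - width;
                            pmc >>= 64 - width;
                            count += pmc;
                    }

                    __sync_synchronize();   /* read barrier */
            } while (pc->lock != seq);

            return count;
    }

The loop retries whenever pc->lock changes, so a concurrent update of
the page by the kernel is never observed half-written.
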
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Ingo Molnar <mingo at redhat.com>
Cc: Arnaldo Carvalho de Melo <acme at kernel.org>
Cc: Mark Rutland <mark.rutland at arm.com>
Cc: Alexander Shishkin <alexander.shishkin at linux.intel.com>
Cc: Jiri Olsa <jolsa at redhat.com>
Cc: Namhyung Kim <namhyung at kernel.org>
Cc: Kan Liang <kan.liang at linux.intel.com>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: Borislav Petkov <bp at alien8.de>
Cc: x86 at kernel.org
Cc: "H. Peter Anvin" <hpa at zytor.com>
Cc: linux-perf-users at vger.kernel.org
Signed-off-by: Rob Herring <robh at kernel.org>
---
arch/x86/events/core.c | 10 +++++-----
arch/x86/events/perf_event.h | 2 +-
include/linux/perf_event.h | 2 ++
3 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2a57dbed4894..2bd50fc061e1 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2469,7 +2469,7 @@ static int x86_pmu_event_init(struct perf_event *event)
if (READ_ONCE(x86_pmu.attr_rdpmc) &&
!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
- event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
return err;
}
@@ -2503,7 +2503,7 @@ void perf_clear_dirty_counters(void)
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
- if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
return;
/*
@@ -2524,7 +2524,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
- if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
return;
if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
@@ -2535,7 +2535,7 @@ static int x86_pmu_event_idx(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- if (!(hwc->flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+ if (!(hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
return 0;
if (is_metric_idx(hwc->idx))
@@ -2718,7 +2718,7 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_rdpmc =
- !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
+ !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
userpg->pmc_width = x86_pmu.cntval_bits;
if (!using_native_sched_clock() || !sched_clock_stable())
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e3ac05c97b5e..49f68b15745f 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -73,7 +73,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */
+
#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fe156a8170aa..12debf008d39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -142,6 +142,8 @@ struct hw_perf_event {
int event_base_rdpmc;
int idx;
int last_cpu;
+
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
int flags;
struct hw_perf_event_extra extra_reg;
--
2.30.2