[PATCH v3 3/4] perf: arm_cspmu: Support implementation specific validation

Robin Murphy robin.murphy at arm.com
Tue Jun 20 04:44:24 PDT 2023


On 07/06/2023 9:31 am, Ilkka Koskinen wrote:
> Some platforms may use e.g. a different filtering mechanism and, thus,
> may need a different way to validate the events and groups.
> 
> Signed-off-by: Ilkka Koskinen <ilkka at os.amperecomputing.com>
> ---
>   drivers/perf/arm_cspmu/arm_cspmu.c | 13 ++++++++++++-
>   drivers/perf/arm_cspmu/arm_cspmu.h |  4 ++++
>   2 files changed, 16 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
> index 72ca4f56347c..9021d1878250 100644
> --- a/drivers/perf/arm_cspmu/arm_cspmu.c
> +++ b/drivers/perf/arm_cspmu/arm_cspmu.c
> @@ -559,7 +559,7 @@ static void arm_cspmu_disable(struct pmu *pmu)
>   static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
>   				struct perf_event *event)
>   {
> -	int idx;
> +	int idx, ret;
>   	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>   
>   	if (supports_cycle_counter(cspmu)) {
> @@ -593,6 +593,12 @@ static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
>   	if (idx >= cspmu->num_logical_ctrs)
>   		return -EAGAIN;
>   
> +	if (cspmu->impl.ops.validate_event) {
> +		ret = cspmu->impl.ops.validate_event(cspmu, event);
> +		if (ret)
> +			return ret;
> +	}
> +
>   	set_bit(idx, hw_events->used_ctrs);
>   
>   	return idx;
> @@ -618,6 +624,7 @@ static bool arm_cspmu_validate_event(struct pmu *pmu,
>    */
>   static bool arm_cspmu_validate_group(struct perf_event *event)
>   {
> +	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
>   	struct perf_event *sibling, *leader = event->group_leader;
>   	struct arm_cspmu_hw_events fake_hw_events;
>   
> @@ -635,6 +642,10 @@ static bool arm_cspmu_validate_group(struct perf_event *event)
>   			return false;
>   	}
>   
> +	if (cspmu->impl.ops.validate_group &&
> +	    cspmu->impl.ops.validate_group(event))
> +		return false;

Hmm, this means that any driver wanting to use it has to duplicate all 
the group iteration logic, which isn't ideal. More than that, though, 
I'm not sure the way you've implemented it in patch #4 even does 
anything, since it only appears to repeat the same checks that already 
happen in this path:

   arm_cspmu_validate_group()
     arm_cspmu_validate_event()
       arm_cspmu_get_event_idx()
         ops.validate_event() -> ampere_cspmu_validate_params()

so there's no need for the ops.validate_group hook to just call 
ampere_cspmu_validate_params() a second time when it's guaranteed to 
succeed (because otherwise we'd have bailed out already).
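To spell that out, with the hooks as they are in this patch, patch #4 
presumably ends up with something along these lines (a rough 
reconstruction from the call chain above, not the actual patch #4 code; 
in particular the ampere_cspmu_validate_params() signature here is a 
guess):

static int ampere_cspmu_validate_event(struct arm_cspmu *cspmu,
                                       struct perf_event *event)
{
        /* reached via arm_cspmu_get_event_idx() */
        return ampere_cspmu_validate_params(cspmu, event);
}

static int ampere_cspmu_validate_group(struct perf_event *event)
{
        /*
         * reached via arm_cspmu_validate_group(), so this only repeats
         * the per-event check that arm_cspmu_validate_event() already
         * performs for every event in the group via the path above
         */
        return ampere_cspmu_validate_params(to_arm_cspmu(event->pmu),
                                            event);
}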

I think what we want overall is an "is this event config valid at all" 
hook from arm_cspmu_event_init() (which we don't really need to 
implement yet unless you want to start sanity-checking your actual 
rank/bank/threshold values), plus an "is this event schedulable in the 
given PMU context" hook from arm_cspmu_get_event_idx(), which should 
serve for both group validation via the fake context in event_init and 
actual scheduling in the real context in add.
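Concretely, I'd imagine something like the sketch below - the hook 
names and exact signatures are just placeholders to show the shape, not 
a worked-out proposal:

struct arm_cspmu_impl_ops {
        /* ...existing callbacks... */

        /* "is this event config valid at all" - from arm_cspmu_event_init() */
        int (*validate_event)(struct arm_cspmu *cspmu,
                              struct perf_event *event);
        /*
         * "is this event schedulable in the given PMU context" - from
         * arm_cspmu_get_event_idx(), so it covers both group validation
         * on the fake hw_events in event_init() and real scheduling in
         * add()
         */
        int (*validate_sched)(struct arm_cspmu *cspmu,
                              struct arm_cspmu_hw_events *hw_events,
                              struct perf_event *event);

        /* ... */
};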

Thanks,
Robin.

> +
>   	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
>   }
>   
> diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
> index f89ae2077164..291cedb196ea 100644
> --- a/drivers/perf/arm_cspmu/arm_cspmu.h
> +++ b/drivers/perf/arm_cspmu/arm_cspmu.h
> @@ -106,6 +106,10 @@ struct arm_cspmu_impl_ops {
>   	void (*set_ev_filter)(struct arm_cspmu *cspmu,
>   			      struct hw_perf_event *hwc,
>   			      u32 filter);
> +	/* Implementation specific group validation */
> +	int (*validate_group)(struct perf_event *event);
> +	/* Implementation specific event validation */
> +	int (*validate_event)(struct arm_cspmu *cspmu, struct perf_event *new);
>   	/* Hide/show unsupported events */
>   	umode_t (*event_attr_is_visible)(struct kobject *kobj,
>   					 struct attribute *attr, int unused);


