[PATCH V2] perf/imx_ddr: Add stop event counters support for i.MX8MP

Joakim Zhang qiangqing.zhang at nxp.com
Thu Sep 17 06:22:08 EDT 2020


Kindly Ping...

Best Regards,
Joakim Zhang

> -----Original Message-----
> From: Joakim Zhang <qiangqing.zhang at nxp.com>
> Sent: September 8, 2020 18:48
> To: will at kernel.org; mark.rutland at arm.com; robin.murphy at arm.com
> Cc: dl-linux-imx <linux-imx at nxp.com>; linux-arm-kernel at lists.infradead.org;
> Joakim Zhang <qiangqing.zhang at nxp.com>
> Subject: [PATCH V2] perf/imx_ddr: Add stop event counters support for
> i.MX8MP
> 
> The DDR Perf driver currently supports only free-running event counters
> (counter1/2/3); this patch adds support for stop event counters.
> 
> Legacy SoCs:
> The cycle counter (counter0) is a special counter that only counts
> cycles. When the cycle counter overflows, it locks all counters and
> raises an interrupt. In ddr_perf_irq_handler, the cycle counter is
> disabled so that all counters stop at the same time, every counter's
> count is updated, and then the cycle counter is re-enabled so that all
> counters resume counting. During this process only the cycle counter is
> cleared; there is no need to clear the event counters, since they are
> free-running: they keep counting after an overflow, and the do/while
> loop in ddr_perf_event_update handles the event counter overflow case.
> 
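> For illustration, the free-running update path looks roughly like the
> following (a simplified sketch of the driver's existing
> ddr_perf_event_update(); the 32-bit mask keeps the delta correct
> across a counter wrap):
> 
> 	u64 delta, prev_raw_count, new_raw_count;
> 
> 	do {
> 		prev_raw_count = local64_read(&hwc->prev_count);
> 		new_raw_count = ddr_perf_read_counter(pmu, counter);
> 	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> 			new_raw_count) != prev_raw_count);
> 
> 	/* counters are 32 bits wide, so the subtraction wraps modulo 2^32 */
> 	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
> 	local64_add(delta, &event->count);
> 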
> i.MX8MP:
> Almost everything is the same as on legacy SoCs; the only difference is
> that the event counters are no longer free-running. Like the cycle
> counter, an event counter stops counting when it overflows until the
> counter is cleared, and no interrupt is generated for event counters.
> So the event counters must be cleared when the cycle counter overflows,
> letting them count again, which ensures that the event counters do not
> lose data.
> 
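> For illustration, the stop-counter path added below reads out and
> accumulates the current value, then clears/re-enables the counter so it
> starts from zero again (a simplified sketch of the code in this patch):
> 
> 	if (ddr_perf_counter_overflow(pmu, counter))
> 		dev_warn(pmu->dev, "Event Counter%d overflow happened, data incorrect!!\n", counter);
> 
> 	new_raw_count = ddr_perf_read_counter(pmu, counter);
> 	local64_add(new_raw_count, &event->count);
> 
> 	/* clearing via re-enable is harmless on legacy SoCs as well */
> 	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
> 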
> This patch adds support for stop event counters in a way that remains
> compatible with free-running event counters.
> 
> Signed-off-by: Joakim Zhang <qiangqing.zhang at nxp.com>
> ---
> ChangeLogs:
> V1->V2:
> 	* clear event counters in the update function instead of the
> 	  irq handler, so the spinlock is removed.
> ---
>  drivers/perf/fsl_imx8_ddr_perf.c | 68 ++++++++++++++++++++++----------
>  1 file changed, 48 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
> index 90884d14f95f..c0f0adfcac06 100644
> --- a/drivers/perf/fsl_imx8_ddr_perf.c
> +++ b/drivers/perf/fsl_imx8_ddr_perf.c
> @@ -361,25 +361,6 @@ static int ddr_perf_event_init(struct perf_event *event)
>  	return 0;
>  }
> 
> -
> -static void ddr_perf_event_update(struct perf_event *event)
> -{
> -	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
> -	struct hw_perf_event *hwc = &event->hw;
> -	u64 delta, prev_raw_count, new_raw_count;
> -	int counter = hwc->idx;
> -
> -	do {
> -		prev_raw_count = local64_read(&hwc->prev_count);
> -		new_raw_count = ddr_perf_read_counter(pmu, counter);
> -	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> -			new_raw_count) != prev_raw_count);
> -
> -	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
> -
> -	local64_add(delta, &event->count);
> -}
> -
>  static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
>  				  int counter, bool enable)
>  {
> @@ -404,6 +385,52 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
>  	}
>  }
> 
> +static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
> +{
> +	int val;
> +
> +	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);
> +
> +	return val & CNTL_OVER ? true : false;
> +}
> +
> +static void ddr_perf_event_update(struct perf_event *event)
> +{
> +	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
> +	struct hw_perf_event *hwc = &event->hw;
> +	u64 delta, prev_raw_count, new_raw_count;
> +	int counter = hwc->idx;
> +	int ret;
> +
> +	if (counter == EVENT_CYCLES_COUNTER) {
> +		do {
> +			prev_raw_count = local64_read(&hwc->prev_count);
> +			new_raw_count = ddr_perf_read_counter(pmu, counter);
> +		} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
> +				new_raw_count) != prev_raw_count);
> +
> +		delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
> +
> +		local64_add(delta, &event->count);
> +	} else {
> +		/*
> +		 * Legacy SoCs: event counters keep counting on overflow, so
> +		 *              there is no need to clear the counter.
> +		 * New SoCs: event counters stop counting on overflow and must
> +		 *           be cleared to count again.
> +		 */
> +		ret = ddr_perf_counter_overflow(pmu, counter);
> +		if (ret)
> +			dev_warn(pmu->dev, "Event Counter%d overflow happened, data incorrect!!\n", counter);
> +
> +		new_raw_count = ddr_perf_read_counter(pmu, counter);
> +		local64_add(new_raw_count, &event->count);
> +
> +		/* Clear the event counter; this is fine for both legacy and new SoCs. */
> +		ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
> +	}
> +}
> +
>  static void ddr_perf_event_start(struct perf_event *event, int flags)
>  {
>  	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
> @@ -546,7 +573,8 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
>  	/*
>  	 * When the cycle counter overflows, all counters are stopped,
>  	 * and an IRQ is raised. If any other counter overflows, it
> -	 * continues counting, and no IRQ is raised.
> +	 * continues counting (or stops counting, on new SoCs such as i.MX8MP),
> +	 * and no IRQ is raised.
>  	 *
>  	 * Cycles occur at least 4 times as often as other events, so we
>  	 * can update all events on a cycle counter overflow and not
> --
> 2.17.1


