[PATCH v6 2/3] perf/marvell: Odyssey DDR Performance monitor support
Will Deacon
will at kernel.org
Fri Aug 16 05:26:18 PDT 2024
On Thu, Jul 04, 2024 at 02:50:21PM +0530, Gowthami Thiagarajan wrote:
> Odyssey DRAM Subsystem supports eight counters for monitoring performance
> and software can program those counters to monitor any of the defined
> performance events. Supported performance events include those counted
> at the interface between the DDR controller and the PHY, at the
> interface between the DDR controller and the CHI interconnect, or
> within the DDR controller itself.
>
> Additionally, the DSS supports two fixed performance event counters,
> one for DDR reads and the other for DDR writes.
>
> Signed-off-by: Gowthami Thiagarajan <gthiagarajan at marvell.com>
> ---
> drivers/perf/marvell_cn10k_ddr_pmu.c | 248 +++++++++++++++++++++++++++
> 1 file changed, 248 insertions(+)
Please can you add some documentation to Documentation/admin-guide/perf?
> @@ -314,6 +420,14 @@ static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
> case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
> *event_bitmap = (1ULL << (eventid - 1));
> break;
> + case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
> + if (ddr_pmu->version == DDR_PMU_V2) {
> + *event_bitmap = (1ULL << (eventid - 1));
> + } else {
> + pr_err("%s Invalid eventid %d\n", __func__, eventid);
> + return -EINVAL;
> + }
This 'else' clause is identical to the default case. Please either add
a shared error label or have a fallthrough. We shouldn't duplicate the
print.
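Something like the below (an untested sketch, assuming the default case
still ends the switch with the same print and return it has today) would
avoid the duplication:

	case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
		if (ddr_pmu->version != DDR_PMU_V2)
			goto invalid;
		*event_bitmap = (1ULL << (eventid - 1));
		break;
	[...]
	default:
invalid:
		pr_err("%s Invalid eventid %d\n", __func__, eventid);
		return -EINVAL;
	}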
> + break;
> case EVENT_OP_IS_ENTER_SELFREF:
> case EVENT_OP_IS_ENTER_POWERDOWN:
> case EVENT_OP_IS_ENTER_MPSM:
> @@ -439,6 +553,7 @@ static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
>
> val = readq_relaxed(pmu->base +
> DDRC_PERF_REG(p_data->ddrc_perf_cnt_base, counter));
> +
Random whitespace change.
> return val;
> }
>
> @@ -459,15 +574,43 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
> local64_add((new_count - prev_count) & mask, &event->count);
> }
>
> +static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
> + int counter)
> +{
> + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
> + u64 ctrl_reg = p_data->ddrc_perf_cnt_start_op_ctrl;
> +
> + writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
> + DDRC_PERF_REG(ctrl_reg, counter));
> +}
> +
> +static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
> + int counter)
> +{
> + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
> + u64 ctrl_reg = p_data->ddrc_perf_cnt_end_op_ctrl;
> +
> + writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
> + DDRC_PERF_REG(ctrl_reg, counter));
> +}
> +
> static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
> {
> struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
> + u64 ctrl_reg = pmu->p_data->ddrc_perf_cnt_op_mode_ctrl;
> struct hw_perf_event *hwc = &event->hw;
> int counter = hwc->idx;
>
> local64_set(&hwc->prev_count, 0);
>
> cn10k_ddr_perf_counter_enable(pmu, counter, true);
> + if (pmu->version == DDR_PMU_V2) {
> + /* Setup the PMU counter to work in manual mode */
> + writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, pmu->base +
> + DDRC_PERF_REG(ctrl_reg, counter));
> +
> + cn10k_ddr_perf_counter_start(pmu, counter);
> + }
>
> hwc->state = 0;
> }
> @@ -526,6 +669,9 @@ static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
>
> cn10k_ddr_perf_counter_enable(pmu, counter, false);
>
> + if (pmu->version == DDR_PMU_V2)
> + cn10k_ddr_perf_counter_stop(pmu, counter);
> +
> if (flags & PERF_EF_UPDATE)
> cn10k_ddr_perf_event_update(event);
>
> @@ -642,6 +788,61 @@ static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
> cn10k_ddr_perf_pmu_enable(&pmu->pmu);
> }
>
> +static void ddr_pmu_v2_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
> + bool enable)
> +{
> + const struct ddr_pmu_platform_data *p_data = pmu->p_data;
> + u64 val;
> +
> + val = readq_relaxed(pmu->base + p_data->ddrc_perf_cnt_freerun_ctrl);
> + if (enable)
> + val |= DDRC_PERF_FREERUN_READ_EN;
> + else
> + val &= ~DDRC_PERF_FREERUN_READ_EN;
> +
> + writeq_relaxed(val, pmu->base + p_data->ddrc_perf_cnt_freerun_ctrl);
> +}
> +
> +static void ddr_pmu_v2_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
> + bool enable)
> +{
> + const struct ddr_pmu_platform_data *p_data = pmu->p_data;
> + u64 val;
> +
> + val = readq_relaxed(pmu->base + p_data->ddrc_perf_cnt_freerun_ctrl);
> + if (enable)
> + val |= DDRC_PERF_FREERUN_WRITE_EN;
> + else
> + val &= ~DDRC_PERF_FREERUN_WRITE_EN;
> +
> + writeq_relaxed(val, pmu->base + p_data->ddrc_perf_cnt_freerun_ctrl);
> +}
> +
> +static void ddr_pmu_v2_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
> +{
> + const struct ddr_pmu_platform_data *p_data = pmu->p_data;
> + u64 val;
> +
> + val = DDRC_FREERUN_READ_CNT_CLR;
> + writeq_relaxed(val, pmu->base + p_data->ddrc_perf_cnt_freerun_clr);
> +}
> +
> +static void ddr_pmu_v2_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
> +{
> + const struct ddr_pmu_platform_data *p_data = pmu->p_data;
> + u64 val;
> +
> + val = DDRC_FREERUN_WRITE_CNT_CLR;
> + writeq_relaxed(val, pmu->base + p_data->ddrc_perf_cnt_freerun_clr);
> +}
> +
> +static void ddr_pmu_v2_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
> +{
> + cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
> + cn10k_ddr_perf_counter_stop(pmu, evt_idx);
> + cn10k_ddr_perf_counter_start(pmu, evt_idx);
I don't understand this. Why are you calling _event_update() when
cn10k_ddr_perf_event_stop() already does that? And why are you calling
stop/start back-to-back after the update?
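If the stop/start sequence really is needed to re-arm the counter in
manual mode, then something like the below (completely untested, and
assuming the existing event_stop()/event_start() helpers can be reused
from the overflow path) would at least avoid the duplicate update:

static void ddr_pmu_v2_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
	struct perf_event *event = pmu->events[evt_idx];

	/* _event_stop(PERF_EF_UPDATE) already takes care of the update */
	cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);
	cn10k_ddr_perf_event_start(event, PERF_EF_RELOAD);
}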
Will