[PATCH v2 27/29] arm_mpam: Add helper to reset saved mbwu state
Jonathan Cameron
jonathan.cameron at huawei.com
Fri Sep 12 06:33:02 PDT 2025
On Wed, 10 Sep 2025 20:43:07 +0000
James Morse <james.morse at arm.com> wrote:
> resctrl expects to reset the bandwidth counters when the filesystem
> is mounted.
>
> To allow this, add a helper that clears the saved mbwu state. Instead
> of cross-calling to each CPU that can access the component MSC to
> write to the counter, set a flag that causes it to be zeroed on
> the next read. This is easily done by forcing a configuration update.
>
> Signed-off-by: James Morse <james.morse at arm.com>
Minor comments inline.
Jonathan
> @@ -1245,6 +1257,37 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx,
> return err;
> }
>
> +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx)
> +{
> + int idx;
> + struct mpam_msc *msc;
> + struct mpam_vmsc *vmsc;
> + struct mpam_msc_ris *ris;
> +
> + if (!mpam_is_enabled())
> + return;
> +
> + idx = srcu_read_lock(&mpam_srcu);
Maybe guard(), though it doesn't add that much here.
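Untested sketch of what I had in mind, assuming the guard(srcu) definition
in include/linux/srcu.h is usable for this lock:

	guard(srcu)(&mpam_srcu);
	list_for_each_entry_rcu(vmsc, &comp->vmsc, comp_list) {
		...
	}
	/* srcu_read_unlock() happens automatically on return */

Saves carrying idx around, but as said, not a big win in this function.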
> + list_for_each_entry_rcu(vmsc, &comp->vmsc, comp_list) {
Any reason not to use the _srcu list iteration variants?
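i.e. something like (untested, assuming mpam_srcu is the srcu_struct
protecting these lists):

	list_for_each_entry_srcu(vmsc, &comp->vmsc, comp_list,
				 srcu_read_lock_held(&mpam_srcu)) {

which also gives lockdep the right condition to check.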
> + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &vmsc->props))
> + continue;
> +
> + msc = vmsc->msc;
> + list_for_each_entry_rcu(ris, &vmsc->ris, vmsc_list) {
> + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props))
> + continue;
> +
> + if (WARN_ON_ONCE(!mpam_mon_sel_lock(msc)))
> + continue;
> +
> + ris->mbwu_state[ctx->mon].correction = 0;
> + ris->mbwu_state[ctx->mon].reset_on_next_read = true;
> + mpam_mon_sel_unlock(msc);
> + }
> + }
> + srcu_read_unlock(&mpam_srcu, idx);
> +}
> +
> static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
> {
> u32 num_words, msb;
> diff --git a/drivers/resctrl/mpam_internal.h b/drivers/resctrl/mpam_internal.h
> index c190826dfbda..7cbcafe8294a 100644
> --- a/drivers/resctrl/mpam_internal.h
> +++ b/drivers/resctrl/mpam_internal.h
> @@ -223,10 +223,12 @@ struct mon_cfg {
>
> /*
> * Changes to enabled and cfg are protected by the msc->lock.
> - * Changes to prev_val and correction are protected by the msc's mon_sel_lock.
> + * Changes to reset_on_next_read, prev_val and correction are protected by the
> + * msc's mon_sel_lock.
Getting close to the point where listing one field per line would reduce churn.
If you anticipate adding more entries in future, I'd definitely consider it, e.g.:
 * msc's mon_sel_lock protects:
* - reset_on_next_read
* - prev_val
* - correction
*/
> */
> struct msmon_mbwu_state {
> bool enabled;
> + bool reset_on_next_read;
> struct mon_cfg cfg;
>
> /* The value last read from the hardware. Used to detect overflow. */
> @@ -393,6 +395,7 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid,
>
> int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx,
> enum mpam_device_features, u64 *val);
> +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx);
>
> int mpam_get_cpumask_from_cache_id(unsigned long cache_id, u32 cache_level,
> cpumask_t *affinity);