Hi James,
On 8/22/25 16:30, James Morse wrote:
> Resetting RIS entries from the cpuhp callback is easy as the
> callback occurs on the correct CPU. This won't be true for any other
> caller that wants to reset or configure an MSC.
>
> Add a helper that schedules the provided function if necessary.
> Prevent the cpuhp callbacks from changing the MSC state by taking the
> cpuhp lock.
At first, I thought this was describing something the patch itself does, rather
than a requirement on callers. Consider changing it to something like:
Callers should take the cpuhp lock to prevent the cpuhp callbacks from
changing the MSC state.
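For what it's worth, a rough sketch of what I'd expect such a caller to look
like, just to illustrate the locking requirement (the caller name is made up,
only mpam_touch_msc()/mpam_reset_ris() are from this patch):

	/*
	 * Hypothetical caller resetting one RIS outside the cpuhp path.
	 * Take the cpuhp lock so CPUs can't go offline underneath us, and
	 * hold the SRCU read lock, as mpam_touch_msc() asserts both.
	 */
	static int example_reset_one_ris(struct mpam_msc *msc,
					 struct mpam_msc_ris *ris)
	{
		int err, idx;

		cpus_read_lock();
		idx = srcu_read_lock(&mpam_srcu);
		err = mpam_touch_msc(msc, &mpam_reset_ris, ris);
		srcu_read_unlock(&mpam_srcu, idx);
		cpus_read_unlock();

		return err;
	}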
Regardless, this looks good to me.
Reviewed-by: Ben Horgan <ben.horgan@arm.com>
>
> Signed-off-by: James Morse <james.morse@arm.com>
> ---
> drivers/resctrl/mpam_devices.c | 37 +++++++++++++++++++++++++++++++---
> 1 file changed, 34 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/resctrl/mpam_devices.c b/drivers/resctrl/mpam_devices.c
> index c1f01dd748ad..759244966736 100644
> --- a/drivers/resctrl/mpam_devices.c
> +++ b/drivers/resctrl/mpam_devices.c
> @@ -906,20 +906,51 @@ static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid)
> mutex_unlock(&msc->part_sel_lock);
> }
>
> -static void mpam_reset_ris(struct mpam_msc_ris *ris)
> +/*
> + * Called via smp_call_on_cpu() to prevent migration, while still being
> + * pre-emptible.
> + */
> +static int mpam_reset_ris(void *arg)
> {
> u16 partid, partid_max;
> + struct mpam_msc_ris *ris = arg;
>
> mpam_assert_srcu_read_lock_held();
>
> if (ris->in_reset_state)
> - return;
> + return 0;
>
> spin_lock(&partid_max_lock);
> partid_max = mpam_partid_max;
> spin_unlock(&partid_max_lock);
> for (partid = 0; partid < partid_max; partid++)
> mpam_reset_ris_partid(ris, partid);
> +
> + return 0;
> +}
> +
> +/*
> + * Get the preferred CPU for this MSC. If it is accessible from this CPU,
> + * this CPU is preferred. This can be preempted/migrated, it will only result
> + * in more work.
> + */
> +static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc)
> +{
> + int cpu = raw_smp_processor_id();
> +
> + if (cpumask_test_cpu(cpu, &msc->accessibility))
> + return cpu;
> +
> + return cpumask_first_and(&msc->accessibility, cpu_online_mask);
> +}
> +
> +static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg)
> +{
> + lockdep_assert_irqs_enabled();
> + lockdep_assert_cpus_held();
> + mpam_assert_srcu_read_lock_held();
> +
> + return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true);
> }
>
> static void mpam_reset_msc(struct mpam_msc *msc, bool online)
> @@ -932,7 +963,7 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online)
> mpam_mon_sel_outer_lock(msc);
> idx = srcu_read_lock(&mpam_srcu);
> list_for_each_entry_srcu(ris, &msc->ris, msc_list, srcu_read_lock_held(&mpam_srcu)) {
> - mpam_reset_ris(ris);
> + mpam_touch_msc(msc, &mpam_reset_ris, ris);
>
> /*
> * Set in_reset_state when coming online. The reset state
Thanks,
Ben