[PATCH v12 18/31] fs/resctrl: Split L3 dependent parts out of __mon_event_count()

Tony Luck posted 31 patches 2 months ago
There is a newer version of this series
[PATCH v12 18/31] fs/resctrl: Split L3 dependent parts out of __mon_event_count()
Posted by Tony Luck 2 months ago
Almost all of the code in __mon_event_count() is specific to the RDT_RESOURCE_L3
resource.

Carve out the L3 resource specific event reading code into a separate
helper, __l3_mon_event_count(), to support reading event data from a new
monitoring resource.

Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 fs/resctrl/monitor.c | 70 ++++++++++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 31 deletions(-)

diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 895cf5b0f524..faaa2851c5c2 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -415,32 +415,7 @@ static void mbm_cntr_free(struct rdt_l3_mon_domain *d, int cntr_id)
 	memset(&d->cntr_cfg[cntr_id], 0, sizeof(*d->cntr_cfg));
 }
 
-/*
- * Called from preemptible context via a direct call of mon_event_count() for
- * events that can be read on any CPU.
- * Called from preemptible but non-migratable process context (mon_event_count()
- * via smp_call_on_cpu()) OR non-preemptible context (mon_event_count() via
- * smp_call_function_any()) for events that need to be read on a specific CPU.
- */
-static bool cpu_on_correct_domain(struct rmid_read *rr)
-{
-	int cpu;
-
-	/* Any CPU is OK for this event */
-	if (rr->evt->any_cpu)
-		return true;
-
-	cpu = smp_processor_id();
-
-	/* Single domain. Must be on a CPU in that domain. */
-	if (rr->hdr)
-		return cpumask_test_cpu(cpu, &rr->hdr->cpu_mask);
-
-	/* Summing domains that share a cache, must be on a CPU for that cache. */
-	return cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map);
-}
-
-static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
+static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
 {
 	u32 closid = rdtgrp->closid;
 	u32 rmid = rdtgrp->mon.rmid;
@@ -450,9 +425,6 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
 	int err, ret;
 	u64 tval = 0;
 
-	if (!cpu_on_correct_domain(rr))
-		return -EINVAL;
-
 	if (rr->is_mbm_cntr) {
 		if (!rr->hdr || !domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
 			return -EINVAL;
@@ -504,8 +476,6 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
 	 * all domains fail for any reason.
 	 */
 	ret = -EINVAL;
-	if (WARN_ON_ONCE(rr->r->rid != RDT_RESOURCE_L3))
-		return ret;
 
 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
 		if (d->ci_id != rr->ci->id)
@@ -529,6 +499,44 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
 	return ret;
 }
 
+/*
+ * Called from preemptible context via a direct call of mon_event_count() for
+ * events that can be read on any CPU.
+ * Called from preemptible but non-migratable process context (mon_event_count()
+ * via smp_call_on_cpu()) OR non-preemptible context (mon_event_count() via
+ * smp_call_function_any()) for events that need to be read on a specific CPU.
+ */
+static bool cpu_on_correct_domain(struct rmid_read *rr)
+{
+	int cpu;
+
+	/* Any CPU is OK for this event */
+	if (rr->evt->any_cpu)
+		return true;
+
+	cpu = smp_processor_id();
+
+	/* Single domain. Must be on a CPU in that domain. */
+	if (rr->hdr)
+		return cpumask_test_cpu(cpu, &rr->hdr->cpu_mask);
+
+	/* Summing domains that share a cache, must be on a CPU for that cache. */
+	return cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map);
+}
+
+static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
+{
+	if (!cpu_on_correct_domain(rr))
+		return -EINVAL;
+
+	switch (rr->r->rid) {
+	case RDT_RESOURCE_L3:
+		return __l3_mon_event_count(rdtgrp, rr);
+	default:
+		return -EINVAL;
+	}
+}
+
 /*
  * mbm_bw_count() - Update bw count from values previously read by
  *		    __mon_event_count().
-- 
2.51.0
Re: [PATCH v12 18/31] fs/resctrl: Split L3 dependent parts out of __mon_event_count()
Posted by Reinette Chatre 1 month, 3 weeks ago
Hi Tony,

On 10/13/25 3:33 PM, Tony Luck wrote:
> Almost all of the code in __mon_event_count() is specific to the RDT_RESOURCE_L3
> resource.
> 
> Split it out into __l3_mon_event_count().

Missing a "why". We could perhaps write it similar to an earlier commit message:
	Carve out the L3 resource specific event reading code into a separate
	helper to support reading event data from a new monitoring resource.

> 
> Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
> Signed-off-by: Tony Luck <tony.luck@intel.com>
> ---


> @@ -529,6 +499,44 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
>  	return ret;
>  }
>  
> +/*
> + * Called from preemptible context via a direct call of mon_event_count() for
> + * events that can be read on any CPU.
> + * Called from preemptible but non-migratable process context (mon_event_count()
> + * via smp_call_on_cpu()) OR non-preemptible context (mon_event_count() via
> + * smp_call_function_any()) for events that need to be read on a specific CPU.
> + */
> +static bool cpu_on_correct_domain(struct rmid_read *rr)
> +{
> +	int cpu;
> +
> +	/* Any CPU is OK for this event */
> +	if (rr->evt->any_cpu)
> +		return true;
> +
> +	cpu = smp_processor_id();
> +
> +	/* Single domain. Must be on a CPU in that domain. */
> +	if (rr->hdr)
> +		return cpumask_test_cpu(cpu, &rr->hdr->cpu_mask);
> +
> +	/* Summing domains that share a cache, must be on a CPU for that cache. */
> +	return cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map);
> +}
> +
> +static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> +{
> +	if (!cpu_on_correct_domain(rr))
> +		return -EINVAL;

It is a bit subtle how cpu_on_correct_domain() contains L3 specific code.
This may be ok if one rather thinks of it as a sanity check of struct rmid_read.

Reinette