Convert the whole call sequence from mon_event_read() to resctrl_arch_rmid_read()
to pass the resource-independent struct rdt_domain_hdr instead of an L3-specific
domain structure, in preparation for monitoring events in other resources.
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
include/linux/resctrl.h | 4 +-
fs/resctrl/internal.h | 18 ++++---
arch/x86/kernel/cpu/resctrl/monitor.c | 12 +++--
fs/resctrl/ctrlmondata.c | 9 +---
fs/resctrl/monitor.c | 72 +++++++++++++++++----------
5 files changed, 68 insertions(+), 47 deletions(-)
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 5db37c7e89c5..9b9877fb3238 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -517,7 +517,7 @@ void resctrl_offline_cpu(unsigned int cpu);
* resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
* for this resource and domain.
* @r: resource that the counter should be read from.
- * @d: domain that the counter should be read from.
+ * @hdr: Header of domain that the counter should be read from.
* @closid: closid that matches the rmid. Depending on the architecture, the
* counter may match traffic of both @closid and @rmid, or @rmid
* only.
@@ -538,7 +538,7 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *arch_mon_ctx);
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index 5e52269b391e..9912b774a580 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -106,24 +106,26 @@ struct mon_data {
* resource group then its event count is summed with the count from all
* its child resource groups.
* @r: Resource describing the properties of the event being read.
- * @d: Domain that the counter should be read from. If NULL then sum all
- * domains in @r sharing L3 @ci.id
+ * @hdr: Header of domain that the counter should be read from. If NULL then
+ * sum all domains in @r sharing L3 @ci.id
* @evtid: Which monitor event to read.
* @first: Initialize MBM counter when true.
- * @ci: Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
+ * @ci: Cacheinfo for L3. Only set when @hdr is NULL. Used when summing
+ * domains.
* @is_mbm_cntr: true if "mbm_event" counter assignment mode is enabled and it
* is an MBM event.
* @err: Error encountered when reading counter.
- * @val: Returned value of event counter. If @rgrp is a parent resource group,
- * @val includes the sum of event counts from its child resource groups.
- * If @d is NULL, @val includes the sum of all domains in @r sharing @ci.id,
- * (summed across child resource groups if @rgrp is a parent resource group).
+ * @val: Returned value of event counter. If @rgrp is a parent resource
+ * group, @val includes the sum of event counts from its child
+ * resource groups. If @hdr is NULL, @val includes the sum of all
+ * domains in @r sharing @ci.id, (summed across child resource groups
+ * if @rgrp is a parent resource group).
* @arch_mon_ctx: Hardware monitor allocated for this read request (MPAM only).
*/
struct rmid_read {
struct rdtgroup *rgrp;
struct rdt_resource *r;
- struct rdt_mon_domain *d;
+ struct rdt_domain_hdr *hdr;
enum resctrl_event_id evtid;
bool first;
struct cacheinfo *ci;
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index dffcc8307500..3da970ea1903 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -238,19 +238,25 @@ static u64 get_corrected_val(struct rdt_resource *r, struct rdt_mon_domain *d,
return chunks * hw_res->mon_scale;
}
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
u32 unused, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *ignored)
{
- struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
- int cpu = cpumask_any(&d->hdr.cpu_mask);
+ struct rdt_hw_mon_domain *hw_dom;
struct arch_mbm_state *am;
+ struct rdt_mon_domain *d;
u64 msr_val;
u32 prmid;
+ int cpu;
int ret;
resctrl_arch_rmid_read_context_check();
+ if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
+ return -EINVAL;
+ d = container_of(hdr, struct rdt_mon_domain, hdr);
+ hw_dom = resctrl_to_arch_mon_dom(d);
+ cpu = cpumask_any(&hdr->cpu_mask);
prmid = logical_rmid_to_physical_rmid(cpu, rmid);
ret = __rmid_read_phys(prmid, eventid, &msr_val);
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index 3154cdc98a31..9242a2982e77 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -554,25 +554,18 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_domain_hdr *hdr, struct rdtgroup *rdtgrp,
cpumask_t *cpumask, int evtid, int first)
{
- struct rdt_mon_domain *d = NULL;
int cpu;
/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
- if (hdr) {
- if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
- return;
- d = container_of(hdr, struct rdt_mon_domain, hdr);
- }
-
/*
* Setup the parameters to pass to mon_event_count() to read the data.
*/
rr->rgrp = rdtgrp;
rr->evtid = evtid;
rr->r = r;
- rr->d = d;
+ rr->hdr = hdr;
rr->first = first;
if (resctrl_arch_mbm_cntr_assign_enabled(r) &&
resctrl_is_mbm_event(evtid)) {
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 179962a81362..7765491ddb4c 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -159,7 +159,7 @@ void __check_limbo(struct rdt_mon_domain *d, bool force_free)
break;
entry = __rmid_entry(idx);
- if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
+ if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
QOS_L3_OCCUP_EVENT_ID, &val,
arch_mon_ctx)) {
rmid_dirty = true;
@@ -421,11 +421,16 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
struct rdt_mon_domain *d;
int cntr_id = -ENOENT;
struct mbm_state *m;
- int err, ret;
u64 tval = 0;
+ if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
+ rr->err = -EIO;
+ return -EINVAL;
+ }
+ d = container_of(rr->hdr, struct rdt_mon_domain, hdr);
+
if (rr->is_mbm_cntr) {
- cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
+ cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evtid);
if (cntr_id < 0) {
rr->err = -ENOENT;
return -EINVAL;
@@ -434,32 +439,41 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
if (rr->first) {
if (rr->is_mbm_cntr)
- resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
+ resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evtid);
else
- resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
- m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
+ resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evtid);
+ m = get_mbm_state(d, closid, rmid, rr->evtid);
if (m)
memset(m, 0, sizeof(struct mbm_state));
return 0;
}
- if (rr->d) {
- /* Reading a single domain, must be on a CPU in that domain. */
- if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
- return -EINVAL;
- if (rr->is_mbm_cntr)
- rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
- rr->evtid, &tval);
- else
- rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
- rr->evtid, &tval, rr->arch_mon_ctx);
- if (rr->err)
- return rr->err;
+ /* Reading a single domain, must be on a CPU in that domain. */
+ if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
+ return -EINVAL;
+ if (rr->is_mbm_cntr)
+ rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
+ rr->evtid, &tval);
+ else
+ rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
+ rr->evtid, &tval, rr->arch_mon_ctx);
+ if (rr->err)
+ return rr->err;
- rr->val += tval;
+ rr->val += tval;
- return 0;
- }
+ return 0;
+}
+
+static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
+{
+ int cpu = smp_processor_id();
+ u32 closid = rdtgrp->closid;
+ u32 rmid = rdtgrp->mon.rmid;
+ struct rdt_mon_domain *d;
+ int cntr_id = -ENOENT;
+ u64 tval = 0;
+ int err, ret;
/* Summing domains that share a cache, must be on a CPU for that cache. */
if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
@@ -480,7 +494,7 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
rr->evtid, &tval);
else
- err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
+ err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
rr->evtid, &tval, rr->arch_mon_ctx);
if (!err) {
rr->val += tval;
@@ -498,8 +512,10 @@ static int __mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
switch (rr->r->rid) {
case RDT_RESOURCE_L3:
- return __l3_mon_event_count(rdtgrp, rr);
-
+ if (rr->hdr)
+ return __l3_mon_event_count(rdtgrp, rr);
+ else
+ return __l3_mon_event_count_sum(rdtgrp, rr);
default:
rr->err = -EINVAL;
return -EINVAL;
@@ -523,9 +539,13 @@ static void mbm_bw_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
u64 cur_bw, bytes, cur_bytes;
u32 closid = rdtgrp->closid;
u32 rmid = rdtgrp->mon.rmid;
+ struct rdt_mon_domain *d;
struct mbm_state *m;
- m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
+ if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3))
+ return;
+ d = container_of(rr->hdr, struct rdt_mon_domain, hdr);
+ m = get_mbm_state(d, closid, rmid, rr->evtid);
if (WARN_ON_ONCE(!m))
return;
@@ -698,7 +718,7 @@ static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *
struct rmid_read rr = {0};
rr.r = r;
- rr.d = d;
+ rr.hdr = &d->hdr;
rr.evtid = evtid;
if (resctrl_arch_mbm_cntr_assign_enabled(r)) {
rr.is_mbm_cntr = true;
--
2.51.1
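
For context, the point of passing struct rdt_domain_hdr is that the callers above
resctrl_arch_rmid_read() no longer need to know the domain type; each handler
validates the header and recovers its own structure. A rough sketch of what that
could look like for a hypothetical non-L3 monitoring resource follows
(RDT_RESOURCE_PKG and struct rdt_pkg_mon_domain are invented names for
illustration, not part of this patch):

/* Sketch only: invented resource and domain names. */
static int pkg_event_read(struct rdt_resource *r, struct rdt_domain_hdr *hdr,
			  u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
	struct rdt_pkg_mon_domain *d;

	/* Reject headers that do not describe a monitoring domain of this resource. */
	if (!domain_header_is_valid(hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_PKG))
		return -EINVAL;
	d = container_of(hdr, struct rdt_pkg_mon_domain, hdr);

	/* ... read @eventid for @rmid from the package-scoped domain @d ... */
	return 0;
}
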
Hi Tony,
On 11/24/25 10:53 AM, Tony Luck wrote:
...
> diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
> index 179962a81362..7765491ddb4c 100644
> --- a/fs/resctrl/monitor.c
> +++ b/fs/resctrl/monitor.c
> @@ -159,7 +159,7 @@ void __check_limbo(struct rdt_mon_domain *d, bool force_free)
> break;
>
> entry = __rmid_entry(idx);
> - if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
> + if (resctrl_arch_rmid_read(r, &d->hdr, entry->closid, entry->rmid,
> QOS_L3_OCCUP_EVENT_ID, &val,
> arch_mon_ctx)) {
> rmid_dirty = true;
> @@ -421,11 +421,16 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> struct rdt_mon_domain *d;
> int cntr_id = -ENOENT;
> struct mbm_state *m;
> - int err, ret;
> u64 tval = 0;
>
> + if (!domain_header_is_valid(rr->hdr, RESCTRL_MON_DOMAIN, RDT_RESOURCE_L3)) {
> + rr->err = -EIO;
> + return -EINVAL;
> + }
> + d = container_of(rr->hdr, struct rdt_mon_domain, hdr);
> +
> if (rr->is_mbm_cntr) {
> - cntr_id = mbm_cntr_get(rr->r, rr->d, rdtgrp, rr->evtid);
> + cntr_id = mbm_cntr_get(rr->r, d, rdtgrp, rr->evtid);
> if (cntr_id < 0) {
> rr->err = -ENOENT;
> return -EINVAL;
> @@ -434,32 +439,41 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
>
> if (rr->first) {
> if (rr->is_mbm_cntr)
> - resctrl_arch_reset_cntr(rr->r, rr->d, closid, rmid, cntr_id, rr->evtid);
> + resctrl_arch_reset_cntr(rr->r, d, closid, rmid, cntr_id, rr->evtid);
> else
> - resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
> - m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
> + resctrl_arch_reset_rmid(rr->r, d, closid, rmid, rr->evtid);
> + m = get_mbm_state(d, closid, rmid, rr->evtid);
> if (m)
> memset(m, 0, sizeof(struct mbm_state));
> return 0;
> }
>
> - if (rr->d) {
> - /* Reading a single domain, must be on a CPU in that domain. */
> - if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))
> - return -EINVAL;
> - if (rr->is_mbm_cntr)
> - rr->err = resctrl_arch_cntr_read(rr->r, rr->d, closid, rmid, cntr_id,
> - rr->evtid, &tval);
> - else
> - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid,
> - rr->evtid, &tval, rr->arch_mon_ctx);
> - if (rr->err)
> - return rr->err;
> + /* Reading a single domain, must be on a CPU in that domain. */
> + if (!cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
> + return -EINVAL;
> + if (rr->is_mbm_cntr)
> + rr->err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
> + rr->evtid, &tval);
> + else
> + rr->err = resctrl_arch_rmid_read(rr->r, rr->hdr, closid, rmid,
> + rr->evtid, &tval, rr->arch_mon_ctx);
> + if (rr->err)
> + return rr->err;
>
> - rr->val += tval;
> + rr->val += tval;
>
> - return 0;
> - }
> + return 0;
> +}
> +
> +static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> +{
> + int cpu = smp_processor_id();
> + u32 closid = rdtgrp->closid;
> + u32 rmid = rdtgrp->mon.rmid;
> + struct rdt_mon_domain *d;
> + int cntr_id = -ENOENT;
> + u64 tval = 0;
> + int err, ret;
>
> /* Summing domains that share a cache, must be on a CPU for that cache. */
> if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
> @@ -480,7 +494,7 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
> rr->evtid, &tval);
This is not safe. The current __mon_event_count() implementation being refactored by this series
ensures that if rr->is_mbm_cntr is true then cntr_id is valid. This patch places the code doing so
in __l3_mon_event_count() without an equivalent in the new __l3_mon_event_count_sum(). From what I
can tell, since __l3_mon_event_count_sum() sets cntr_id to -ENOENT and never initializes it correctly,
resctrl_arch_cntr_read() will be called with an invalid cntr_id that it is not able to handle.
There is no overlap in support for SNC and assignable counters. Do you expect that this is something that
should be supported? Even if it is, SNC is model specific so it may be reasonable to expect that when/if
a system supporting both features arrives it would need enabling anyway. I thus propose for simplicity
that the handling of assignable counters by __l3_mon_event_count_sum() be dropped, albeit with a loud
complaint if it is ever called with rr->is_mbm_cntr set.
Reinette
On Tue, Dec 02, 2025 at 08:06:47AM -0800, Reinette Chatre wrote:
> Hi Tony,
> > +static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> > +{
> > + int cpu = smp_processor_id();
> > + u32 closid = rdtgrp->closid;
> > + u32 rmid = rdtgrp->mon.rmid;
> > + struct rdt_mon_domain *d;
> > + int cntr_id = -ENOENT;
> > + u64 tval = 0;
> > + int err, ret;
> >
> > /* Summing domains that share a cache, must be on a CPU for that cache. */
> > if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
> > @@ -480,7 +494,7 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> > err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
> > rr->evtid, &tval);
>
> This is not safe. The current __mon_event_count() implementation being refactored by this series
> ensures that if rr->is_mbm_cntr is true then cntr_id is valid. This patch places the code doing so
> in __l3_mon_event_count() without an equivalent in the new __l3_mon_event_count_sum(). From what I
> can tell, since __l3_mon_event_count_sum() sets cntr_id to -ENOENT and never initializes it correctly,
> resctrl_arch_cntr_read() will be called with an invalid cntr_id that it is not able to handle.
>
> There is no overlap in support for SNC and assignable counters. Do you expect that this is something that
> should be supported? Even if it is, SNC is model specific so it may be reasonable to expect that when/if
> a system supporting both features arrives it would need enabling anyway. I thus propose for simplicity
> that the handling of assignable counters by __l3_mon_event_count_sum() be dropped, albeit with a loud
> complaint if it is ever called with rr->is_mbm_cntr set.
>
Reinette,
Agreed. I see little likelihood that SNC and assignable counters will
meet on a system.
How does this look for the "loud complaint":
static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
{
int cpu = smp_processor_id();
u32 closid = rdtgrp->closid;
u32 rmid = rdtgrp->mon.rmid;
struct rdt_mon_domain *d;
u64 tval = 0;
int err, ret;
/*
* Summing across domains is only done for systems that implement
* Sub-NUMA Cluster. There is no overlap with systems that support
* assignable counters.
*/
if (rr->is_mbm_cntr) {
pr_warn_once("Assignable counter on SNC system!\n");
rr->err = -EINVAL;
return -EINVAL;
}
/* Summing domains that share a cache, must be on a CPU for that cache. */
if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
return -EINVAL;
/*
* Legacy files must report the sum of an event across all
* domains that share the same L3 cache instance.
* Report success if a read from any domain succeeds, -EINVAL
* (translated to "Unavailable" for user space) if reading from
* all domains fail for any reason.
*/
ret = -EINVAL;
list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
if (d->ci_id != rr->ci->id)
continue;
err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
rr->evtid, &tval, rr->arch_mon_ctx);
if (!err) {
rr->val += tval;
ret = 0;
}
}
if (ret)
rr->err = ret;
return ret;
}
-Tony
Hi Tony,
On 12/2/25 12:33 PM, Luck, Tony wrote:
> On Tue, Dec 02, 2025 at 08:06:47AM -0800, Reinette Chatre wrote:
>> Hi Tony,
>>> +static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
>>> +{
>>> + int cpu = smp_processor_id();
>>> + u32 closid = rdtgrp->closid;
>>> + u32 rmid = rdtgrp->mon.rmid;
>>> + struct rdt_mon_domain *d;
>>> + int cntr_id = -ENOENT;
>>> + u64 tval = 0;
>>> + int err, ret;
>>>
>>> /* Summing domains that share a cache, must be on a CPU for that cache. */
>>> if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
>>> @@ -480,7 +494,7 @@ static int __l3_mon_event_count(struct rdtgroup *rdtgrp, struct rmid_read *rr)
>>> err = resctrl_arch_cntr_read(rr->r, d, closid, rmid, cntr_id,
>>> rr->evtid, &tval);
>>
>> This is not safe. The current __mon_event_count() implementation being refactored by this series
>> ensures that if rr->is_mbm_cntr is true then cntr_id is valid. This patch places the code doing so
>> in __l3_mon_event_count() without an equivalent in the new __l3_mon_event_count_sum(). From what I
>> can tell, since __l3_mon_event_count_sum() sets cntr_id to -ENOENT and never initializes it correctly,
>> resctrl_arch_cntr_read() will be called with an invalid cntr_id that it is not able to handle.
>>
>> There is no overlap in support for SNC and assignable counters. Do you expect that this is something that
>> should be supported? Even if it is, SNC is model specific so it may be reasonable to expect that when/if
>> a system supporting both features arrives it would need enabling anyway. I thus propose for simplicity
>> that the handling of assignable counters by __l3_mon_event_count_sum() be dropped, albeit with a loud
>> complaint if it is ever called with rr->is_mbm_cntr set.
>>
>
> Reinette,
>
> Agreed. I see little liklihood that SNC and assignable counters will
> meet on a system.
>
> How does this look for the "loud complaint":
>
>
> static int __l3_mon_event_count_sum(struct rdtgroup *rdtgrp, struct rmid_read *rr)
> {
> int cpu = smp_processor_id();
> u32 closid = rdtgrp->closid;
> u32 rmid = rdtgrp->mon.rmid;
> struct rdt_mon_domain *d;
> u64 tval = 0;
> int err, ret;
>
> /*
> * Summing across domains is only done for systems that implement
> * Sub-NUMA Cluster. There is no overlap with systems that support
> * assignable counters.
> */
> if (rr->is_mbm_cntr) {
> pr_warn_once("Assignable counter on SNC system!\n");
> rr->err = -EINVAL;
> return -EINVAL;
> }
Thank you. On a high level this looks good to me. I am concerned about the architecture
"SNC" term creeping deeper into fs code when it (the fs) aims to generalize it as
"summing domains". Mentioning SNC in the comment may be useful though since it
helps to understand what is going on by being specific.
As a nit could the user space message not refer to SNC though? How about something like:
"Summing domains using assignable counters is not supported."
>
> /* Summing domains that share a cache, must be on a CPU for that cache. */
> if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
> return -EINVAL;
>
> /*
> * Legacy files must report the sum of an event across all
> * domains that share the same L3 cache instance.
> * Report success if a read from any domain succeeds, -EINVAL
> * (translated to "Unavailable" for user space) if reading from
> * all domains fail for any reason.
> */
> ret = -EINVAL;
> list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
> if (d->ci_id != rr->ci->id)
> continue;
> err = resctrl_arch_rmid_read(rr->r, &d->hdr, closid, rmid,
> rr->evtid, &tval, rr->arch_mon_ctx);
> if (!err) {
> rr->val += tval;
> ret = 0;
> }
> }
>
> if (ret)
> rr->err = ret;
>
> return ret;
> }
Reinette
> Thank you. On a high level this looks good to me. I am concerned about the architecture
> "SNC" term creeping deeper into fs code when it (the fs) aims to generalize it as
> "summing domains". Mentioning SNC in the comment may be useful though since it
> helps to understand what is going on by being specific.
> As a nit could the user space message not refer to SNC though? How about something like:
> "Summing domains using assignable counters is not supported."

That looks good. I'll use that. Thanks

-Tony
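
Putting the two together, the check at the top of __l3_mon_event_count_sum()
would presumably end up along these lines (a sketch based on the code posted
above with the suggested message; the final patch is not shown in this thread):

	/*
	 * Summing across domains is only done for systems that implement
	 * Sub-NUMA Cluster. There is no overlap with systems that support
	 * assignable counters, so refuse the combination loudly.
	 */
	if (rr->is_mbm_cntr) {
		pr_warn_once("Summing domains using assignable counters is not supported.\n");
		rr->err = -EINVAL;
		return -EINVAL;
	}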