[PATCH v6 6/8] fs/resctrl: Introduce interface to display io_alloc CBMs

Posted by Babu Moger
The io_alloc feature in resctrl enables system software to configure
the portion of the L3 cache allocated for I/O traffic.

Add the interface to display the CBMs (Capacity Bit Masks) of the
io_alloc feature.

The CBM interface file io_alloc_cbm will reside in the info directory
(e.g., /sys/fs/resctrl/info/L3/). Displaying the resource name is not
necessary. Pass the resource name to show_doms() and print it only if
the name is valid. For io_alloc, pass NULL to suppress printing the
resource name.
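
Reading the file then looks like this (taken from the documentation
update below; actual values depend on the system):

	# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
	0=ffff;1=ffff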

When CDP is enabled, io_alloc routes traffic using the highest CLOSID
associated with an L3CODE resource. However, CBMs can be accessed via
either L3CODE or L3DATA resources.
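
For reference, resctrl_io_alloc_closid_get() (introduced earlier in the
series) amounts to claiming the highest CLOSID the resource supports.
A rough sketch, not necessarily the exact in-tree body:

	static u32 resctrl_io_alloc_closid_get(struct rdt_resource *r)
	{
		/* Sketch: io_alloc uses the highest supported CLOSID. */
		return resctrl_arch_get_num_closid(r) - 1;
	}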

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
v6: Added "io_alloc_cbm" details in user doc resctrl.rst.
    Resource name is not printed in CBM now. Corrected the texts about it
    in resctrl.rst.

v5: Resolved conflicts due to the recent resctrl FS/ARCH code restructure.
    Updated show_doms() to print the resource name only if it is valid.
    Passed NULL when printing the io_alloc CBM.
    Changed the code to access the CBMs via either L3CODE or L3DATA resources.

v4: Updated the change log.
    Added rdtgroup_mutex before rdt_last_cmd_puts().
    Returned -ENODEV when resource type is CDP_DATA.
    Kept the resource name while printing the CBM (L3:0=fff); that way
    I don't have to change show_doms() just for this feature and it is
    consistent across all the schemata displays.

v3: Minor changes due to changes in resctrl_arch_get_io_alloc_enabled()
    and resctrl_io_alloc_closid_get().
    Added the check to verify CDP resource type.
    Updated the commit log.

v2: Fixed to display only on L3 resources.
    Added the locks while processing.
    Renamed the interface file to io_alloc_cbm (from sdciae_cmd).
---
 Documentation/filesystems/resctrl.rst | 13 +++++++
 fs/resctrl/ctrlmondata.c              |  8 +++--
 fs/resctrl/internal.h                 |  2 ++
 fs/resctrl/rdtgroup.c                 | 51 ++++++++++++++++++++++++++-
 4 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
index 03c829b2c276..b31748ec8c61 100644
--- a/Documentation/filesystems/resctrl.rst
+++ b/Documentation/filesystems/resctrl.rst
@@ -169,6 +169,19 @@ related to allocation:
 		When CDP is enabled, io_alloc routes I/O traffic using the highest
 		CLOSID allocated for the instruction cache (L3CODE).
 
+"io_alloc_cbm":
+		Capacity Bit Masks (CBMs) available to supported IO devices which
+		can directly insert cache lines in L3 which can help to reduce the
+		latency. CBMs are displayed in the following format:
+
+			<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+		Example::
+
+			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
+			0=ffff;1=ffff
+
+
 Memory bandwidth(MB) subdirectory contains the following files
 with respect to allocation:
 
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
index 6ed2dfd4dbbd..ea039852569a 100644
--- a/fs/resctrl/ctrlmondata.c
+++ b/fs/resctrl/ctrlmondata.c
@@ -381,7 +381,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	return ret ?: nbytes;
 }
 
-static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
+void show_doms(struct seq_file *s, struct resctrl_schema *schema, char *resource_name,
+	       int closid)
 {
 	struct rdt_resource *r = schema->res;
 	struct rdt_ctrl_domain *dom;
@@ -391,7 +392,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
 	/* Walking r->domains, ensure it can't race with cpuhp */
 	lockdep_assert_cpus_held();
 
-	seq_printf(s, "%*s:", max_name_width, schema->name);
+	if (resource_name)
+		seq_printf(s, "%*s:", max_name_width, resource_name);
 	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
 		if (sep)
 			seq_puts(s, ";");
@@ -437,7 +439,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			closid = rdtgrp->closid;
 			list_for_each_entry(schema, &resctrl_schema_all, list) {
 				if (closid < schema->num_closid)
-					show_doms(s, schema, closid);
+					show_doms(s, schema, schema->name, closid);
 			}
 		}
 	} else {
diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
index 9a8cf6f11151..14f3697c1187 100644
--- a/fs/resctrl/internal.h
+++ b/fs/resctrl/internal.h
@@ -374,6 +374,8 @@ void rdt_staged_configs_clear(void);
 bool closid_allocated(unsigned int closid);
 
 int resctrl_find_cleanest_closid(void);
+void show_doms(struct seq_file *s, struct resctrl_schema *schema,
+	       char *name, int closid);
 
 #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index bbc032b4d0e9..0c2d2cf4baa1 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -1997,6 +1997,46 @@ static ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
 	return ret ?: nbytes;
 }
 
+static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
+				     struct seq_file *seq, void *v)
+{
+	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
+	struct rdt_resource *r = s->res;
+	u32 io_alloc_closid;
+	int ret = 0;
+
+	cpus_read_lock();
+	mutex_lock(&rdtgroup_mutex);
+
+	rdt_last_cmd_clear();
+
+	if (!r->cache.io_alloc_capable) {
+		rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
+		ret = -ENODEV;
+		goto cbm_show_out;
+	}
+
+	if (!resctrl_arch_get_io_alloc_enabled(r)) {
+		rdt_last_cmd_puts("io_alloc feature is not enabled\n");
+		ret = -EINVAL;
+		goto cbm_show_out;
+	}
+
+	io_alloc_closid = resctrl_io_alloc_closid_get(r);
+	if (io_alloc_closid < 0) {
+		rdt_last_cmd_puts("Max CLOSID to support io_alloc is not available\n");
+		ret = -EINVAL;
+		goto cbm_show_out;
+	}
+
+	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);
+
+cbm_show_out:
+	mutex_unlock(&rdtgroup_mutex);
+	cpus_read_unlock();
+	return ret;
+}
+
 /* rdtgroup information files for one cache resource. */
 static struct rftype res_common_files[] = {
 	{
@@ -2156,6 +2196,12 @@ static struct rftype res_common_files[] = {
 		.seq_show       = resctrl_io_alloc_show,
 		.write          = resctrl_io_alloc_write,
 	},
+	{
+		.name		= "io_alloc_cbm",
+		.mode		= 0444,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= resctrl_io_alloc_cbm_show,
+	},
 	{
 		.name		= "mba_MBps_event",
 		.mode		= 0644,
@@ -2267,9 +2313,12 @@ static void io_alloc_init(void)
 {
 	struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
 
-	if (r->cache.io_alloc_capable)
+	if (r->cache.io_alloc_capable) {
 		resctrl_file_fflags_init("io_alloc",
 					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+		resctrl_file_fflags_init("io_alloc_cbm",
+					 RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE);
+	}
 }
 
 void resctrl_file_fflags_init(const char *config, unsigned long fflags)
-- 
2.34.1
Re: [PATCH v6 6/8] fs/resctrl: Introduce interface to display io_alloc CBMs
Posted by Reinette Chatre
Hi Babu,

On 6/11/25 2:23 PM, Babu Moger wrote:
> The io_alloc feature in resctrl enables system software to configure
> the portion of the L3 cache allocated for I/O traffic.

Drop L3?

> 
> Add the interface to display the CBMs (Capacity Bit Masks) of the
> io_alloc feature.

After the fs/arch split it is not always obvious what is meant by
"interface" ... it could be a new API between fs and arch or it could be
a new resctrl file.
This can be specific:
	Add "io_alloc_cbm" resctrl file to display ...

> 
> The CBM interface file io_alloc_cbm will reside in the info directory
> (e.g., /sys/fs/resctrl/info/L3/). Displaying the resource name is not
> necessary. Pass the resource name to show_doms() and print it only if
> the name is valid. For io_alloc, pass NULL to suppress printing the
> resource name.
> 
> When CDP is enabled, io_alloc routes traffic using the highest CLOSID
> associated with an L3CODE resource. However, CBMs can be accessed via
> either L3CODE or L3DATA resources.
> 
> Signed-off-by: Babu Moger <babu.moger@amd.com>
> ---

...

> ---
>  Documentation/filesystems/resctrl.rst | 13 +++++++
>  fs/resctrl/ctrlmondata.c              |  8 +++--
>  fs/resctrl/internal.h                 |  2 ++
>  fs/resctrl/rdtgroup.c                 | 51 ++++++++++++++++++++++++++-
>  4 files changed, 70 insertions(+), 4 deletions(-)
> 
> diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
> index 03c829b2c276..b31748ec8c61 100644
> --- a/Documentation/filesystems/resctrl.rst
> +++ b/Documentation/filesystems/resctrl.rst
> @@ -169,6 +169,19 @@ related to allocation:
>  		When CDP is enabled, io_alloc routes I/O traffic using the highest
>  		CLOSID allocated for the instruction cache (L3CODE).
>  
> +"io_alloc_cbm":
> +		Capacity Bit Masks (CBMs) available to supported IO devices which
> +		can directly insert cache lines in L3 which can help to reduce the

"CBMs that describe the portions of cache instances to which I/O traffic               
from supported IO devices are routed."

Please check ...  there seems to be some inconsistency in "IO" vs "I/O" use.

Also consider something like,
"When CDP is enabled "io_alloc_cbm" associated with the DATA 
 and CODE resources may reflect the same values. For example, values read from
 and written to /sys/fs/resctrl/info/L3DATA/io_alloc_cbm may be reflected by
 /sys/fs/resctrl/info/L3CODE/io_alloc_cbm and vice versa."
What do you think?

> +		latency. CBMs are displayed in the following format:
> +
> +			<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
> +
> +		Example::
> +
> +			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
> +			0=ffff;1=ffff
> +
> +
>  Memory bandwidth(MB) subdirectory contains the following files
>  with respect to allocation:
>  
> diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
> index 6ed2dfd4dbbd..ea039852569a 100644
> --- a/fs/resctrl/ctrlmondata.c
> +++ b/fs/resctrl/ctrlmondata.c
> @@ -381,7 +381,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
>  	return ret ?: nbytes;
>  }
>  
> -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema, char *resource_name,
> +	       int closid)
>  {
>  	struct rdt_resource *r = schema->res;
>  	struct rdt_ctrl_domain *dom;
> @@ -391,7 +392,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
>  	/* Walking r->domains, ensure it can't race with cpuhp */
>  	lockdep_assert_cpus_held();
>  
> -	seq_printf(s, "%*s:", max_name_width, schema->name);
> +	if (resource_name)
> +		seq_printf(s, "%*s:", max_name_width, resource_name);
>  	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
>  		if (sep)
>  			seq_puts(s, ";");
> @@ -437,7 +439,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
>  			closid = rdtgrp->closid;
>  			list_for_each_entry(schema, &resctrl_schema_all, list) {
>  				if (closid < schema->num_closid)
> -					show_doms(s, schema, closid);
> +					show_doms(s, schema, schema->name, closid);
>  			}
>  		}
>  	} else {
> diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
> index 9a8cf6f11151..14f3697c1187 100644
> --- a/fs/resctrl/internal.h
> +++ b/fs/resctrl/internal.h
> @@ -374,6 +374,8 @@ void rdt_staged_configs_clear(void);
>  bool closid_allocated(unsigned int closid);
>  
>  int resctrl_find_cleanest_closid(void);
> +void show_doms(struct seq_file *s, struct resctrl_schema *schema,
> +	       char *name, int closid);
>  
>  #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
>  int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
> diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
> index bbc032b4d0e9..0c2d2cf4baa1 100644
> --- a/fs/resctrl/rdtgroup.c
> +++ b/fs/resctrl/rdtgroup.c
> @@ -1997,6 +1997,46 @@ static ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
>  	return ret ?: nbytes;
>  }
>  
> +static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
> +				     struct seq_file *seq, void *v)
> +{
> +	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
> +	struct rdt_resource *r = s->res;
> +	u32 io_alloc_closid;
> +	int ret = 0;
> +
> +	cpus_read_lock();
> +	mutex_lock(&rdtgroup_mutex);
> +
> +	rdt_last_cmd_clear();
> +
> +	if (!r->cache.io_alloc_capable) {
> +		rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
> +		ret = -ENODEV;
> +		goto cbm_show_out;

out_unlock

> +	}
> +
> +	if (!resctrl_arch_get_io_alloc_enabled(r)) {
> +		rdt_last_cmd_puts("io_alloc feature is not enabled\n");
> +		ret = -EINVAL;
> +		goto cbm_show_out;
> +	}
> +
> +	io_alloc_closid = resctrl_io_alloc_closid_get(r);
> +	if (io_alloc_closid < 0) {

Another example where io_alloc_closid must be valid thanks to earlier resctrl_arch_get_io_alloc_enabled(r).
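
With the check dropped, the tail of the function reduces to something
like this (sketch, using the out_unlock label suggested above):

	io_alloc_closid = resctrl_io_alloc_closid_get(r);
	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;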

> +		rdt_last_cmd_puts("Max CLOSID to support io_alloc is not available\n");
> +		ret = -EINVAL;
> +		goto cbm_show_out;
> +	}
> +
> +	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);
> +
> +cbm_show_out:

out_unlock ... to match rest of resctrl

> +	mutex_unlock(&rdtgroup_mutex);
> +	cpus_read_unlock();
> +	return ret;
> +}
> +
>  /* rdtgroup information files for one cache resource. */
>  static struct rftype res_common_files[] = {
>  	{

Reinette
Re: [PATCH v6 6/8] fs/resctrl: Introduce interface to display io_alloc CBMs
Posted by Moger, Babu
Hi Reinette,

On 6/17/2025 11:01 PM, Reinette Chatre wrote:
> Hi Babu,
> 
> On 6/11/25 2:23 PM, Babu Moger wrote:
>> The io_alloc feature in resctrl enables system software to configure
>> the portion of the L3 cache allocated for I/O traffic.
> 
> Drop L3?

Sure.


>>
>> Add the interface to display the CBMs (Capacity Bit Masks) of the
>> io_alloc feature.
> 
> After the fs/arch split it is not always obvious what is meant by
> "interface" ... it could be a new API between fs and arch or it could be
> a new resctrl file.
> This can be specific:
> 	Add "io_alloc_cbm" resctrl file to display ...
> 

Sure.

>>
>> The CBM interface file io_alloc_cbm will reside in the info directory
>> (e.g., /sys/fs/resctrl/info/L3/). Displaying the resource name is not
>> necessary. Pass the resource name to show_doms() and print it only if
>> the name is valid. For io_alloc, pass NULL to suppress printing the
>> resource name.
>>
>> When CDP is enabled, io_alloc routes traffic using the highest CLOSID
>> associated with an L3CODE resource. However, CBMs can be accessed via
>> either L3CODE or L3DATA resources.
>>
>> Signed-off-by: Babu Moger <babu.moger@amd.com>
>> ---
> 
> ...
> 
>> ---
>>   Documentation/filesystems/resctrl.rst | 13 +++++++
>>   fs/resctrl/ctrlmondata.c              |  8 +++--
>>   fs/resctrl/internal.h                 |  2 ++
>>   fs/resctrl/rdtgroup.c                 | 51 ++++++++++++++++++++++++++-
>>   4 files changed, 70 insertions(+), 4 deletions(-)
>>
>> diff --git a/Documentation/filesystems/resctrl.rst b/Documentation/filesystems/resctrl.rst
>> index 03c829b2c276..b31748ec8c61 100644
>> --- a/Documentation/filesystems/resctrl.rst
>> +++ b/Documentation/filesystems/resctrl.rst
>> @@ -169,6 +169,19 @@ related to allocation:
>>   		When CDP is enabled, io_alloc routes I/O traffic using the highest
>>   		CLOSID allocated for the instruction cache (L3CODE).
>>   
>> +"io_alloc_cbm":
>> +		Capacity Bit Masks (CBMs) available to supported IO devices which
>> +		can directly insert cache lines in L3 which can help to reduce the
> 
> "CBMs that describe the portions of cache instances to which I/O traffic
> from supported IO devices are routed."

Sure.


> Please check ...  there seems to be some inconsistency in "IO" vs "I/O" use.

Changed to "I/O" in all the places.

> 
> Also consider something like,
> "When CDP is enabled "io_alloc_cbm" associated with the DATA
>   and CODE resources may reflect the same values. For example, values read from
>   and written to /sys/fs/resctrl/info/L3DATA/io_alloc_cbm may be reflected by
>   /sys/fs/resctrl/info/L3CODE/io_alloc_cbm and vice versa."
> What do you think?

Looks good.

> 
>> +		latency. CBMs are displayed in the following format:
>> +
>> +			<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
>> +
>> +		Example::
>> +
>> +			# cat /sys/fs/resctrl/info/L3/io_alloc_cbm
>> +			0=ffff;1=ffff
>> +
>> +
>>   Memory bandwidth(MB) subdirectory contains the following files
>>   with respect to allocation:
>>   
>> diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
>> index 6ed2dfd4dbbd..ea039852569a 100644
>> --- a/fs/resctrl/ctrlmondata.c
>> +++ b/fs/resctrl/ctrlmondata.c
>> @@ -381,7 +381,8 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
>>   	return ret ?: nbytes;
>>   }
>>   
>> -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
>> +void show_doms(struct seq_file *s, struct resctrl_schema *schema, char *resource_name,
>> +	       int closid)
>>   {
>>   	struct rdt_resource *r = schema->res;
>>   	struct rdt_ctrl_domain *dom;
>> @@ -391,7 +392,8 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
>>   	/* Walking r->domains, ensure it can't race with cpuhp */
>>   	lockdep_assert_cpus_held();
>>   
>> -	seq_printf(s, "%*s:", max_name_width, schema->name);
>> +	if (resource_name)
>> +		seq_printf(s, "%*s:", max_name_width, resource_name);
>>   	list_for_each_entry(dom, &r->ctrl_domains, hdr.list) {
>>   		if (sep)
>>   			seq_puts(s, ";");
>> @@ -437,7 +439,7 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
>>   			closid = rdtgrp->closid;
>>   			list_for_each_entry(schema, &resctrl_schema_all, list) {
>>   				if (closid < schema->num_closid)
>> -					show_doms(s, schema, closid);
>> +					show_doms(s, schema, schema->name, closid);
>>   			}
>>   		}
>>   	} else {
>> diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h
>> index 9a8cf6f11151..14f3697c1187 100644
>> --- a/fs/resctrl/internal.h
>> +++ b/fs/resctrl/internal.h
>> @@ -374,6 +374,8 @@ void rdt_staged_configs_clear(void);
>>   bool closid_allocated(unsigned int closid);
>>   
>>   int resctrl_find_cleanest_closid(void);
>> +void show_doms(struct seq_file *s, struct resctrl_schema *schema,
>> +	       char *name, int closid);
>>   
>>   #ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
>>   int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
>> diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
>> index bbc032b4d0e9..0c2d2cf4baa1 100644
>> --- a/fs/resctrl/rdtgroup.c
>> +++ b/fs/resctrl/rdtgroup.c
>> @@ -1997,6 +1997,46 @@ static ssize_t resctrl_io_alloc_write(struct kernfs_open_file *of, char *buf,
>>   	return ret ?: nbytes;
>>   }
>>   
>> +static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
>> +				     struct seq_file *seq, void *v)
>> +{
>> +	struct resctrl_schema *s = rdt_kn_parent_priv(of->kn);
>> +	struct rdt_resource *r = s->res;
>> +	u32 io_alloc_closid;
>> +	int ret = 0;
>> +
>> +	cpus_read_lock();
>> +	mutex_lock(&rdtgroup_mutex);
>> +
>> +	rdt_last_cmd_clear();
>> +
>> +	if (!r->cache.io_alloc_capable) {
>> +		rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
>> +		ret = -ENODEV;
>> +		goto cbm_show_out;
> 
> out_unlock
> 

Sure.

>> +	}
>> +
>> +	if (!resctrl_arch_get_io_alloc_enabled(r)) {
>> +		rdt_last_cmd_puts("io_alloc feature is not enabled\n");
>> +		ret = -EINVAL;
>> +		goto cbm_show_out;
>> +	}
>> +
>> +	io_alloc_closid = resctrl_io_alloc_closid_get(r);
>> +	if (io_alloc_closid < 0) {
> 
> Another example where io_alloc_closid must be valid thanks to earlier resctrl_arch_get_io_alloc_enabled(r).

Sure. Will remove this check.

> 
>> +		rdt_last_cmd_puts("Max CLOSID to support io_alloc is not available\n");
>> +		ret = -EINVAL;
>> +		goto cbm_show_out;
>> +	}
>> +
>> +	show_doms(seq, resctrl_schema_io_alloc(s), NULL, io_alloc_closid);
>> +
>> +cbm_show_out:
> 
> out_unlock ... to match rest of resctrl

sure.

> 
>> +	mutex_unlock(&rdtgroup_mutex);
>> +	cpus_read_unlock();
>> +	return ret;
>> +}
>> +
>>   /* rdtgroup information files for one cache resource. */
>>   static struct rftype res_common_files[] = {
>>   	{
> 
> Reinette
> 

thanks
Babu