The "io_alloc" feature is a mechanism that enables direct insertion of data
from I/O devices into the L3 cache. By directly caching data from I/O
devices rather than first storing the I/O data in DRAM, it reduces the
demands on DRAM bandwidth and reduces latency to the processor consuming
the I/O data.
The io_alloc feature uses the highest CLOSID to route the traffic from I/O
devices. Provide the interface to modify io_alloc CBMs (Capacity Bit Masks)
when the feature is enabled.
Signed-off-by: Babu Moger <babu.moger@amd.com>
---
v3: Minor changes due to changes in resctrl_arch_get_io_alloc_enabled()
and resctrl_io_alloc_closid_get().
Taken care of handling the CBM update when CDP is enabled.
Updated the commit log to make it generic.
v2: Added more generic text in documentation.
---
Documentation/arch/x86/resctrl.rst | 12 ++
arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +-
arch/x86/kernel/cpu/resctrl/internal.h | 1 +
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 134 +++++++++++++++++++++-
4 files changed, 147 insertions(+), 2 deletions(-)
diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
index 1b67e31d626c..29c8851bcc7f 100644
--- a/Documentation/arch/x86/resctrl.rst
+++ b/Documentation/arch/x86/resctrl.rst
@@ -169,6 +169,18 @@ related to allocation:
When CDP is enabled, io_alloc routes I/O traffic using the highest
CLOSID allocated for the instruction cache.
+"io_alloc_cbm":
+ Capacity Bit Masks (CBMs) available to supported I/O devices that
+ can directly insert cache lines into the L3 cache, helping to reduce
+ latency. The CBM can be configured by writing to the interface in the
+ following format::
+
+ L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+ When CDP is enabled, L3 control is divided into two separate resources:
+ L3CODE and L3DATA. However, the CBM can only be updated on the L3CODE
+ resource.
+
Memory bandwidth(MB) subdirectory contains the following files
with respect to allocation:
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index d272dea43924..4dfee0436c1c 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -102,7 +102,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
* requires at least two bits set.
* AMD allows non-contiguous bitmasks.
*/
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 07cf8409174d..702f6926bbdf 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -669,4 +669,5 @@ void rdt_staged_configs_clear(void);
bool closid_allocated(unsigned int closid);
int resctrl_find_cleanest_closid(void);
void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid);
+bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r);
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 81b9d8c5dabf..9997cbfc1c19 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1999,6 +1999,137 @@ static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
return ret;
}
+/*
+ * Read the CBM and check its validity. Make sure the CBM is not
+ * shared with any other exclusive resctrl group.
+ */
+static int resctrl_io_alloc_parse_cbm(char *buf, struct resctrl_schema *s,
+ struct rdt_ctrl_domain *d)
+{
+ struct resctrl_staged_config *cfg;
+ struct rdt_resource *r = s->res;
+ u32 io_alloc_closid;
+ u32 cbm_val;
+
+ cfg = &d->staged_config[s->conf_type];
+ if (cfg->have_new_ctrl) {
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
+ return -EINVAL;
+ }
+
+ if (!cbm_validate(buf, &cbm_val, r))
+ return -EINVAL;
+
+ /*
+ * The CBM may not overlap with any other exclusive group.
+ */
+ io_alloc_closid = resctrl_io_alloc_closid_get(r, s);
+ if (rdtgroup_cbm_overlaps(s, d, cbm_val, io_alloc_closid, true)) {
+ rdt_last_cmd_puts("Overlaps with exclusive group\n");
+ return -EINVAL;
+ }
+
+ cfg->new_ctrl = cbm_val;
+ cfg->have_new_ctrl = true;
+
+ return 0;
+}
+
+static int resctrl_io_alloc_parse_line(char *line, struct rdt_resource *r,
+ struct resctrl_schema *s)
+{
+ struct rdt_ctrl_domain *d;
+ char *dom = NULL, *id;
+ unsigned long dom_id;
+
+next:
+ if (!line || line[0] == '\0')
+ return 0;
+
+ dom = strsep(&line, ";");
+ id = strsep(&dom, "=");
+ if (!dom || kstrtoul(id, 10, &dom_id)) {
+ rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
+ return -EINVAL;
+ }
+
+ dom = strim(dom);
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+ if (d->hdr.id == dom_id) {
+ if (resctrl_io_alloc_parse_cbm(dom, s, d))
+ return -EINVAL;
+ goto next;
+ }
+ }
+ return -EINVAL;
+}
+
+static ssize_t resctrl_io_alloc_cbm_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct resctrl_schema *s = of->kn->parent->priv;
+ struct rdt_resource *r = s->res;
+ u32 io_alloc_closid;
+ char *resname;
+ int ret = 0;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+
+ buf[nbytes - 1] = '\0';
+
+ if (!r->cache.io_alloc_capable || s->conf_type == CDP_DATA) {
+ rdt_last_cmd_puts("io_alloc feature is not supported on the resource\n");
+ return -EINVAL;
+ }
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ rdt_last_cmd_clear();
+ rdt_staged_configs_clear();
+
+ if (!resctrl_arch_get_io_alloc_enabled(r)) {
+ rdt_last_cmd_puts("io_alloc feature is not enabled\n");
+ ret = -EINVAL;
+ goto cbm_write_out;
+ }
+
+ resname = strim(strsep(&buf, ":"));
+ if (!buf) {
+ rdt_last_cmd_puts("Missing ':'\n");
+ ret = -EINVAL;
+ goto cbm_write_out;
+ }
+
+ if (strcmp(resname, s->name)) {
+ rdt_last_cmd_printf("Unsupported resource name '%s'\n", resname);
+ ret = -EINVAL;
+ goto cbm_write_out;
+ }
+
+ if (buf[0] == '\0') {
+ rdt_last_cmd_printf("Missing '%s' value\n", resname);
+ ret = -EINVAL;
+ goto cbm_write_out;
+ }
+
+ ret = resctrl_io_alloc_parse_line(buf, r, s);
+ if (ret)
+ goto cbm_write_out;
+
+ io_alloc_closid = resctrl_io_alloc_closid_get(r, s);
+ ret = resctrl_arch_update_domains(r, io_alloc_closid);
+
+cbm_write_out:
+ rdt_staged_configs_clear();
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+
+ return ret ?: nbytes;
+}
+
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
@@ -2160,9 +2291,10 @@ static struct rftype res_common_files[] = {
},
{
.name = "io_alloc_cbm",
- .mode = 0444,
+ .mode = 0644,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = resctrl_io_alloc_cbm_show,
+ .write = resctrl_io_alloc_cbm_write,
},
{
.name = "mba_MBps_event",
--
2.34.1
Hi Babu,
On 1/30/25 1:20 PM, Babu Moger wrote:
> "io_alloc" feature is a mechanism that enables direct insertion of data
> from I/O devices into the L3 cache. By directly caching data from I/O
> devices rather than first storing the I/O data in DRAM, it reduces the
> demands on DRAM bandwidth and reduces latency to the processor consuming
> the I/O data.
>
> io_alloc feature uses the highest CLOSID to route the traffic from I/O
> devices. Provide the interface to modify io_alloc CBMs (Capacity Bit Mask)
> when feature is enabled.
>
> Signed-off-by: Babu Moger <babu.moger@amd.com>
> ---
> v3: Minor changes due to changes in resctrl_arch_get_io_alloc_enabled()
> and resctrl_io_alloc_closid_get().
> Taken care of handling the CBM update when CDP is enabled.
> Updated the commit log to make it generic.
>
> v2: Added more generic text in documentation.
> ---
> Documentation/arch/x86/resctrl.rst | 12 ++
> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +-
> arch/x86/kernel/cpu/resctrl/internal.h | 1 +
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 134 +++++++++++++++++++++-
> 4 files changed, 147 insertions(+), 2 deletions(-)
>
> diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
> index 1b67e31d626c..29c8851bcc7f 100644
> --- a/Documentation/arch/x86/resctrl.rst
> +++ b/Documentation/arch/x86/resctrl.rst
> @@ -169,6 +169,18 @@ related to allocation:
> When CDP is enabled, io_alloc routes I/O traffic using the highest
> CLOSID allocated for the instruction cache.
>
> +"io_alloc_cbm":
> + Capacity Bit Masks (CBMs) available to supported IO devices which
> + can directly insert cache lines in L3 which can help to reduce the
> + latency. CBM can be configured by writing to the interface in the
> + following format::
> +
> + L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
This format is dependent on the resource name (not always L3).
> +
> + When CDP is enabled, L3 control is divided into two separate resources:
> + L3CODE and L3DATA. However, the CBM can only be updated on the L3CODE
> + resource.
> +
> Memory bandwidth(MB) subdirectory contains the following files
> with respect to allocation:
>
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index d272dea43924..4dfee0436c1c 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -102,7 +102,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
> * requires at least two bits set.
> * AMD allows non-contiguous bitmasks.
> */
> -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
> +bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
> {
> unsigned long first_bit, zero_bit, val;
> unsigned int cbm_len = r->cache.cbm_len;
> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
> index 07cf8409174d..702f6926bbdf 100644
> --- a/arch/x86/kernel/cpu/resctrl/internal.h
> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
> @@ -669,4 +669,5 @@ void rdt_staged_configs_clear(void);
> bool closid_allocated(unsigned int closid);
> int resctrl_find_cleanest_closid(void);
> void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid);
> +bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r);
> #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 81b9d8c5dabf..9997cbfc1c19 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -1999,6 +1999,137 @@ static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
> return ret;
> }
>
> +/*
> + * Read the CBM and check the validity. Make sure CBM is not shared
> + * with any other exclusive resctrl groups.
> + */
> +static int resctrl_io_alloc_parse_cbm(char *buf, struct resctrl_schema *s,
> + struct rdt_ctrl_domain *d)
> +{
> + struct resctrl_staged_config *cfg;
> + struct rdt_resource *r = s->res;
> + u32 io_alloc_closid;
> + u32 cbm_val;
> +
> + cfg = &d->staged_config[s->conf_type];
> + if (cfg->have_new_ctrl) {
> + rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
> + return -EINVAL;
> + }
> +
> + if (!cbm_validate(buf, &cbm_val, r))
> + return -EINVAL;
> +
> + /*
> + * The CBM may not overlap with other exclusive group.
> + */
> + io_alloc_closid = resctrl_io_alloc_closid_get(r, s);
> + if (rdtgroup_cbm_overlaps(s, d, cbm_val, io_alloc_closid, true)) {
> + rdt_last_cmd_puts("Overlaps with exclusive group\n");
> + return -EINVAL;
> + }
> +
> + cfg->new_ctrl = cbm_val;
> + cfg->have_new_ctrl = true;
> +
> + return 0;
> +}
Could you please reduce amount of duplication with parse_cbm()?
(for rest of patch, please check that related comments from previous patches
are addressed here also)
Reinette
Hi Reinette,
On 3/21/25 18:00, Reinette Chatre wrote:
> Hi Babu,
>
> On 1/30/25 1:20 PM, Babu Moger wrote:
>> "io_alloc" feature is a mechanism that enables direct insertion of data
>> from I/O devices into the L3 cache. By directly caching data from I/O
>> devices rather than first storing the I/O data in DRAM, it reduces the
>> demands on DRAM bandwidth and reduces latency to the processor consuming
>> the I/O data.
>>
>> io_alloc feature uses the highest CLOSID to route the traffic from I/O
>> devices. Provide the interface to modify io_alloc CBMs (Capacity Bit Mask)
>> when feature is enabled.
>>
>> Signed-off-by: Babu Moger <babu.moger@amd.com>
>> ---
>> v3: Minor changes due to changes in resctrl_arch_get_io_alloc_enabled()
>> and resctrl_io_alloc_closid_get().
>> Taken care of handling the CBM update when CDP is enabled.
>> Updated the commit log to make it generic.
>>
>> v2: Added more generic text in documentation.
>> ---
>> Documentation/arch/x86/resctrl.rst | 12 ++
>> arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +-
>> arch/x86/kernel/cpu/resctrl/internal.h | 1 +
>> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 134 +++++++++++++++++++++-
>> 4 files changed, 147 insertions(+), 2 deletions(-)
>>
>> diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst
>> index 1b67e31d626c..29c8851bcc7f 100644
>> --- a/Documentation/arch/x86/resctrl.rst
>> +++ b/Documentation/arch/x86/resctrl.rst
>> @@ -169,6 +169,18 @@ related to allocation:
>> When CDP is enabled, io_alloc routes I/O traffic using the highest
>> CLOSID allocated for the instruction cache.
>>
>> +"io_alloc_cbm":
>> + Capacity Bit Masks (CBMs) available to supported IO devices which
>> + can directly insert cache lines in L3 which can help to reduce the
>> + latency. CBM can be configured by writing to the interface in the
>> + following format::
>> +
>> + L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
>
> This format is dependent on the resource name (not always L3).
Yes. Will remove "L3:"
>
>> +
>> + When CDP is enabled, L3 control is divided into two separate resources:
>> + L3CODE and L3DATA. However, the CBM can only be updated on the L3CODE
>> + resource.
>> +
>> Memory bandwidth(MB) subdirectory contains the following files
>> with respect to allocation:
>>
>> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> index d272dea43924..4dfee0436c1c 100644
>> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
>> @@ -102,7 +102,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
>> * requires at least two bits set.
>> * AMD allows non-contiguous bitmasks.
>> */
>> -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
>> +bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
>> {
>> unsigned long first_bit, zero_bit, val;
>> unsigned int cbm_len = r->cache.cbm_len;
>> diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
>> index 07cf8409174d..702f6926bbdf 100644
>> --- a/arch/x86/kernel/cpu/resctrl/internal.h
>> +++ b/arch/x86/kernel/cpu/resctrl/internal.h
>> @@ -669,4 +669,5 @@ void rdt_staged_configs_clear(void);
>> bool closid_allocated(unsigned int closid);
>> int resctrl_find_cleanest_closid(void);
>> void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid);
>> +bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r);
>> #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
>> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> index 81b9d8c5dabf..9997cbfc1c19 100644
>> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
>> @@ -1999,6 +1999,137 @@ static int resctrl_io_alloc_cbm_show(struct kernfs_open_file *of,
>> return ret;
>> }
>>
>> +/*
>> + * Read the CBM and check the validity. Make sure CBM is not shared
>> + * with any other exclusive resctrl groups.
>> + */
>> +static int resctrl_io_alloc_parse_cbm(char *buf, struct resctrl_schema *s,
>> + struct rdt_ctrl_domain *d)
>> +{
>> + struct resctrl_staged_config *cfg;
>> + struct rdt_resource *r = s->res;
>> + u32 io_alloc_closid;
>> + u32 cbm_val;
>> +
>> + cfg = &d->staged_config[s->conf_type];
>> + if (cfg->have_new_ctrl) {
>> + rdt_last_cmd_printf("Duplicate domain %d\n", d->hdr.id);
>> + return -EINVAL;
>> + }
>> +
>> + if (!cbm_validate(buf, &cbm_val, r))
>> + return -EINVAL;
>> +
>> + /*
>> + * The CBM may not overlap with other exclusive group.
>> + */
>> + io_alloc_closid = resctrl_io_alloc_closid_get(r, s);
>> + if (rdtgroup_cbm_overlaps(s, d, cbm_val, io_alloc_closid, true)) {
>> + rdt_last_cmd_puts("Overlaps with exclusive group\n");
>> + return -EINVAL;
>> + }
>> +
>> + cfg->new_ctrl = cbm_val;
>> + cfg->have_new_ctrl = true;
>> +
>> + return 0;
>> +}
>
> Could you please reduce amount of duplication with parse_cbm()?
parse_cbm() needs rdtgrp to read 'mode' and 'closid' which is passed in
rdt_parse_data.
We can call parse_cbm() directly if we add 'mode' and 'closid' to
rdt_parse_data. Will add those changes in the next revision.
>
> (for rest of patch, please check that related comments from previous patches
> are addressed here also)
Sure. Will do.
>
> Reinette
>
--
Thanks
Babu Moger
© 2016 - 2026 Red Hat, Inc.