Assign/unassign counters on resctrl group creation/deletion. Two counters
are required per group, one for MBM total event and one for MBM local
event.
There are a limited number of counters available for assignment. If these
counters are exhausted, the kernel will display the error message: "Out of
MBM assignable counters". However, it is not necessary to fail the
creation of a group due to assignment failures. Users have the flexibility
to modify the assignments at a later time.
Signed-off-by: Babu Moger <babu.moger@amd.com>
---
v8: Renamed rdtgroup_assign_grp to rdtgroup_assign_cntrs.
Renamed rdtgroup_unassign_grp to rdtgroup_unassign_cntrs.
    Fixed the problem with unassigning the child MON groups of the CTRL_MON group.
v7: Reworded the commit message.
Removed the reference of ABMC with mbm_cntr_assign.
Renamed the function rdtgroup_assign_cntrs to rdtgroup_assign_grp.
v6: Removed the redundant comments on all the calls of
rdtgroup_assign_cntrs. Updated the commit message.
Dropped printing error message on every call of rdtgroup_assign_cntrs.
v5: Removed the code to enable/disable ABMC during the mount.
That will be another patch.
Added arch callers to get the arch specific data.
    Renamed functions to match the other ABMC functions.
Added code comments for assignment failures.
v4: Few name changes based on the upstream discussion.
Commit message update.
v3: This is a new patch. It addresses the upstream comment to enable the
    ABMC feature by default if the feature is available.
---
arch/x86/kernel/cpu/resctrl/rdtgroup.c | 64 ++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 791258adcbda..cb2c60c0319e 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2875,6 +2875,52 @@ static void schemata_list_destroy(void)
 	}
 }
 
+/*
+ * Called when a new group is created. If `mbm_cntr_assign` mode is enabled,
+ * counters are automatically assigned. Each group requires two counters:
+ * one for the total event and one for the local event. Due to the limited
+ * number of counters, assignments may fail in some cases. However, it is
+ * not necessary to fail the group creation. Users have the option to
+ * modify the assignments after the group has been created.
+ */
+static int rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
+{
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	int ret = 0;
+
+	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
+		return 0;
+
+	if (is_mbm_total_enabled())
+		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
+
+	if (!ret && is_mbm_local_enabled())
+		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
+
+	return ret;
+}
+
+/*
+ * Called when a group is deleted. Counters are unassigned if they were
+ * in the assigned state.
+ */
+static int rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
+{
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	int ret = 0;
+
+	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
+		return 0;
+
+	if (is_mbm_total_enabled())
+		ret = rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
+
+	if (!ret && is_mbm_local_enabled())
+		ret = rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
+
+	return ret;
+}
+
 static int rdt_get_tree(struct fs_context *fc)
 {
 	struct rdt_fs_context *ctx = rdt_fc2context(fc);
@@ -2934,6 +2980,8 @@ static int rdt_get_tree(struct fs_context *fc)
 		if (ret < 0)
 			goto out_mongrp;
 		rdtgroup_default.mon.mon_data_kn = kn_mondata;
+
+		rdtgroup_assign_cntrs(&rdtgroup_default);
 	}
 
 	ret = rdt_pseudo_lock_init();
@@ -2964,6 +3012,7 @@ static int rdt_get_tree(struct fs_context *fc)
 out_psl:
 	rdt_pseudo_lock_release();
 out_mondata:
+	rdtgroup_unassign_cntrs(&rdtgroup_default);
 	if (resctrl_arch_mon_capable())
 		kernfs_remove(kn_mondata);
 out_mongrp:
@@ -3144,6 +3193,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
 
 	head = &rdtgrp->mon.crdtgrp_list;
 	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
+		rdtgroup_unassign_cntrs(sentry);
 		free_rmid(sentry->closid, sentry->mon.rmid);
 		list_del(&sentry->mon.crdtgrp_list);
 
@@ -3184,6 +3234,8 @@ static void rmdir_all_sub(void)
 		cpumask_or(&rdtgroup_default.cpu_mask,
 			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
 
+		rdtgroup_unassign_cntrs(rdtgrp);
+
 		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
 
 		kernfs_remove(rdtgrp->kn);
@@ -3223,6 +3275,8 @@ static void rdt_kill_sb(struct super_block *sb)
 		resctrl_arch_disable_alloc();
 	if (resctrl_arch_mon_capable())
 		resctrl_arch_disable_mon();
+
+	rdtgroup_unassign_cntrs(&rdtgroup_default);
 	resctrl_mounted = false;
 	kernfs_kill_sb(sb);
 	mutex_unlock(&rdtgroup_mutex);
@@ -3814,6 +3868,8 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
 		goto out_unlock;
 	}
 
+	rdtgroup_assign_cntrs(rdtgrp);
+
 	kernfs_activate(rdtgrp->kn);
 
 	/*
@@ -3858,6 +3914,8 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 	if (ret)
 		goto out_closid_free;
 
+	rdtgroup_assign_cntrs(rdtgrp);
+
 	kernfs_activate(rdtgrp->kn);
 
 	ret = rdtgroup_init_alloc(rdtgrp);
@@ -3883,6 +3941,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 out_del_list:
 	list_del(&rdtgrp->rdtgroup_list);
 out_rmid_free:
+	rdtgroup_unassign_cntrs(rdtgrp);
 	mkdir_rdt_prepare_rmid_free(rdtgrp);
 out_closid_free:
 	closid_free(closid);
@@ -3953,6 +4012,9 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
 	update_closid_rmid(tmpmask, NULL);
 
 	rdtgrp->flags = RDT_DELETED;
+
+	rdtgroup_unassign_cntrs(rdtgrp);
+
 	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
 
 	/*
@@ -3999,6 +4061,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
 	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
 	update_closid_rmid(tmpmask, NULL);
 
+	rdtgroup_unassign_cntrs(rdtgrp);
+
 	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
 	closid_free(rdtgrp->closid);
 
--
2.34.1
Hi Babu,

On 10/9/24 10:39 AM, Babu Moger wrote:
> Assign/unassign counters on resctrl group creation/deletion. Two counters
> are required per group, one for MBM total event and one for MBM local
> event.
>
> There are a limited number of counters available for assignment. If these
> counters are exhausted, the kernel will display the error message: "Out of
> MBM assignable counters". However, it is not necessary to fail the
> creation of a group due to assignment failures. Users have the flexibility
> to modify the assignments at a later time.
>
> Signed-off-by: Babu Moger <babu.moger@amd.com>
> ---

...

> ---
>  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 64 ++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 791258adcbda..cb2c60c0319e 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c

...

>  static int rdt_get_tree(struct fs_context *fc)
>  {
>  	struct rdt_fs_context *ctx = rdt_fc2context(fc);
> @@ -2934,6 +2980,8 @@ static int rdt_get_tree(struct fs_context *fc)
>  		if (ret < 0)
>  			goto out_mongrp;
>  		rdtgroup_default.mon.mon_data_kn = kn_mondata;
> +
> +		rdtgroup_assign_cntrs(&rdtgroup_default);
>  	}
>
>  	ret = rdt_pseudo_lock_init();
> @@ -2964,6 +3012,7 @@ static int rdt_get_tree(struct fs_context *fc)
>  out_psl:
>  	rdt_pseudo_lock_release();
>  out_mondata:
> +	rdtgroup_unassign_cntrs(&rdtgroup_default);
>  	if (resctrl_arch_mon_capable())
>  		kernfs_remove(kn_mondata);

I think I mentioned this before ... this addition belongs within the
"if (resctrl_arch_mon_capable())" to be symmetrical with where it was
called from.

> out_mongrp:
> @@ -3144,6 +3193,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
>
>  	head = &rdtgrp->mon.crdtgrp_list;
>  	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
> +		rdtgroup_unassign_cntrs(sentry);
>  		free_rmid(sentry->closid, sentry->mon.rmid);
>  		list_del(&sentry->mon.crdtgrp_list);
>
> @@ -3184,6 +3234,8 @@ static void rmdir_all_sub(void)
>  		cpumask_or(&rdtgroup_default.cpu_mask,
>  			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
>
> +		rdtgroup_unassign_cntrs(rdtgrp);
> +
>  		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
>
>  		kernfs_remove(rdtgrp->kn);
> @@ -3223,6 +3275,8 @@ static void rdt_kill_sb(struct super_block *sb)
>  		resctrl_arch_disable_alloc();
>  	if (resctrl_arch_mon_capable())
>  		resctrl_arch_disable_mon();
> +
> +	rdtgroup_unassign_cntrs(&rdtgroup_default);

Unassigning counters after monitoring is completely disabled seems late. I
think this can be moved earlier to be right after the counters of all the
other groups are unassigned.

> ...

Reinette
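For illustration, the symmetric placement suggested above might look like
the sketch below. The labels and calls are taken from the quoted
rdt_get_tree() error path; this is a sketch of the suggestion, not the
committed fix.

out_mondata:
	if (resctrl_arch_mon_capable()) {
		/*
		 * Unassign under the same condition used on the mount path,
		 * where rdtgroup_assign_cntrs() is reached only when
		 * monitoring is capable.
		 */
		rdtgroup_unassign_cntrs(&rdtgroup_default);
		kernfs_remove(kn_mondata);
	}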
Hi Reinette,

On 10/15/2024 10:30 PM, Reinette Chatre wrote:
> Hi Babu,
>
> On 10/9/24 10:39 AM, Babu Moger wrote:
>> Assign/unassign counters on resctrl group creation/deletion. Two counters
>> are required per group, one for MBM total event and one for MBM local
>> event.
>>
>> There are a limited number of counters available for assignment. If these
>> counters are exhausted, the kernel will display the error message: "Out of
>> MBM assignable counters". However, it is not necessary to fail the
>> creation of a group due to assignment failures. Users have the flexibility
>> to modify the assignments at a later time.
>>
>> Signed-off-by: Babu Moger <babu.moger@amd.com>
>> ---
> ...
>
>> @@ -2964,6 +3012,7 @@ static int rdt_get_tree(struct fs_context *fc)
>>  out_psl:
>>  	rdt_pseudo_lock_release();
>>  out_mondata:
>> +	rdtgroup_unassign_cntrs(&rdtgroup_default);
>>  	if (resctrl_arch_mon_capable())
>>  		kernfs_remove(kn_mondata);
>
> I think I mentioned this before ... this addition belongs within the
> "if (resctrl_arch_mon_capable())" to be symmetrical with where it was
> called from.

Sure.

> ...
>
>> @@ -3223,6 +3275,8 @@ static void rdt_kill_sb(struct super_block *sb)
>>  		resctrl_arch_disable_alloc();
>>  	if (resctrl_arch_mon_capable())
>>  		resctrl_arch_disable_mon();
>> +
>> +	rdtgroup_unassign_cntrs(&rdtgroup_default);
>
> Unassigning counters after monitoring is completely disabled seems late. I
> think this can be moved earlier to be right after the counters of all the
> other groups are unassigned.

Sure. Right after rmdir_all_sub().

> ...
>
> Reinette

Thanks
- Babu Moger
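A sketch of the ordering agreed on above, for illustration only; unrelated
lines in rdt_kill_sb() are elided and the surrounding code is assumed from
the quoted hunks.

static void rdt_kill_sb(struct super_block *sb)
{
	...
	rmdir_all_sub();
	/*
	 * Unassign the default group's counters together with those of
	 * all other groups, while monitoring is still enabled.
	 */
	rdtgroup_unassign_cntrs(&rdtgroup_default);
	...
	if (resctrl_arch_mon_capable())
		resctrl_arch_disable_mon();

	resctrl_mounted = false;
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
}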
On Wed, Oct 09, 2024 at 12:39:44PM -0500, Babu Moger wrote:
> +/*
> + * Called when a new group is created. If `mbm_cntr_assign` mode is enabled,
> + * counters are automatically assigned. Each group requires two counters:
> + * one for the total event and one for the local event. Due to the limited
> + * number of counters, assignments may fail in some cases. However, it is
> + * not necessary to fail the group creation. Users have the option to
> + * modify the assignments after the group has been created.
> + */
> +static int rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
> +{
> +	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
> +	int ret = 0;
> +
> +	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
> +		return 0;
> +
> +	if (is_mbm_total_enabled())
> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
> +
> +	if (!ret && is_mbm_local_enabled())
> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);

This overwrites the value from allocating the counter for total event.

> +
> +	return ret;

But none of the callers check the return. Indeed it is ok (and
expected) that counter allocation can fail.

Just make this a "void" function and delete the "ret" local variable.

> +}
> +
> +/*
> + * Called when a group is deleted. Counters are unassigned if they were
> + * in the assigned state.
> + */
> +static int rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
> +{
> +	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
> +	int ret = 0;
> +
> +	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
> +		return 0;
> +
> +	if (is_mbm_total_enabled())
> +		ret = rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
> +
> +	if (!ret && is_mbm_local_enabled())
> +		ret = rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
> +
> +	return ret;

Ditto. No caller checks. Make this a void function. Dig down the
call chain here. It looks like rdtgroup_unassign_cntr_event() can't
fail, so it should be a void function too. Ditto resctrl_arch_config_cntr().

> +}

-Tony
Hi Tony,

On 10/11/2024 12:17 PM, Tony Luck wrote:
> On Wed, Oct 09, 2024 at 12:39:44PM -0500, Babu Moger wrote:
>> +/*
>> + * Called when a new group is created. If `mbm_cntr_assign` mode is enabled,
...
>> +	if (!ret && is_mbm_local_enabled())
>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
>
> This overwrites the value from allocating the counter for total event.

Total event and local events have two different indexes.
Can you please elaborate?

>
>> +
>> +	return ret;
>
> But none of the callers check the return. Indeed it is ok (and
> expected) that counter allocation can fail.
>
> Just make this a "void" function and delete the "ret" local variable.

Comment below.

>> +}
>> +
>> +/*
>> + * Called when a group is deleted. Counters are unassigned if they were
>> + * in the assigned state.
>> + */
...
>> +
>> +	return ret;
>
> Ditto. No caller checks. Make this a void function. Dig down the
> call chain here. It looks like rdtgroup_unassign_cntr_event() can't
> fail, so it should be a void function too. Ditto resctrl_arch_config_cntr().

It started out as a void function. In this case, all the calls in the
sequence return 0. There is a possibility that other architectures could
return failure (in the arch call resctrl_arch_config_cntr()). Keeping that
in mind, we added the check to handle the return values.

Hope that helps.

Thanks
-- 
- Babu Moger
>>> +static int rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
>>> +{
>>> +	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
>>> +	int ret = 0;
>>> +
>>> +	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
>>> +		return 0;
>>> +
>>> +	if (is_mbm_total_enabled())
>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);

Consider that this call fails. "ret" indicates failure to allocate.

>>> +
>>> +	if (!ret && is_mbm_local_enabled())
>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);

Now this call succeeds. The failure of the previous call is forgotten as
"ret" is overwritten with the success code.

>>
>> This overwrites the value from allocating the counter for total event.
>
> Total event and local events have two different indexes.
> Can you please elaborate?

See comments above. If you want a return code you need

	int ret_local = 0, ret_total = 0;

	if (is_mbm_total_enabled())
		ret_total = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
	if (!ret_total && is_mbm_local_enabled())
		ret_local = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);

	return some_function of ret_local and ret_total;

Not sure if you want to say success only if both of these calls succeeded.
Or maybe if either worked?

But it all seems complicated since callers don't have to take any different
action depending on whether allocation of a counter succeeds or fails.

-Tony
Hi Tony,

On 10/11/24 16:33, Luck, Tony wrote:
>>>> +static int rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
>>>> +{
>>>> +	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
>>>> +	int ret = 0;
>>>> +
>>>> +	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
>>>> +		return 0;
>>>> +
>>>> +	if (is_mbm_total_enabled())
>>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
>
> Consider that this call fails. "ret" indicates failure to allocate.

Look at this call:

	if (is_mbm_total_enabled())
		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);

If this call fails, it returns an error immediately. Let's say ret = 1
(1 is for failure, 0 for success).

>
>>>> +
>>>> +	if (!ret && is_mbm_local_enabled())
>>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
>
> Now this call succeeds. The failure of the previous call is forgotten as
> "ret" is overwritten with the success code.

It will not make this call if the first call fails because of this check:

	if (!ret && is_mbm_local_enabled())
		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL,
						 QOS_L3_MBM_LOCAL_EVENT_ID);

	return ret;

Here if (!1) evaluates to false.

Did I miss something?

>
>>>
>>> This overwrites the value from allocating the counter for total event.
>>
>> Total event and local events have two different indexes.
>> Can you please elaborate?
>
> See comments above. If you want a return code you need
>
> 	int ret_local = 0, ret_total = 0;
>
> 	if (is_mbm_total_enabled())
> 		ret_total = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);
> 	if (!ret_total && is_mbm_local_enabled())
> 		ret_local = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
>
> 	return some_function of ret_local and ret_total;
>
> Not sure if you want to say success only if both of these calls succeeded.
> Or maybe if either worked?
>
> But it all seems complicated since callers don't have to take any different
> action depending on whether allocation of a counter succeeds or fails.
>
> -Tony

-- 
Thanks
Babu Moger
>>>>> +	if (!ret && is_mbm_local_enabled())
>>>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
>>
>> Now this call succeeds. The failure of the previous call is forgotten as
>> "ret" is overwritten with the success code.
>
> It will not make this call if the first call fails because of this check:
>
> 	if (!ret && is_mbm_local_enabled())
> 		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL,
> 						 QOS_L3_MBM_LOCAL_EVENT_ID);
>
> 	return ret;
>
> Here if (!1) evaluates to false.
>
> Did I miss something?

You didn't.

I missed the check for ret in the local case.

It is still the case that callers don't care about the return value.

-Tony
On 12/31/69 18:00, Luck, Tony wrote:
>>>>>> +	if (!ret && is_mbm_local_enabled())
>>>>>> +		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
>>>
>>> Now this call succeeds. The failure of the previous call is forgotten as
>>> "ret" is overwritten with the success code.
>>
>> It will not make this call if the first call fails because of this check:
>>
>> 	if (!ret && is_mbm_local_enabled())
>> 		ret = rdtgroup_assign_cntr_event(r, rdtgrp, NULL,
>> 						 QOS_L3_MBM_LOCAL_EVENT_ID);
>>
>> 	return ret;
>>
>> Here if (!1) evaluates to false.
>>
>> Did I miss something?
>
> You didn't.
>
> I missed the check for ret in the local case.

That is fine.

> It is still the case that callers don't care about the return value.

That is correct.

-- 
Thanks
Babu Moger
Hi Babu,

On 10/14/24 9:35 AM, Moger, Babu wrote:
> On 12/31/69 18:00, Luck, Tony wrote:
>>
>> It is still the case that callers don't care about the return value.
>
> That is correct.
>

Are you planning to change this? I think Tony has a good point that since
assignment failures do not matter it unnecessarily complicates the code to
have rdtgroup_assign_cntrs() return failure.

I also think the internals of rdtgroup_assign_cntrs() deserve a closer look.
I assume that error handling within rdtgroup_assign_cntrs() was created with
ABMC in mind. When only considering ABMC then the only reason why
rdtgroup_assign_cntr_event() could fail is if the system ran out of counters
and then indeed it makes no sense to attempt another call to
rdtgroup_assign_cntr_event().

Now that the resctrl fs/arch split is clear the implementation does indeed
expose another opportunity for failure ... if the arch callback,
resctrl_arch_config_cntr(), fails. It could thus be possible for the first
rdtgroup_assign_cntr_event() to fail while the second succeeds. Earlier [1],
Tony suggested to, within rdtgroup_assign_cntrs(), remove the local ret
variable and have it return void. This sounds good to me. When doing so a
function comment explaining the usage will be helpful.

I also think that rdtgroup_unassign_cntrs() deserves similar scrutiny. Even
more so since I do not think that the second rdtgroup_unassign_cntr_event()
should be prevented from running if the first rdtgroup_unassign_cntr_event()
fails.

Reinette

[1] https://lore.kernel.org/all/ZwldvDBjEA3TSw2k@agluck-desk3.sc.intel.com/
Hi Reinette/Tony,

On 10/14/24 21:39, wrote:
> Hi Babu,
>
> On 10/14/24 9:35 AM, Moger, Babu wrote:
>> On 12/31/69 18:00, Luck, Tony wrote:
>>>
>>> It is still the case that callers don't care about the return value.
>>
>> That is correct.
>>
>
> Are you planning to change this? I think Tony has a good point that since
> assignment failures do not matter it unnecessarily complicates the code to
> have rdtgroup_assign_cntrs() return failure.
>
> ...
>
> I also think that rdtgroup_unassign_cntrs() deserves similar scrutiny. Even
> more so since I do not think that the second rdtgroup_unassign_cntr_event()
> should be prevented from running if the first rdtgroup_unassign_cntr_event()
> fails.

Sounds fine with me. Now it will look like this below.

static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
		return;

	if (is_mbm_total_enabled())
		rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);

	if (is_mbm_local_enabled())
		rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
}

/*
 * Called when a group is deleted. Counters are unassigned if they were
 * in the assigned state.
 */
static void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
		return;

	if (is_mbm_total_enabled())
		rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);

	if (is_mbm_local_enabled())
		rdtgroup_unassign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
}

>
> Reinette
>
> [1] https://lore.kernel.org/all/ZwldvDBjEA3TSw2k@agluck-desk3.sc.intel.com/

-- 
Thanks
Babu Moger
Hi Babu,

On 10/15/24 8:43 AM, Moger, Babu wrote:
> Hi Reinette/Tony,
>
> On 10/14/24 21:39, wrote:
>> Hi Babu,
>>
>> On 10/14/24 9:35 AM, Moger, Babu wrote:
>>> On 12/31/69 18:00, Luck, Tony wrote:
>>>>
>>>> It is still the case that callers don't care about the return value.
>>>
>>> That is correct.
>>>
>>
>> ...
>>
>> I also think that rdtgroup_unassign_cntrs() deserves similar scrutiny. Even
>> more so since I do not think that the second rdtgroup_unassign_cntr_event()
>> should be prevented from running if the first rdtgroup_unassign_cntr_event()
>> fails.
>
> Sounds fine with me. Now it will look like this below.

Thank you for considering.

I assume that you will keep the rdtgroup_assign_cntrs() function comment? I
think it may need some small changes to go with the function now returning
void ... for example, saying "Each group *requires* two counters" and then
not failing when two counters cannot be allocated seems suspect.

For example (please feel free to improve):

Called when a new group is created. If "mbm_cntr_assign" mode is enabled,
counters are automatically assigned. Each group can accommodate two counters:
one for the total event and one for the local event. Assignments may fail
due to the limited number of counters. However, it is not necessary to
fail the group creation and thus no failure is returned. Users have the
option to modify the counter assignments after the group has been created.

> static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
> {
...
> }
>
> /*
>  * Called when a group is deleted. Counters are unassigned if they were
>  * in the assigned state.
>  */
> static void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
> {
...
> }

Looks good to me, thank you.

Reinette
Hi Reinette,

On 10/15/24 12:18, wrote:
> Hi Babu,
>
> On 10/15/24 8:43 AM, Moger, Babu wrote:
>> Hi Reinette/Tony,
>>
>> ...
>>
>> Sounds fine with me. Now it will look like this below.
>
> Thank you for considering.
>
> I assume that you will keep the rdtgroup_assign_cntrs() function comment? I
> think it may need some small changes to go with the function now returning
> void ... for example, saying "Each group *requires* two counters" and then
> not failing when two counters cannot be allocated seems suspect.
>
> For example (please feel free to improve):
>
> Called when a new group is created. If "mbm_cntr_assign" mode is enabled,
> counters are automatically assigned. Each group can accommodate two counters:
> one for the total event and one for the local event. Assignments may fail
> due to the limited number of counters. However, it is not necessary to
> fail the group creation and thus no failure is returned. Users have the
> option to modify the counter assignments after the group has been created.

Looks good.

Thanks

-- 
Thanks
Babu Moger
> Sounds fine with me. Now it will look like this below.
>
> static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
> {
...
> }
>
> /*
>  * Called when a group is deleted. Counters are unassigned if they were
>  * in the assigned state.
>  */
> static void rdtgroup_unassign_cntrs(struct rdtgroup *rdtgrp)
> {
...
> }

Much cleaner. Thanks.

-Tony
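Assembling the thread's conclusions, the assign helper would then look
roughly as below: Reinette's suggested function comment placed on top of
the void version Babu posted. This is a sketch of where the discussion
landed, not a committed revision of the patch.

/*
 * Called when a new group is created. If "mbm_cntr_assign" mode is enabled,
 * counters are automatically assigned. Each group can accommodate two
 * counters: one for the total event and one for the local event.
 * Assignments may fail due to the limited number of counters. However,
 * it is not necessary to fail the group creation and thus no failure is
 * returned. Users have the option to modify the counter assignments after
 * the group has been created.
 */
static void rdtgroup_assign_cntrs(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (!resctrl_arch_mbm_cntr_assign_enabled(r))
		return;

	if (is_mbm_total_enabled())
		rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_TOTAL_EVENT_ID);

	if (is_mbm_local_enabled())
		rdtgroup_assign_cntr_event(r, rdtgrp, NULL, QOS_L3_MBM_LOCAL_EVENT_ID);
}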