[PATCH v2 25/45] arm_mpam: resctrl: Add support for 'MB' resource
Posted by Ben Horgan 1 month, 3 weeks ago
From: James Morse <james.morse@arm.com>

resctrl supports 'MB' as a percentage throttling of traffic somewhere
after the L3. This is the control that mba_sc uses, so ideally the class
chosen should be as close as possible to the counters used for mbm_local.

MB's percentage control should be backed either with the fixed point
fraction MBW_MAX. The bandwidth portion bitmap is not used as it's tricky
to pick which bits to use to avoid contention, and it may be possible to
expose this as something other than a percentage in the future.
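
As a worked example of the granularity this exposes (hypothetical
hardware, assuming bwa_wd = 3):

	/*
	 * bwa_wd = 3 means the 0.xxx fixed point fraction implements
	 * 2^3 = 8 steps across 0..100%, so one implemented step moves
	 * the throttle by up to 12.5%. get_mba_granularity() reports
	 * DIV_ROUND_UP(MAX_MBA_BW, 1 << 3) = DIV_ROUND_UP(100, 8) = 13,
	 * rounding up so the reported granularity is never optimistic.
	 */

Percentages that don't land exactly on an implemented step are quantised
by the percent_to_mbw_max()/mbw_max_to_percent() round trip below.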

CC: Zeng Heng <zengheng4@huawei.com>
Co-developed-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ben Horgan <ben.horgan@arm.com>
---
 drivers/resctrl/mpam_resctrl.c | 212 ++++++++++++++++++++++++++++++++-
 1 file changed, 211 insertions(+), 1 deletion(-)

diff --git a/drivers/resctrl/mpam_resctrl.c b/drivers/resctrl/mpam_resctrl.c
index a20656e49edc..e376841c5596 100644
--- a/drivers/resctrl/mpam_resctrl.c
+++ b/drivers/resctrl/mpam_resctrl.c
@@ -247,6 +247,33 @@ static bool cache_has_usable_cpor(struct mpam_class *class)
 	return class->props.cpbm_wd <= 32;
 }
 
+static bool mba_class_use_mbw_max(struct mpam_props *cprops)
+{
+	return (mpam_has_feature(mpam_feat_mbw_max, cprops) &&
+		cprops->bwa_wd);
+}
+
+static bool class_has_usable_mba(struct mpam_props *cprops)
+{
+	return mba_class_use_mbw_max(cprops);
+}
+
+/*
+ * Calculate the worst-case percentage change from each implemented step
+ * in the control.
+ */
+static u32 get_mba_granularity(struct mpam_props *cprops)
+{
+	if (!mba_class_use_mbw_max(cprops))
+		return 0;
+
+	/*
+	 * bwa_wd is the number of bits implemented in the 0.xxx
+	 * fixed point fraction. 1 bit is 50%, 2 is 25% etc.
+	 */
+	return DIV_ROUND_UP(MAX_MBA_BW, 1 << cprops->bwa_wd);
+}
+
 /*
  * Each fixed-point hardware value architecturally represents a range
  * of values: the full range 0% - 100% is split contiguously into
@@ -287,6 +314,96 @@ static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops)
 	return val;
 }
 
+static u32 get_mba_min(struct mpam_props *cprops)
+{
+	u32 val = 0;
+
+	if (mba_class_use_mbw_max(cprops))
+		val = mbw_max_to_percent(val, cprops);
+	else
+		WARN_ON_ONCE(1);
+
+	return val;
+}
+
+/* Fill tmp_cpumask with the CPUs that share this CPU's L3 cache */
+static int find_l3_equivalent_bitmask(int cpu, cpumask_var_t tmp_cpumask)
+{
+	u32 cache_id = get_cpu_cacheinfo_id(cpu, 3);
+
+	lockdep_assert_cpus_held();
+
+	return mpam_get_cpumask_from_cache_id(cache_id, 3, tmp_cpumask);
+}
+
+/*
+ * topology_matches_l3() - Is the provided class the same shape as L3
+ * @victim:		The class we'd like to pretend is L3.
+ *
+ * resctrl expects all the world's a Xeon, and all counters are on the
+ * L3. We play fast and loose with this, mapping counters on other
+ * classes - provided the CPU->domain mapping is the same kind of shape.
+ *
+ * Using cacheinfo directly would make this work even if resctrl can't
+ * use the L3 - but cacheinfo can't tell us anything about offline CPUs.
+ * Using the L3 resctrl domain list also depends on CPUs being online.
+ * Using the mpam_class we picked for L3 so we can use its domain list
+ * assumes that there are MPAM controls on the L3.
+ * Instead, this path eventually uses the mpam_get_cpumask_from_cache_id()
+ * helper which can tell us about offline CPUs ... but getting the cache_id
+ * to start with relies on at least one CPU per L3 cache being online at
+ * boot.
+ *
+ * Walk the victim component list and compare the affinity mask with the
+ * corresponding L3. The topology matches if each victim:component's affinity
+ * mask is the same as the CPU's corresponding L3's. These lists/masks are
+ * computed from firmware tables so don't change at runtime.
+ */
+static bool topology_matches_l3(struct mpam_class *victim)
+{
+	int cpu, err;
+	struct mpam_component *victim_iter;
+	cpumask_var_t __free(free_cpumask_var) tmp_cpumask;
+
+	if (!alloc_cpumask_var(&tmp_cpumask, GFP_KERNEL))
+		return false;
+
+	guard(srcu)(&mpam_srcu);
+	list_for_each_entry_srcu(victim_iter, &victim->components, class_list,
+				 srcu_read_lock_held(&mpam_srcu)) {
+		if (cpumask_empty(&victim_iter->affinity)) {
+			pr_debug("class %u has CPU-less component %u - can't match L3!\n",
+				 victim->level, victim_iter->comp_id);
+			return false;
+		}
+
+		cpu = cpumask_any(&victim_iter->affinity);
+		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+			return false;
+
+		cpumask_clear(tmp_cpumask);
+		err = find_l3_equivalent_bitmask(cpu, tmp_cpumask);
+		if (err) {
+			pr_debug("Failed to find L3's equivalent component to class %u component %u\n",
+				 victim->level, victim_iter->comp_id);
+			return false;
+		}
+
+		/* Any differing bits in the affinity mask? */
+		if (!cpumask_equal(tmp_cpumask, &victim_iter->affinity)) {
+			pr_debug("class %u component %u has Mismatched CPU mask with L3 equivalent\n"
+				 "L3:%*pbl != victim:%*pbl\n",
+				 victim->level, victim_iter->comp_id,
+				 cpumask_pr_args(tmp_cpumask),
+				 cpumask_pr_args(&victim_iter->affinity));
+
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /* Test whether we can export MPAM_CLASS_CACHE:{2,3}? */
 static void mpam_resctrl_pick_caches(void)
 {
@@ -329,10 +446,63 @@ static void mpam_resctrl_pick_caches(void)
 	}
 }
 
+static void mpam_resctrl_pick_mba(void)
+{
+	struct mpam_class *class, *candidate_class = NULL;
+	struct mpam_resctrl_res *res;
+
+	lockdep_assert_cpus_held();
+
+	guard(srcu)(&mpam_srcu);
+	list_for_each_entry_srcu(class, &mpam_classes, classes_list,
+				 srcu_read_lock_held(&mpam_srcu)) {
+		struct mpam_props *cprops = &class->props;
+
+		if (class->level < 3) {
+			pr_debug("class %u is before L3\n", class->level);
+			continue;
+		}
+
+		if (!class_has_usable_mba(cprops)) {
+			pr_debug("class %u has no bandwidth control\n",
+				 class->level);
+			continue;
+		}
+
+		if (!cpumask_equal(&class->affinity, cpu_possible_mask)) {
+			pr_debug("class %u has missing CPUs\n", class->level);
+			continue;
+		}
+
+		if (!topology_matches_l3(class)) {
+			pr_debug("class %u topology doesn't match L3\n",
+				 class->level);
+			continue;
+		}
+
+		/*
+		 * mba_sc reads the mbm_local counter, and waggles the MBA
+		 * controls. mbm_local is implicitly part of the L3, so pick
+		 * a class to back MBA that is as close as possible to the L3.
+		 */
+		if (!candidate_class || class->level < candidate_class->level)
+			candidate_class = class;
+	}
+
+	if (candidate_class) {
+		pr_debug("selected class %u to back MBA\n",
+			 candidate_class->level);
+		res = &mpam_resctrl_controls[RDT_RESOURCE_MBA];
+		res->class = candidate_class;
+		exposed_alloc_capable = true;
+	}
+}
+
 static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
 				     enum resctrl_res_level type)
 {
 	struct mpam_class *class = res->class;
+	struct mpam_props *cprops = &class->props;
 	struct rdt_resource *r = &res->resctrl_res;
 
 	switch (r->rid) {
@@ -361,6 +531,20 @@ static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
 		 * 'all the bits' is the correct answer here.
 		 */
 		r->cache.shareable_bits = resctrl_get_default_ctrl(r);
+		break;
+	case RDT_RESOURCE_MBA:
+		r->alloc_capable = true;
+		r->schema_fmt = RESCTRL_SCHEMA_RANGE;
+		r->ctrl_scope = RESCTRL_L3_CACHE;
+
+		r->membw.delay_linear = true;
+		r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
+		r->membw.min_bw = get_mba_min(cprops);
+		r->membw.max_bw = MAX_MBA_BW;
+		r->membw.bw_gran = get_mba_granularity(cprops);
+
+		r->name = "MB";
+
 		break;
 	default:
 		break;
@@ -376,7 +560,17 @@ static int mpam_resctrl_pick_domain_id(int cpu, struct mpam_component *comp)
 	if (class->type == MPAM_CLASS_CACHE)
 		return comp->comp_id;
 
-	/* TODO: repaint domain ids to match the L3 domain ids */
+	if (topology_matches_l3(class)) {
+		/* Use the corresponding L3 component ID as the domain ID */
+		int id = get_cpu_cacheinfo_id(cpu, 3);
+
+		/* Implies topology_matches_l3() made a mistake */
+		if (WARN_ON_ONCE(id == -1))
+			return comp->comp_id;
+
+		return id;
+	}
+
 	/*
 	 * Otherwise, expose the ID used by the firmware table code.
 	 */
@@ -418,6 +612,12 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
 	case RDT_RESOURCE_L3:
 		configured_by = mpam_feat_cpor_part;
 		break;
+	case RDT_RESOURCE_MBA:
+		if (mpam_has_feature(mpam_feat_mbw_max, cprops)) {
+			configured_by = mpam_feat_mbw_max;
+			break;
+		}
+		fallthrough;
 	default:
 		return resctrl_get_default_ctrl(r);
 	}
@@ -429,6 +629,8 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
 	switch (configured_by) {
 	case mpam_feat_cpor_part:
 		return cfg->cpbm;
+	case mpam_feat_mbw_max:
+		return mbw_max_to_percent(cfg->mbw_max, cprops);
 	default:
 		return resctrl_get_default_ctrl(r);
 	}
@@ -473,6 +675,13 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
 		cfg.cpbm = cfg_val;
 		mpam_set_feature(mpam_feat_cpor_part, &cfg);
 		break;
+	case RDT_RESOURCE_MBA:
+		if (mpam_has_feature(mpam_feat_mbw_max, cprops)) {
+			cfg.mbw_max = percent_to_mbw_max(cfg_val, cprops);
+			mpam_set_feature(mpam_feat_mbw_max, &cfg);
+			break;
+		}
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
@@ -754,6 +963,7 @@ int mpam_resctrl_setup(void)
 
 	/* Find some classes to use for controls */
 	mpam_resctrl_pick_caches();
+	mpam_resctrl_pick_mba();
 
 	/* Initialise the resctrl structures from the classes */
 	for (enum resctrl_res_level i = 0; i < RDT_NUM_RESOURCES; i++) {
-- 
2.43.0
Re: [PATCH v2 0/45] arm_mpam: Add KVM/arm64 and resctrl glue code
Posted by Zeng Heng 1 month, 2 weeks ago
> One major departure from the previous snapshot branches referenced in the
> base driver series is that the same MPAM setting are used for kernel-space
> and user-space. That is, MPAM1_EL1 is set to the same value as MPAM0_EL1
> rather than keeping the default value. The advantages of this are that it
> is closer to the x86 model where the closid is globally applicable, all
> partids are usable from user-space and user-space can't bypass MPAM
> controls by doing the work in the kernel. However, this causes some
> priority inversion where a high priority task waits to take a mutex held by
> another whose resources are restricted by MPAM. It also adds some extra
> isb(). I would be interested in opinions/data on the policy for MPAM in
> kernel space, i.e how MPAM1_EL1 is set.

Another advantage is that, given the small size of the L2 cache,
frequent switching of MPAM configurations between kernel and user modes
can cause cache-capacity jitter, making it difficult to isolate
interference from noisy neighbours.

However, in addition to the issues mentioned above, updating the
MPAM1_EL1 configuration also exposes interrupt handling to the MPAM
settings of the current task.

I still agree with the current modification of setting MPAM1_EL1 to the
same value as MPAM0_EL1. However, the Arm MPAM hardware supports more
flexible configuration schemes than x86 RDT, and another approach is
also worth considering: software could let a control group choose
whether kernel mode follows the user-mode MPAM settings, or whether the
kernel-mode configuration is delegated to the default control group,
though this may change the existing user interface.
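
For concreteness, the mirroring policy discussed above amounts to
something like the sketch below on the task-switch path. This is only a
rough illustration: mpam_thread_switch() and regval are made-up names,
while write_sysreg_s(), SYS_MPAM0_EL1/SYS_MPAM1_EL1 and isb() are the
standard arm64 helpers.

	static void mpam_thread_switch(u64 regval)
	{
		/* Apply the task's PARTID/PMG to user-space (EL0) accesses... */
		write_sysreg_s(regval, SYS_MPAM0_EL1);
		/* ...and mirror the same value for kernel (EL1) accesses. */
		write_sysreg_s(regval, SYS_MPAM1_EL1);
		/* Ensure subsequent memory traffic uses the new PARTID. */
		isb();
	}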


Best Regards,
Zeng Heng
Re: [PATCH v2 25/45] arm_mpam: resctrl: Add support for 'MB' resource
Posted by Jonathan Cameron 1 month ago
On Fri, 19 Dec 2025 18:11:27 +0000
Ben Horgan <ben.horgan@arm.com> wrote:

> From: James Morse <james.morse@arm.com>
> 
> resctrl supports 'MB' as a percentage throttling of traffic somewhere
> after the L3. This is the control that mba_sc uses, so ideally the class
> chosen should be as close as possible to the counters used for mbm_local.
> 
> MB's percentage control should be backed either with the fixed point

either's "or" is missing.  (sentence should include the second choice)

> fraction MBW_MAX. The bandwidth portion bitmap is not used as it's tricky
> to pick which bits to use to avoid contention, and it may be possible to
> expose this as something other than a percentage in the future.
> 
> CC: Zeng Heng <zengheng4@huawei.com>
> Co-developed-by: Dave Martin <Dave.Martin@arm.com>
> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
> Signed-off-by: James Morse <james.morse@arm.com>
> Signed-off-by: Ben Horgan <ben.horgan@arm.com>

As mentioned in earlier review I'd like an overview doc of the heuristics
used to map MPAM onto resctrl's "everything is a Xeon" view of the world.

At least some of our platforms are far enough from this view that the
heuristics fail (others can provide more info on that than I can).
That's fine as I'd rather not squash something inappropriate into a
viewpoint that doesn't fit. Better to leave those for later solutions!


Otherwise, just minor comments inline.

Jonathan

> ---
>  drivers/resctrl/mpam_resctrl.c | 212 ++++++++++++++++++++++++++++++++-
>  1 file changed, 211 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/resctrl/mpam_resctrl.c b/drivers/resctrl/mpam_resctrl.c
> index a20656e49edc..e376841c5596 100644
> --- a/drivers/resctrl/mpam_resctrl.c
> +++ b/drivers/resctrl/mpam_resctrl.c

...

> +static u32 get_mba_min(struct mpam_props *cprops)
> +{
> +	u32 val = 0;
> +
> +	if (mba_class_use_mbw_max(cprops))
> +		val = mbw_max_to_percent(val, cprops);
> +	else
> +		WARN_ON_ONCE(1);
> +
> +	return val;

I'd be tempted to handle the error case first to make
clear this is really just a sanity checking wrapper around the max_to_percent()
function.

	if (!mba_class_use_mbw_max(cprops)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	return mbw_max_to_percent(0, cprops);

> +}

> +/*
> + * topology_matches_l3() - Is the provided class the same shape as L3
> + * @victim:		The class we'd like to pretend is L3.
> + *
> + * resctrl expects all the world's a Xeon, and all counters are on the
> + * L3. We play fast and loose with this, mapping counters on other
> + * classes - provided the CPU->domain mapping is the same kind of shape.
> + *
> + * Using cacheinfo directly would make this work even if resctrl can't
> + * use the L3 - but cacheinfo can't tell us anything about offline CPUs.
> + * Using the L3 resctrl domain list also depends on CPUs being online.
> + * Using the mpam_class we picked for L3 so we can use its domain list
> + * assumes that there are MPAM controls on the L3.
> + * Instead, this path eventually uses the mpam_get_cpumask_from_cache_id()
> + * helper which can tell us about offline CPUs ... but getting the cache_id
> + * to start with relies on at least one CPU per L3 cache being online at
> + * boot.

So the usual mess of maxcpus=1 breaks it for anything other than the first L3?
I'm not entirely against that but it may be a little unexpected.

> + *
> + * Walk the victim component list and compare the affinity mask with the
> + * corresponding L3. The topology matches if each victim:component's affinity
> + * mask is the same as the CPU's corresponding L3's. These lists/masks are
> + * computed from firmware tables so don't change at runtime.
> + */
> +static bool topology_matches_l3(struct mpam_class *victim)



> +
>  static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
>  				     enum resctrl_res_level type)
>  {
>  	struct mpam_class *class = res->class;
> +	struct mpam_props *cprops = &class->props;
>  	struct rdt_resource *r = &res->resctrl_res;
>  
>  	switch (r->rid) {
> @@ -361,6 +531,20 @@ static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
>  		 * 'all the bits' is the correct answer here.
>  		 */
>  		r->cache.shareable_bits = resctrl_get_default_ctrl(r);
> +		break;
> +	case RDT_RESOURCE_MBA:
> +		r->alloc_capable = true;
> +		r->schema_fmt = RESCTRL_SCHEMA_RANGE;
> +		r->ctrl_scope = RESCTRL_L3_CACHE;
> +
> +		r->membw.delay_linear = true;
> +		r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
> +		r->membw.min_bw = get_mba_min(cprops);
> +		r->membw.max_bw = MAX_MBA_BW;
> +		r->membw.bw_gran = get_mba_granularity(cprops);
> +
> +		r->name = "MB";
> +

Probably should be consistent with either a blank line before break or not.
That might also make the diff a little nicer.

>  		break;
>  	default:
>  		break;
Re: [PATCH v2 25/45] arm_mpam: resctrl: Add support for 'MB' resource
Posted by Ben Horgan 1 month ago
Hi Jonathan,

On 1/6/26 12:19, Jonathan Cameron wrote:
> On Fri, 19 Dec 2025 18:11:27 +0000
> Ben Horgan <ben.horgan@arm.com> wrote:
> 
>> From: James Morse <james.morse@arm.com>
>>
>> resctrl supports 'MB' as a percentage throttling of traffic somewhere
>> after the L3. This is the control that mba_sc uses, so ideally the class
>> chosen should be as close as possible to the counters used for mbm_local.
>>
>> MB's percentage control should be backed either with the fixed point
> 
> either's "or" is missing.  (sentence should include the second choice)
> 
>> fraction MBW_MAX. The bandwidth portion bitmap is not used as it's tricky
>> to pick which bits to use to avoid contention, and it may be possible to
>> expose this as something other than a percentage in the future.
>>
>> CC: Zeng Heng <zengheng4@huawei.com>
>> Co-developed-by: Dave Martin <Dave.Martin@arm.com>
>> Signed-off-by: Dave Martin <Dave.Martin@arm.com>
>> Signed-off-by: James Morse <james.morse@arm.com>
>> Signed-off-by: Ben Horgan <ben.horgan@arm.com>
> 
> As mentioned in earlier review I'd like an overview doc of the heuristics
> used to map MPAM onto resctrl's "everything is a Xeon" view of the world.
> 
> At least some of our platforms are far enough from this view that the
> heuristics fail (others can provide more info on that than I can).
> That's fine as I'd rather not squash something inappropriate into a
> viewpoint that doesn't fit. Better to leave those for later solutions!

Yes, this is still in the pipeline.

> 
> 
> Otherwise, just minor comments inline.
> 
> Jonathan
> 
>> ---
>>  drivers/resctrl/mpam_resctrl.c | 212 ++++++++++++++++++++++++++++++++-
>>  1 file changed, 211 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/resctrl/mpam_resctrl.c b/drivers/resctrl/mpam_resctrl.c
>> index a20656e49edc..e376841c5596 100644
>> --- a/drivers/resctrl/mpam_resctrl.c
>> +++ b/drivers/resctrl/mpam_resctrl.c
> 
> ...
> 
>> +static u32 get_mba_min(struct mpam_props *cprops)
>> +{
>> +	u32 val = 0;
>> +
>> +	if (mba_class_use_mbw_max(cprops))
>> +		val = mbw_max_to_percent(val, cprops);
>> +	else
>> +		WARN_ON_ONCE(1);
>> +
>> +	return val;
> 
> I'd be tempted to handle the error case first to make
> clear this is really just a sanity checking wrapper around the max_to_percent()
> function.
> 
> 	if (!mba_class_use_mbw_max(cprops)) {
> 		WARN_ON_ONCE(1);
> 		return 0;
> 	}
> 
> 	return mbw_max_to_percent(0, cprops);

Sure, I've updated this.

> 
>> +}
> 
>> +/*
>> + * topology_matches_l3() - Is the provided class the same shape as L3
>> + * @victim:		The class we'd like to pretend is L3.
>> + *
>> + * resctrl expects all the world's a Xeon, and all counters are on the
>> + * L3. We play fast and loose with this, mapping counters on other
>> + * classes - provided the CPU->domain mapping is the same kind of shape.
>> + *
>> + * Using cacheinfo directly would make this work even if resctrl can't
>> + * use the L3 - but cacheinfo can't tell us anything about offline CPUs.
>> + * Using the L3 resctrl domain list also depends on CPUs being online.
>> + * Using the mpam_class we picked for L3 so we can use its domain list
>> + * assumes that there are MPAM controls on the L3.
>> + * Instead, this path eventually uses the mpam_get_cpumask_from_cache_id()
>> + * helper which can tell us about offline CPUs ... but getting the cache_id
>> + * to start with relies on at least one CPU per L3 cache being online at
>> + * boot.
> 
> So the usual mess of maxcpus=1 breaks it for anything other than the first L3?
> I'm not entirely against that but it may be a little unexpected.

Yes, but this doesn't really add any new restriction: the mpam base
driver only enables MPAM once all (known) MSCs have been probed, and
for an MSC on a cache we only ever access it from the CPUs local to
that cache.

> 
>> + *
>> + * Walk the victim component list and compare the affinity mask with the
>> + * corresponding L3. The topology matches if each victim:component's affinity
>> + * mask is the same as the CPU's corresponding L3's. These lists/masks are
>> + * computed from firmware tables so don't change at runtime.
>> + */
>> +static bool topology_matches_l3(struct mpam_class *victim)
> 
> 
> 
>> +
>>  static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
>>  				     enum resctrl_res_level type)
>>  {
>>  	struct mpam_class *class = res->class;
>> +	struct mpam_props *cprops = &class->props;
>>  	struct rdt_resource *r = &res->resctrl_res;
>>  
>>  	switch (r->rid) {
>> @@ -361,6 +531,20 @@ static int mpam_resctrl_control_init(struct mpam_resctrl_res *res,
>>  		 * 'all the bits' is the correct answer here.
>>  		 */
>>  		r->cache.shareable_bits = resctrl_get_default_ctrl(r);
>> +		break;
>> +	case RDT_RESOURCE_MBA:
>> +		r->alloc_capable = true;
>> +		r->schema_fmt = RESCTRL_SCHEMA_RANGE;
>> +		r->ctrl_scope = RESCTRL_L3_CACHE;
>> +
>> +		r->membw.delay_linear = true;
>> +		r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
>> +		r->membw.min_bw = get_mba_min(cprops);
>> +		r->membw.max_bw = MAX_MBA_BW;
>> +		r->membw.bw_gran = get_mba_granularity(cprops);
>> +
>> +		r->name = "MB";
>> +
> 
> Probably should be consistent with either a blank line before break or not.
> That might also make the diff a little nicer.

Dropped the blank line.

> 
>>  		break;
>>  	default:
>>  		break;
> 
> 

Thanks,

Ben