[PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that don't rely on offsets

Posted by Sean Christopherson 8 months, 2 weeks ago
Add macro-built helpers for testing, setting, and clearing MSRPM entries
without relying on precomputed offsets.  This sets the stage for eventually
removing general KVM use of precomputed offsets, which are quite confusing
and rather inefficient for the vast majority of KVM's usage.

Outside of merging L0 and L1 bitmaps for nested SVM, using u32-indexed
offsets and accesses is at best unnecessary, and at worst introduces extra
operations to retrieve the individual bit from within the u32 at that offset.
And simply calling them "offsets" is very confusing, as the "unit" of the
offset isn't immediately obvious.
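
For illustration (the MSR and locals below are only an example, not code from
this patch), compare the two ways of checking the write-intercept bit for
MSR 0xc0000080 (EFER), which falls in range 1 of the bitmap:

	/* Old scheme: u32-indexed offset, then bit math within that u32. */
	u32 offset = svm_msrpm_offset(0xc0000080);	/* index, in u32 units */
	u8 bit_write = 2 * (0xc0000080 & 0x0f) + 1;	/* write bit inside the u32 */
	unsigned long tmp = msrpm[offset];
	bool old_way = test_bit(bit_write, &tmp);

	/* New scheme: one flat bit number, derived from the MSR alone. */
	bool new_way = svm_test_msr_bitmap_write((void *)msrpm, 0xc0000080);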

Use the new helpers in set_msr_interception_bitmap() and
msr_write_intercepted() to verify the math and operations, but keep the
existing offset-based logic in set_msr_interception_bitmap() to sanity check
the "clear" and "set" operations.  Manipulating MSR interceptions isn't a
hot path and no kernel release is ever expected to contain this specific
version of set_msr_interception_bitmap() (it will be removed entirely in
the near future).

Add compile-time asserts to verify the bit number calculations, and also
to provide a simple demonstration of the layout (SVM and VMX use the same
concept of a bitmap, but with different layouts).
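
For reference (a rough sketch, not part of the patch), the layout those
asserts are describing is:

	/*
	 * range 0: MSRs 0x00000000 - 0x00001fff  =>  MSRPM bytes 0x0000 - 0x07ff
	 * range 1: MSRs 0xc0000000 - 0xc0001fff  =>  MSRPM bytes 0x0800 - 0x0fff
	 * range 2: MSRs 0xc0010000 - 0xc0011fff  =>  MSRPM bytes 0x1000 - 0x17ff
	 *
	 * Each MSR gets two adjacent bits: the even bit intercepts reads, the
	 * odd bit intercepts writes.
	 */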

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/svm.c | 30 ++++++++++++++--------------
 arch/x86/kvm/svm/svm.h | 44 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d97711bdbfc9..76d074440bcc 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -806,11 +806,6 @@ static bool valid_msr_intercept(u32 index)
 
 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 {
-	u8 bit_write;
-	unsigned long tmp;
-	u32 offset;
-	u32 *msrpm;
-
 	/*
 	 * For non-nested case:
 	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
@@ -820,17 +815,10 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
-				      to_svm(vcpu)->msrpm;
+	void *msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+					    to_svm(vcpu)->msrpm;
 
-	offset    = svm_msrpm_offset(msr);
-	bit_write = 2 * (msr & 0x0f) + 1;
-	tmp       = msrpm[offset];
-
-	if (KVM_BUG_ON(offset == MSR_INVALID, vcpu->kvm))
-		return false;
-
-	return test_bit(bit_write, &tmp);
+	return svm_test_msr_bitmap_write(msrpm, msr);
 }
 
 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
@@ -865,7 +853,17 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
 	read  ? __clear_bit(bit_read,  &tmp) : __set_bit(bit_read,  &tmp);
 	write ? __clear_bit(bit_write, &tmp) : __set_bit(bit_write, &tmp);
 
-	msrpm[offset] = tmp;
+	if (read)
+		svm_clear_msr_bitmap_read((void *)msrpm, msr);
+	else
+		svm_set_msr_bitmap_read((void *)msrpm, msr);
+
+	if (write)
+		svm_clear_msr_bitmap_write((void *)msrpm, msr);
+	else
+		svm_set_msr_bitmap_write((void *)msrpm, msr);
+
+	WARN_ON_ONCE(msrpm[offset] != (u32)tmp);
 
 	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
 	svm->nested.force_msr_bitmap_recalc = true;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 47a36a9a7fe5..e432cd7a7889 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -628,6 +628,50 @@ static_assert(SVM_MSRS_PER_RANGE == 8192);
 #define SVM_MSRPM_RANGE_1_BASE_MSR	0xc0000000
 #define SVM_MSRPM_RANGE_2_BASE_MSR	0xc0010000
 
+#define SVM_MSRPM_FIRST_MSR(range_nr)	\
+	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR)
+#define SVM_MSRPM_LAST_MSR(range_nr)	\
+	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR + SVM_MSRS_PER_RANGE - 1)
+
+#define SVM_MSRPM_BIT_NR(range_nr, msr)						\
+	(range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +			\
+	 (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR)
+
+#define SVM_MSRPM_SANITY_CHECK_BITS(range_nr)					\
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 1) ==	\
+	      range_nr * 2048 * 8 + 2);						\
+static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 7) ==	\
+	      range_nr * 2048 * 8 + 14);
+
+SVM_MSRPM_SANITY_CHECK_BITS(0);
+SVM_MSRPM_SANITY_CHECK_BITS(1);
+SVM_MSRPM_SANITY_CHECK_BITS(2);
+
+#define SVM_BUILD_MSR_BITMAP_CASE(bitmap, range_nr, msr, bitop, bit_rw)		\
+	case SVM_MSRPM_FIRST_MSR(range_nr) ... SVM_MSRPM_LAST_MSR(range_nr):	\
+		return bitop##_bit(SVM_MSRPM_BIT_NR(range_nr, msr) + bit_rw, bitmap);
+
+#define __BUILD_SVM_MSR_BITMAP_HELPER(rtype, action, bitop, access, bit_rw)	\
+static inline rtype svm_##action##_msr_bitmap_##access(unsigned long *bitmap,	\
+						       u32 msr)			\
+{										\
+	switch (msr) {								\
+	SVM_BUILD_MSR_BITMAP_CASE(bitmap, 0, msr, bitop, bit_rw)		\
+	SVM_BUILD_MSR_BITMAP_CASE(bitmap, 1, msr, bitop, bit_rw)		\
+	SVM_BUILD_MSR_BITMAP_CASE(bitmap, 2, msr, bitop, bit_rw)		\
+	default:								\
+		return (rtype)true;						\
+	}									\
+										\
+}
+#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
+	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
+	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
+
+BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
+
 #define MSR_INVALID				0xffffffffU
 
 #define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
-- 
2.49.0.1204.g71687c7c1d-goog
Re: [PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that don't rely on offsets
Posted by Paolo Bonzini 8 months, 1 week ago
On 5/30/25 01:39, Sean Christopherson wrote:
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 47a36a9a7fe5..e432cd7a7889 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -628,6 +628,50 @@ static_assert(SVM_MSRS_PER_RANGE == 8192);
>   #define SVM_MSRPM_RANGE_1_BASE_MSR	0xc0000000
>   #define SVM_MSRPM_RANGE_2_BASE_MSR	0xc0010000
>   
> +#define SVM_MSRPM_FIRST_MSR(range_nr)	\
> +	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR)
> +#define SVM_MSRPM_LAST_MSR(range_nr)	\
> +	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR + SVM_MSRS_PER_RANGE - 1)
> +
> +#define SVM_MSRPM_BIT_NR(range_nr, msr)						\
> +	(range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +			\
> +	 (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR)
> +
> +#define SVM_MSRPM_SANITY_CHECK_BITS(range_nr)					\
> +static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 1) ==	\
> +	      range_nr * 2048 * 8 + 2);						\
> +static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 7) ==	\
> +	      range_nr * 2048 * 8 + 14);
> +
> +SVM_MSRPM_SANITY_CHECK_BITS(0);
> +SVM_MSRPM_SANITY_CHECK_BITS(1);
> +SVM_MSRPM_SANITY_CHECK_BITS(2);

Replying here for patches 11/25/26.  None of this is needed, just write 
a function like this:

static inline u32 svm_msr_bit(u32 msr)
{
	u32 msr_base = msr & ~(SVM_MSRS_PER_RANGE - 1);
	if (msr_base == SVM_MSRPM_RANGE_0_BASE_MSR)
		return SVM_MSRPM_BIT_NR(0, msr);
	if (msr_base == SVM_MSRPM_RANGE_1_BASE_MSR)
		return SVM_MSRPM_BIT_NR(1, msr);
	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
		return SVM_MSRPM_BIT_NR(2, msr);
	return MSR_INVALID;
}

and you can throw away most of the other macros.  For example:

> +#define SVM_BUILD_MSR_BITMAP_CASE(bitmap, range_nr, msr, bitop, bit_rw)		\
> +	case SVM_MSRPM_FIRST_MSR(range_nr) ... SVM_MSRPM_LAST_MSR(range_nr):	\
> +		return bitop##_bit(SVM_MSRPM_BIT_NR(range_nr, msr) + bit_rw, bitmap);

... becomes a lot more lowercase:

static inline rtype svm_##action##_msr_bitmap_##access(
	unsigned long *bitmap, u32 msr)
{
	u32 bit = svm_msr_bit(msr);
	if (bit == MSR_INVALID)
		return true;
	return bitop##_bit(bit + bit_rw, bitmap);
}


In patch 25, also, you just get

static u32 svm_msrpm_offset(u32 msr)
{
	u32 bit = svm_msr_bit(msr);
	if (bit == MSR_INVALID)
		return MSR_INVALID;
	return bit / BITS_PER_BYTE;
}

And you change everything to -EINVAL in patch 26 to kill MSR_INVALID.

Another nit...

> +#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
> +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
> +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
> +
> +BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
> +BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
> +BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
Yes, it's a bit of duplication, but no need for the nesting, just do:

BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    write, 1)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, write, 1)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   read,  0)
BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   write, 1)

Otherwise, really nice.

Paolo
Re: [PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that don't rely on offsets
Posted by Sean Christopherson 8 months, 1 week ago
On Wed, Jun 04, 2025, Paolo Bonzini wrote:
> On 5/30/25 01:39, Sean Christopherson wrote:
> > diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> > index 47a36a9a7fe5..e432cd7a7889 100644
> > --- a/arch/x86/kvm/svm/svm.h
> > +++ b/arch/x86/kvm/svm/svm.h
> > @@ -628,6 +628,50 @@ static_assert(SVM_MSRS_PER_RANGE == 8192);
> >   #define SVM_MSRPM_RANGE_1_BASE_MSR	0xc0000000
> >   #define SVM_MSRPM_RANGE_2_BASE_MSR	0xc0010000
> > +#define SVM_MSRPM_FIRST_MSR(range_nr)	\
> > +	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR)
> > +#define SVM_MSRPM_LAST_MSR(range_nr)	\
> > +	(SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR + SVM_MSRS_PER_RANGE - 1)
> > +
> > +#define SVM_MSRPM_BIT_NR(range_nr, msr)						\
> > +	(range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +			\
> > +	 (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR)
> > +
> > +#define SVM_MSRPM_SANITY_CHECK_BITS(range_nr)					\
> > +static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 1) ==	\
> > +	      range_nr * 2048 * 8 + 2);						\
> > +static_assert(SVM_MSRPM_BIT_NR(range_nr, SVM_MSRPM_FIRST_MSR(range_nr) + 7) ==	\
> > +	      range_nr * 2048 * 8 + 14);
> > +
> > +SVM_MSRPM_SANITY_CHECK_BITS(0);
> > +SVM_MSRPM_SANITY_CHECK_BITS(1);
> > +SVM_MSRPM_SANITY_CHECK_BITS(2);
> 
> Replying here for patches 11/25/26.  None of this is needed, just write a
> function like this:
> 
> static inline u32 svm_msr_bit(u32 msr)
> {
> 	u32 msr_base = msr & ~(SVM_MSRS_PER_RANGE - 1);

Ooh, clever.

> 	if (msr_base == SVM_MSRPM_RANGE_0_BASE_MSR)
> 		return SVM_MSRPM_BIT_NR(0, msr);
> 	if (msr_base == SVM_MSRPM_RANGE_1_BASE_MSR)
> 		return SVM_MSRPM_BIT_NR(1, msr);
> 	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
> 		return SVM_MSRPM_BIT_NR(2, msr);
> 	return MSR_INVALID;

I initially had something like this, but I don't like the potential for typos,
e.g. to fat finger something like:

	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
		return SVM_MSRPM_BIT_NR(1, msr);

Which is how I ended up with the (admittedly ugly) CASE macros.  Would you be ok
keeping that wrinkle?  E.g.

	#define SVM_MSR_BIT_NR_CASE(range_nr, msr)					\
	case SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR:					\
		return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +		\
		       (msr - SVM_MSRPM_RANGE_## range_nr ##_BASE_MSR) * SVM_BITS_PER_MSR;

	static __always_inline int svm_msrpm_bit_nr(u32 msr)
	{
		switch (msr & ~(SVM_MSRS_PER_RANGE - 1)) {
		SVM_MSR_BIT_NR_CASE(0, msr)
		SVM_MSR_BIT_NR_CASE(1, msr)
		SVM_MSR_BIT_NR_CASE(2, msr)
		default:
			return -EINVAL;
		}
	}

Actually, better idea!  Hopefully.  With your masking trick, there's no need to
do subtraction to get the offset within a range, which means getting the bit/byte
number for an MSR can be done entirely programmatically.  And if we do that, then
the SVM_MSRPM_RANGE_xxx_BASE_MSR defines can go away, and the (very trivial)
copy+paste that I dislike also goes away.

Completely untested, but how about this?

	#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)

	static __always_inline int svm_msrpm_bit_nr(u32 msr)
	{
		int range_nr;

		switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
		case 0:
			range_nr = 0;
			break;
		case 0xc0000000:
			range_nr = 1;
			break;
		case 0xc0010000:
			range_nr = 2;
			break;
		default:
			return -EINVAL;
		}

		return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
		       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;
	}

	static inline int svm_msrpm_byte_nr(u32 msr)
	{
		return svm_msrpm_bit_nr(msr) / BITS_PER_BYTE;
	}

The open coded literals aren't pretty, but VMX does the same thing, precisely
because I didn't want any code besides the innermost helper dealing with the
msr => offset math.

> }
> 
> and you can throw away most of the other macros.  For example:
> 
> > +#define SVM_BUILD_MSR_BITMAP_CASE(bitmap, range_nr, msr, bitop, bit_rw)		\
> > +	case SVM_MSRPM_FIRST_MSR(range_nr) ... SVM_MSRPM_LAST_MSR(range_nr):	\
> > +		return bitop##_bit(SVM_MSRPM_BIT_NR(range_nr, msr) + bit_rw, bitmap);
> 
> ... becomes a lot more lowercase:
> 
> static inline rtype svm_##action##_msr_bitmap_##access(
> 	unsigned long *bitmap, u32 msr)
> {
> 	u32 bit = svm_msr_bit(msr);
> 	if (bit == MSR_INVALID)
> 		return true;
> 	return bitop##_bit(bit + bit_rw, bitmap);

Yeah, much cleaner.

> }
> 
> 
> In patch 25, also, you just get
> 
> static u32 svm_msrpm_offset(u32 msr)
> {
> 	u32 bit = svm_msr_bit(msr);
> 	if (bit == MSR_INVALID)
> 		return MSR_INVALID;
> 	return bit / BITS_PER_BYTE;
> }
> 
> And you change everything to -EINVAL in patch 26 to kill MSR_INVALID.
> 
> Another nit...
> 
> > +#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
> > +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
> > +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
> > +
> > +BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
> > +BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
> > +BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
> Yes, it's a bit of duplication, but no need for the nesting, just do:

I don't have a super strong preference, but I do want to be consistent between
VMX and SVM, and VMX has the nesting (unsurprisingly, also written by me).  And
for that, the nested macros add a bit more value due to reads vs writes being in
entirely different areas of the bitmap.

#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)

That could be mitigated to some extent by adding a #define to communicate the
offset, but IMO the nested macros are less ugly than that.
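
For reference, the helper body that __BUILD_VMX_MSR_BITMAP_HELPER generates looks
something like this (reproduced roughly from vmx.h, so treat the details as
approximate); the read/write distinction is a byte offset into the bitmap (0x0 vs
0x800) rather than a +0/+1 bit:

#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}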

> BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    read,  0)
> BUILD_SVM_MSR_BITMAP_HELPERS(bool, test,  test,    write, 1)
> BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, read,  0)
> BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear, write, 1)
> BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   read,  0)
> BUILD_SVM_MSR_BITMAP_HELPERS(void, set,   __set,   write, 1)
> 
> Otherwise, really nice.
> 
> Paolo
>
Re: [PATCH 11/28] KVM: SVM: Add helpers for accessing MSR bitmap that don't rely on offsets
Posted by Paolo Bonzini 8 months, 1 week ago
On 6/4/25 19:35, Sean Christopherson wrote:
> On Wed, Jun 04, 2025, Paolo Bonzini wrote:
>> Replying here for patches 11/25/26.  None of this is needed, just write a
>> function like this:
>>
>> static inline u32 svm_msr_bit(u32 msr)
>> {
>> 	u32 msr_base = msr & ~(SVM_MSRS_PER_RANGE - 1);
> 
> Ooh, clever.
> 
>> 	if (msr_base == SVM_MSRPM_RANGE_0_BASE_MSR)
>> 		return SVM_MSRPM_BIT_NR(0, msr);
>> 	if (msr_base == SVM_MSRPM_RANGE_1_BASE_MSR)
>> 		return SVM_MSRPM_BIT_NR(1, msr);
>> 	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
>> 		return SVM_MSRPM_BIT_NR(2, msr);
>> 	return MSR_INVALID;
> 
> I initially had something like this, but I don't like the potential for typos,
> e.g. to fat finger something like:
> 
> 	if (msr_base == SVM_MSRPM_RANGE_2_BASE_MSR)
> 		return SVM_MSRPM_BIT_NR(1, msr);
> 
> Which is how I ended up with the (admittedly ugly) CASE macros.  [...]
> Actually, better idea!  Hopefully.  With your masking trick, there's no need to
> do subtraction to get the offset within a range, which means getting the bit/byte
> number for an MSR can be done entirely programmatically.  And if we do that, then
> the SVM_MSRPM_RANGE_xxx_BASE_MSR defines can go away, and the (very trivial)
> copy+paste that I dislike also goes away.
> 
> Completely untested, but how about this?
> 
> 	#define SVM_MSRPM_OFFSET_MASK (SVM_MSRS_PER_RANGE - 1)
> 
> 	static __always_inline int svm_msrpm_bit_nr(u32 msr)

(yeah, after hitting send I noticed that msr->msrpm would have been better)

> 	{
> 		int range_nr;
> 
> 		switch (msr & ~SVM_MSRPM_OFFSET_MASK) {
> 		case 0:
> 			range_nr = 0;
> 			break;
> 		case 0xc0000000:
> 			range_nr = 1;
> 			break;
> 		case 0xc0010000:
> 			range_nr = 2;
> 			break;
> 		default:
> 			return -EINVAL;
> 		}

I actually was going to propose something very similar; I refrained only
because I wasn't sure whether there would be other remaining uses of
SVM_MSRPM_RANGE_?_BASE_MSR.  The above is nice.

> 		return range_nr * SVM_MSRPM_BYTES_PER_RANGE * BITS_PER_BYTE +
> 		       (msr & SVM_MSRPM_OFFSET_MASK) * SVM_BITS_PER_MSR;

Or this too:

   return ((range_nr * SVM_MSRS_PER_RANGE)
           + (msr & SVM_MSRPM_OFFSET_MASK)) * SVM_BITS_PER_MSR;

depending on personal taste.  A few less macros, a few more parentheses.

That removes the enjoyment of seeing everything collapse into a single 
LEA instruction (X*2+CONST), as was the case with SVM_MSRPM_BIT_NR.  But 
I agree that these versions are about as nice as the code can be made.

> The open coded literals aren't pretty, but VMX does the same thing, precisely
> because I didn't want any code besides the innermost helper dealing with the
> msr => offset math.

>>> +#define BUILD_SVM_MSR_BITMAP_HELPERS(ret_type, action, bitop)			\
>>> +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0)	\
>>> +	__BUILD_SVM_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 1)
>>> +
>>> +BUILD_SVM_MSR_BITMAP_HELPERS(bool, test, test)
>>> +BUILD_SVM_MSR_BITMAP_HELPERS(void, clear, __clear)
>>> +BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
>> Yes, it's a bit of duplication, but no need for the nesting, just do:
> 
> I don't have a super strong preference, but I do want to be consistent between
> VMX and SVM, and VMX has the nesting (unsurprisingly, also written by me).  And
> for that, the nested macros add a bit more value due to reads vs writes being in
> entirely different areas of the bitmap.

Yeah, fair enough.  Since it's copied from VMX it makes sense.

Paolo