[PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling

Chao Gao posted 2 patches 4 months ago
[PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Chao Gao 4 months ago
Extract a common function from MSR interception disabling logic and create
disabling and enabling functions based on it. This removes most of the
duplicated code for MSR interception disabling/enabling.

No functional change intended.

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
 arch/x86/kvm/svm/svm.c | 23 +++++++++--------------
 arch/x86/kvm/svm/svm.h | 10 +---------
 arch/x86/kvm/vmx/vmx.c | 25 +++++++++----------------
 arch/x86/kvm/vmx/vmx.h | 10 +---------
 4 files changed, 20 insertions(+), 48 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5453478d1ca3..cc5f81afd8af 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -685,21 +685,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 	return svm_test_msr_bitmap_write(msrpm, msr);
 }
 
-void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	void *msrpm = svm->msrpm;
 
 	/* Don't disable interception for MSRs userspace wants to handle. */
 	if (type & MSR_TYPE_R) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
 			svm_clear_msr_bitmap_read(msrpm, msr);
 		else
 			svm_set_msr_bitmap_read(msrpm, msr);
 	}
 
 	if (type & MSR_TYPE_W) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
 			svm_clear_msr_bitmap_write(msrpm, msr);
 		else
 			svm_set_msr_bitmap_write(msrpm, msr);
@@ -709,19 +709,14 @@ void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	svm->nested.force_msr_bitmap_recalc = true;
 }
 
-void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
-	void *msrpm = svm->msrpm;
-
-	if (type & MSR_TYPE_R)
-		svm_set_msr_bitmap_read(msrpm, msr);
-
-	if (type & MSR_TYPE_W)
-		svm_set_msr_bitmap_write(msrpm, msr);
+	svm_set_intercept_for_msr(vcpu, msr, type, false);
+}
 
-	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
-	svm->nested.force_msr_bitmap_recalc = true;
+void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+	svm_set_intercept_for_msr(vcpu, msr, type, true);
 }
 
 void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8d3279563261..faa478d9fc62 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -696,15 +696,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
 
 void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-
-static inline void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
-					     int type, bool enable_intercept)
-{
-	if (enable_intercept)
-		svm_enable_intercept_for_msr(vcpu, msr, type);
-	else
-		svm_disable_intercept_for_msr(vcpu, msr, type);
-}
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
 
 /* nested.c */
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 277c6b5b5d5f..559261b18512 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3952,7 +3952,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
 	vmx->nested.force_msr_bitmap_recalc = true;
 }
 
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -3963,35 +3963,28 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	vmx_msr_bitmap_l01_changed(vmx);
 
 	if (type & MSR_TYPE_R) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
 			vmx_clear_msr_bitmap_read(msr_bitmap, msr);
 		else
 			vmx_set_msr_bitmap_read(msr_bitmap, msr);
 	}
 
 	if (type & MSR_TYPE_W) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
 			vmx_clear_msr_bitmap_write(msr_bitmap, msr);
 		else
 			vmx_set_msr_bitmap_write(msr_bitmap, msr);
 	}
 }
 
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
-
-	if (!cpu_has_vmx_msr_bitmap())
-		return;
-
-	vmx_msr_bitmap_l01_changed(vmx);
-
-	if (type & MSR_TYPE_R)
-		vmx_set_msr_bitmap_read(msr_bitmap, msr);
+	vmx_set_intercept_for_msr(vcpu, msr, type, false);
+}
 
-	if (type & MSR_TYPE_W)
-		vmx_set_msr_bitmap_write(msr_bitmap, msr);
+void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+	vmx_set_intercept_for_msr(vcpu, msr, type, true);
 }
 
 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a26fe3d9e1d2..31acd8c726e3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -388,21 +388,13 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
 
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 
 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
 
-static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
-					     int type, bool value)
-{
-	if (value)
-		vmx_enable_intercept_for_msr(vcpu, msr, type);
-	else
-		vmx_disable_intercept_for_msr(vcpu, msr, type);
-}
-
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
 /*
-- 
2.47.1
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Sean Christopherson 3 months, 2 weeks ago
On Thu, Jun 12, 2025, Chao Gao wrote:
> Extract a common function from MSR interception disabling logic and create
> disabling and enabling functions based on it. This removes most of the
> duplicated code for MSR interception disabling/enabling.

Awesome!

> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 5453478d1ca3..cc5f81afd8af 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -685,21 +685,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
>  	return svm_test_msr_bitmap_write(msrpm, msr);
>  }
>  
> -void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)

I don't love "enable", because without the context of the wrapper to clarify the
polarity, it might not be super obvious that "enable" means "enable interception".

I'm leaning towards "set", which is also flawed, mainly because it conflicts with
the "set" in the function name.  But IMO, that's more of a problem with the function
name.

Anyone have a strong opinion one way or the other?

> -void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)

The wrappers can be "static inline" in the header.

If no one objects to s/enable/set (or has an alternative name), I'll apply this:

---
 arch/x86/kvm/svm/svm.c | 21 +++------------------
 arch/x86/kvm/svm/svm.h | 18 ++++++++++--------
 arch/x86/kvm/vmx/vmx.c | 23 +++--------------------
 arch/x86/kvm/vmx/vmx.h | 24 +++++++++++++-----------
 4 files changed, 29 insertions(+), 57 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3c5e82ed6bd4..803574920e41 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -679,21 +679,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
 	return svm_test_msr_bitmap_write(msrpm, msr);
 }
 
-void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	void *msrpm = svm->msrpm;
 
 	/* Don't disable interception for MSRs userspace wants to handle. */
 	if (type & MSR_TYPE_R) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
 			svm_clear_msr_bitmap_read(msrpm, msr);
 		else
 			svm_set_msr_bitmap_read(msrpm, msr);
 	}
 
 	if (type & MSR_TYPE_W) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
 			svm_clear_msr_bitmap_write(msrpm, msr);
 		else
 			svm_set_msr_bitmap_write(msrpm, msr);
@@ -703,21 +703,6 @@ void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	svm->nested.force_msr_bitmap_recalc = true;
 }
 
-void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-	void *msrpm = svm->msrpm;
-
-	if (type & MSR_TYPE_R)
-		svm_set_msr_bitmap_read(msrpm, msr);
-
-	if (type & MSR_TYPE_W)
-		svm_set_msr_bitmap_write(msrpm, msr);
-
-	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
-	svm->nested.force_msr_bitmap_recalc = true;
-}
-
 void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
 {
 	unsigned int order = get_order(size);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8d3279563261..dabd69d6fd15 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -694,16 +694,18 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
 				     int trig_mode, int vec);
 
-void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
 
-static inline void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
-					     int type, bool enable_intercept)
+static inline void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+						 u32 msr, int type)
 {
-	if (enable_intercept)
-		svm_enable_intercept_for_msr(vcpu, msr, type);
-	else
-		svm_disable_intercept_for_msr(vcpu, msr, type);
+	svm_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+						u32 msr, int type)
+{
+	svm_set_intercept_for_msr(vcpu, msr, type, true);
 }
 
 /* nested.c */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e2de4748d662..f81710d7d992 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3963,7 +3963,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
 	vmx->nested.force_msr_bitmap_recalc = true;
 }
 
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -3974,37 +3974,20 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 	vmx_msr_bitmap_l01_changed(vmx);
 
 	if (type & MSR_TYPE_R) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
+		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
 			vmx_clear_msr_bitmap_read(msr_bitmap, msr);
 		else
 			vmx_set_msr_bitmap_read(msr_bitmap, msr);
 	}
 
 	if (type & MSR_TYPE_W) {
-		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
+		if (!set && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
 			vmx_clear_msr_bitmap_write(msr_bitmap, msr);
 		else
 			vmx_set_msr_bitmap_write(msr_bitmap, msr);
 	}
 }
 
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
-
-	if (!cpu_has_vmx_msr_bitmap())
-		return;
-
-	vmx_msr_bitmap_l01_changed(vmx);
-
-	if (type & MSR_TYPE_R)
-		vmx_set_msr_bitmap_read(msr_bitmap, msr);
-
-	if (type & MSR_TYPE_W)
-		vmx_set_msr_bitmap_write(msr_bitmap, msr);
-}
-
 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
 {
 	/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 5a87be8854f3..87174d961c85 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -386,23 +386,25 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 
-void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
-void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool set);
+
+static inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
+						 u32 msr, int type)
+{
+	vmx_set_intercept_for_msr(vcpu, msr, type, false);
+}
+
+static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
+						u32 msr, int type)
+{
+	vmx_set_intercept_for_msr(vcpu, msr, type, true);
+}
 
 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 
 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
 
-static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
-					     int type, bool value)
-{
-	if (value)
-		vmx_enable_intercept_for_msr(vcpu, msr, type);
-	else
-		vmx_disable_intercept_for_msr(vcpu, msr, type);
-}
-
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
 u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);

base-commit: 58c81bc1e71de7d02848a1c1579256f5ebd38e07
--
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Chao Gao 3 months, 2 weeks ago
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index 5453478d1ca3..cc5f81afd8af 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -685,21 +685,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
>>  	return svm_test_msr_bitmap_write(msrpm, msr);
>>  }
>>  
>> -void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>> +void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)
>
>I don't love "enable", because without the context of the wrapper to clarify the
>polarity, it might not be super obvious that "enable" means "enable interception".
>
>I'm leaning towards "set", which is also flawed, mainly because it conflicts with
>the "set" in the function name.  But IMO, that's more of a problem with the function
>name.
>
>Anyone have a strong opinion one way or the other?
>
>> -void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>> +void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>
>The wrappers can be "static inline" in the header.
>
>If no one objects to s/enable/set (or has an alternative name), I'll apply this:

Looks good to me.
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Mi, Dapeng 3 months, 4 weeks ago
On 6/12/2025 4:19 PM, Chao Gao wrote:
> Extract a common function from MSR interception disabling logic and create
> disabling and enabling functions based on it. This removes most of the
> duplicated code for MSR interception disabling/enabling.
>
> No functional change intended.
>
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> ---
>  arch/x86/kvm/svm/svm.c | 23 +++++++++--------------
>  arch/x86/kvm/svm/svm.h | 10 +---------
>  arch/x86/kvm/vmx/vmx.c | 25 +++++++++----------------
>  arch/x86/kvm/vmx/vmx.h | 10 +---------
>  4 files changed, 20 insertions(+), 48 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 5453478d1ca3..cc5f81afd8af 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -685,21 +685,21 @@ static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
>  	return svm_test_msr_bitmap_write(msrpm, msr);
>  }
>  
> -void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	void *msrpm = svm->msrpm;
>  
>  	/* Don't disable interception for MSRs userspace wants to handle. */
>  	if (type & MSR_TYPE_R) {
> -		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
> +		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
>  			svm_clear_msr_bitmap_read(msrpm, msr);
>  		else
>  			svm_set_msr_bitmap_read(msrpm, msr);
>  	}
>  
>  	if (type & MSR_TYPE_W) {
> -		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
> +		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
>  			svm_clear_msr_bitmap_write(msrpm, msr);
>  		else
>  			svm_set_msr_bitmap_write(msrpm, msr);
> @@ -709,19 +709,14 @@ void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>  	svm->nested.force_msr_bitmap_recalc = true;
>  }
>  
> -void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>  {
> -	struct vcpu_svm *svm = to_svm(vcpu);
> -	void *msrpm = svm->msrpm;
> -
> -	if (type & MSR_TYPE_R)
> -		svm_set_msr_bitmap_read(msrpm, msr);
> -
> -	if (type & MSR_TYPE_W)
> -		svm_set_msr_bitmap_write(msrpm, msr);
> +	svm_set_intercept_for_msr(vcpu, msr, type, false);
> +}
>  
> -	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
> -	svm->nested.force_msr_bitmap_recalc = true;
> +void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +{
> +	svm_set_intercept_for_msr(vcpu, msr, type, true);
>  }
>  
>  void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 8d3279563261..faa478d9fc62 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -696,15 +696,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
>  
>  void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>  void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> -
> -static inline void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> -					     int type, bool enable_intercept)
> -{
> -	if (enable_intercept)
> -		svm_enable_intercept_for_msr(vcpu, msr, type);
> -	else
> -		svm_disable_intercept_for_msr(vcpu, msr, type);
> -}
> +void svm_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
>  
>  /* nested.c */
>  
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 277c6b5b5d5f..559261b18512 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -3952,7 +3952,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
>  	vmx->nested.force_msr_bitmap_recalc = true;
>  }
>  
> -void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
> @@ -3963,35 +3963,28 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>  	vmx_msr_bitmap_l01_changed(vmx);
>  
>  	if (type & MSR_TYPE_R) {
> -		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
> +		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
>  			vmx_clear_msr_bitmap_read(msr_bitmap, msr);
>  		else
>  			vmx_set_msr_bitmap_read(msr_bitmap, msr);
>  	}
>  
>  	if (type & MSR_TYPE_W) {
> -		if (kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
> +		if (!enable && kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
>  			vmx_clear_msr_bitmap_write(msr_bitmap, msr);
>  		else
>  			vmx_set_msr_bitmap_write(msr_bitmap, msr);
>  	}
>  }
>  
> -void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
>  {
> -	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
> -
> -	if (!cpu_has_vmx_msr_bitmap())
> -		return;
> -
> -	vmx_msr_bitmap_l01_changed(vmx);
> -
> -	if (type & MSR_TYPE_R)
> -		vmx_set_msr_bitmap_read(msr_bitmap, msr);
> +	vmx_set_intercept_for_msr(vcpu, msr, type, false);
> +}
>  
> -	if (type & MSR_TYPE_W)
> -		vmx_set_msr_bitmap_write(msr_bitmap, msr);
> +void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
> +{
> +	vmx_set_intercept_for_msr(vcpu, msr, type, true);
>  }
>  
>  static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index a26fe3d9e1d2..31acd8c726e3 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -388,21 +388,13 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
>  
>  void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>  void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
>  
>  u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
>  u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>  
>  gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
>  
> -static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> -					     int type, bool value)
> -{
> -	if (value)
> -		vmx_enable_intercept_for_msr(vcpu, msr, type);
> -	else
> -		vmx_disable_intercept_for_msr(vcpu, msr, type);
> -}
> -
>  void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
>  
>  /*

The change looks good to me. 

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Just curious, is there a preference on using these 3 interfaces? When
should we use the disable/enable interfaces? When should we use the set
interface? Or is there no preference?


Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Chao Gao 3 months, 3 weeks ago
>> --- a/arch/x86/kvm/vmx/vmx.h
>> +++ b/arch/x86/kvm/vmx/vmx.h
>> @@ -388,21 +388,13 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
>>  
>>  void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>>  void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
>> +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
>>  
>>  u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
>>  u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
>>  
>>  gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
>>  
>> -static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
>> -					     int type, bool value)
>> -{
>> -	if (value)
>> -		vmx_enable_intercept_for_msr(vcpu, msr, type);
>> -	else
>> -		vmx_disable_intercept_for_msr(vcpu, msr, type);
>> -}
>> -
>>  void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
>>  
>>  /*
>
>The change looks good to me. 
>
>Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>

Thanks.

>
>Just curious, is there a preference on using these 3 interfaces? When
>should we use the disable/enable interfaces? When should we use the set
>interface? Or is there no preference?

I think the set API is to reduce boilerplate code. So, use the set API when
you need to perform conditional logic, such as

	if (/*check guest/host caps*/)
		//disable intercept
	else
		//enable intercept

otherwise, use the disable/enable APIs.
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Sean Christopherson 3 months, 2 weeks ago
On Mon, Jun 16, 2025, Chao Gao wrote:
> >> --- a/arch/x86/kvm/vmx/vmx.h
> >> +++ b/arch/x86/kvm/vmx/vmx.h
> >> @@ -388,21 +388,13 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
> >>  
> >>  void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> >>  void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
> >> +void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool enable);
> >>  
> >>  u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
> >>  u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
> >>  
> >>  gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
> >>  
> >> -static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
> >> -					     int type, bool value)
> >> -{
> >> -	if (value)
> >> -		vmx_enable_intercept_for_msr(vcpu, msr, type);
> >> -	else
> >> -		vmx_disable_intercept_for_msr(vcpu, msr, type);
> >> -}
> >> -
> >>  void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
> >>  
> >>  /*
> >
> >The change looks good to me. 
> >
> >Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> 
> Thanks.
> 
> >
> >Just curious, is there a preference on using these 3 interfaces? When
> >should we use the disable/enable interfaces? When should be we use the set
> >interface?  or no preference?
> 
> I think the set API is to reduce boilerplate code. So, use the set API when
> you need to perform conditional logic, such as
> 
> 	if (/*check guest/host caps*/)
> 		//disable intercept
> 	else
> 		//enable intercept
> 
> otherwise, use the disable/enable APIs.

Yep.  The preference is to use the API that is most appropriate for the code.
E.g. if the code unconditionally enables/disables interceptions, then it should
use the appropriate wrapper.
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Nikolay Borisov 4 months ago

On 6/12/25 11:19, Chao Gao wrote:
> Extract a common function from MSR interception disabling logic and create
> disabling and enabling functions based on it. This removes most of the
> duplicated code for MSR interception disabling/enabling.
> 
> No functional change intended.
> 
> Signed-off-by: Chao Gao <chao.gao@intel.com>

I believe similar change is already part of Sean's series on MSR 
intercept overhaul.
Re: [PATCH 1/2] KVM: x86: Deduplicate MSR interception enabling and disabling
Posted by Chao Gao 4 months ago
On Thu, Jun 12, 2025 at 02:25:04PM +0300, Nikolay Borisov wrote:
>
>
>On 6/12/25 11:19, Chao Gao wrote:
>> Extract a common function from MSR interception disabling logic and create
>> disabling and enabling functions based on it. This removes most of the
>> duplicated code for MSR interception disabling/enabling.
>> 
>> No functional change intended.
>> 
>> Signed-off-by: Chao Gao <chao.gao@intel.com>
>
>I believe similar change is already part of Sean's series on MSR intercept
>overhaul.

No, this series is based on Sean's work and can only be applied after applying
his v2.