From: Yang Weijiang <weijiang.yang@intel.com>
Rename kvm_{g,s}et_msr()* to kvm_emulate_msr_{read,write}()* to make it
more obvious that KVM uses these helpers to emulate guest behaviors,
i.e., host_initiated == false in these helpers.
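
For context, a simplified sketch of the contrast (illustrative only; the
do_get_msr() shape below reflects the surrounding x86.c code, not this diff):

	/* Guest-emulated access (e.g. a RDMSR intercept): host_initiated == false. */
	int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
	{
		return kvm_get_msr_ignored_check(vcpu, index, data, false);
	}

	/* Host-initiated access (e.g. the KVM_GET_MSRS ioctl) passes true instead. */
	static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
	{
		return kvm_get_msr_ignored_check(vcpu, index, data, true);
	}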
Suggested-by: Sean Christopherson <seanjc@google.com>
Suggested-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Chao Gao <chao.gao@intel.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  8 ++++----
 arch/x86/kvm/smm.c              |  4 ++--
 arch/x86/kvm/vmx/nested.c       | 13 +++++++------
 arch/x86/kvm/x86.c              | 28 +++++++++++++++-------------
4 files changed, 28 insertions(+), 25 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 142a8421400f..1f3f8601747f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2150,11 +2150,11 @@ void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
-int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 9864c057187d..51d0646622ef 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -529,7 +529,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
vcpu->arch.smbase = smstate->smbase;
- if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
+ if (kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
return X86EMUL_UNHANDLEABLE;
rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
@@ -620,7 +620,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
/* And finally go back to 32-bit mode. */
efer = 0;
- kvm_set_msr(vcpu, MSR_EFER, efer);
+ kvm_emulate_msr_write(vcpu, MSR_EFER, efer);
}
#endif
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c69df3aba8d1..e7374834453c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -991,7 +991,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
__func__, i, e.index, e.reserved);
goto fail;
}
- if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
+ if (kvm_emulate_msr_write_with_filter(vcpu, e.index, e.value)) {
pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
@@ -1027,7 +1027,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
}
}
- if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
+ if (kvm_emulate_msr_read_with_filter(vcpu, msr_index, data)) {
pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
msr_index);
return false;
@@ -2764,7 +2764,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
- WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ WARN_ON_ONCE(kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
vmcs12->guest_ia32_perf_global_ctrl))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
@@ -4752,8 +4752,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
}
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
- WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
- vmcs12->host_ia32_perf_global_ctrl));
+ WARN_ON_ONCE(kvm_emulate_msr_write(vcpu,
+ MSR_CORE_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl));
/* Set L1 segment info according to Intel SDM
27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -4931,7 +4932,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
goto vmabort;
}
- if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
+ if (kvm_emulate_msr_write_with_filter(vcpu, h.index, h.value)) {
pr_debug_ratelimited(
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
__func__, j, h.index, h.value);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7543dac7ae70..11d84075cd14 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1929,33 +1929,35 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
__kvm_get_msr);
}
-int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index,
+ u64 *data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
return KVM_MSR_RET_FILTERED;
return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_read_with_filter);
-int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index,
+ u64 data)
{
if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
return KVM_MSR_RET_FILTERED;
return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_write_with_filter);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_get_msr);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_read);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_set_msr);
+EXPORT_SYMBOL_GPL(kvm_emulate_msr_write);
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{
@@ -2027,7 +2029,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
u64 data;
int r;
- r = kvm_get_msr_with_filter(vcpu, ecx, &data);
+ r = kvm_emulate_msr_read_with_filter(vcpu, ecx, &data);
if (!r) {
trace_kvm_msr_read(ecx, data);
@@ -2052,7 +2054,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
u64 data = kvm_read_edx_eax(vcpu);
int r;
- r = kvm_set_msr_with_filter(vcpu, ecx, data);
+ r = kvm_emulate_msr_write_with_filter(vcpu, ecx, data);
if (!r) {
trace_kvm_msr_write(ecx, data);
@@ -8484,7 +8486,7 @@ static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
+ r = kvm_emulate_msr_read_with_filter(vcpu, msr_index, pdata);
if (r < 0)
return X86EMUL_UNHANDLEABLE;
@@ -8507,7 +8509,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_set_msr_with_filter(vcpu, msr_index, data);
+ r = kvm_emulate_msr_write_with_filter(vcpu, msr_index, data);
if (r < 0)
return X86EMUL_UNHANDLEABLE;
@@ -8527,7 +8529,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
- return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+ return kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
}
static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
--
2.47.1
On 7/4/2025 1:49 AM, Chao Gao wrote:
> @@ -2764,7 +2764,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
>
> 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
> 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
> -	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
> +	    WARN_ON_ONCE(kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
> 				     vmcs12->guest_ia32_perf_global_ctrl))) {

Not sure if the alignment should be adjusted based on the above modified
line.

> 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
> 		return -EINVAL;
> @@ -4752,8 +4752,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
> 	}
> 	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
> 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
> -		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
> -					 vmcs12->host_ia32_perf_global_ctrl));
> +		WARN_ON_ONCE(kvm_emulate_msr_write(vcpu,
> +						   MSR_CORE_PERF_GLOBAL_CTRL,
> +						   vmcs12->host_ia32_perf_global_ctrl));

Same here.

>
> 	/* Set L1 segment info according to Intel SDM
> 	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 7543dac7ae70..11d84075cd14 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1929,33 +1929,35 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
> 				     __kvm_get_msr);
> }
>
> -int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
> +int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index,
> +				     u64 *data)

I think the extra new line doesn't improve readability, but it's the
maintainer's call.

> {
> 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
> 		return KVM_MSR_RET_FILTERED;
> 	return kvm_get_msr_ignored_check(vcpu, index, data, false);
> }
> -EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
> +EXPORT_SYMBOL_GPL(kvm_emulate_msr_read_with_filter);
>
> -int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
> +int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index,

Ditto.

> +				      u64 data)
> {
> 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
> 		return KVM_MSR_RET_FILTERED;
On Mon, Jul 28, 2025 at 03:31:41PM -0700, Xin Li wrote:
>On 7/4/2025 1:49 AM, Chao Gao wrote:
>> @@ -2764,7 +2764,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
>> 	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
>> 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
>> -	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
>> +	    WARN_ON_ONCE(kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
>> 				     vmcs12->guest_ia32_perf_global_ctrl))) {
>
>Not sure if the alignment should be adjusted based on the above modified
>line.

I prefer to align the indentation, so will do.

>
>> 		*entry_failure_code = ENTRY_FAIL_DEFAULT;
>> 		return -EINVAL;
>> @@ -4752,8 +4752,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
>> 	}
>> 	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
>> 	    kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
>> -		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
>> -					 vmcs12->host_ia32_perf_global_ctrl));
>> +		WARN_ON_ONCE(kvm_emulate_msr_write(vcpu,
>> +						   MSR_CORE_PERF_GLOBAL_CTRL,
>> +						   vmcs12->host_ia32_perf_global_ctrl));
>
>Same here.

ack.

>
>> 	/* Set L1 segment info according to Intel SDM
>> 	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 7543dac7ae70..11d84075cd14 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -1929,33 +1929,35 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
>> 				     __kvm_get_msr);
>> }
>> -int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
>> +int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index,
>> +				     u64 *data)
>
>I think the extra new line doesn't improve readability, but it's the
>maintainer's call.
>

Sure. Seems "let it poke out" is Sean's preference. I saw he made similar
requests several times. e.g.,
https://lore.kernel.org/kvm/ZjQgA0ml4-mRJC-e@google.com/
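
For reference, the "let it poke out" style under discussion (a hypothetical
illustration, not a line from the patch): instead of wrapping the parameter
list to stay within the soft line-length limit,

	int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index,
					      u64 data);

the declaration is kept on a single line even though it runs long:

	int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);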
On Tue, Jul 29, 2025, Chao Gao wrote:
> On Mon, Jul 28, 2025 at 03:31:41PM -0700, Xin Li wrote:
> >> 	/* Set L1 segment info according to Intel SDM
> >> 	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
> >> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> >> index 7543dac7ae70..11d84075cd14 100644
> >> --- a/arch/x86/kvm/x86.c
> >> +++ b/arch/x86/kvm/x86.c
> >> @@ -1929,33 +1929,35 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
> >> 				     __kvm_get_msr);
> >> }
> >> -int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
> >> +int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index,
> >> +				     u64 *data)
> >
> >I think the extra new line doesn't improve readability, but it's the
> >maintainer's call.
>
> Sure. Seems "let it poke out" is Sean's preference. I saw he made similar
> requests several times. e.g.,

Depends on the situation. I'd probably mentally flip a coin in this case.

But what I'd actually do here is choose names that are (a) less verbose and
(b) capture the relationship between the APIs. Instead of:

	int kvm_emulate_msr_read_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
	int kvm_emulate_msr_write_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
	int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
	int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);

rename to:

	int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
	int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
	int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
	int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);

And then we can do a follow-up patch to solidify the relationship:

--
From: Sean Christopherson <seanjc@google.com>
Date: Tue, 29 Jul 2025 11:13:48 -0700
Subject: [PATCH] KVM: x86: Use double-underscore read/write MSR helpers as
 appropriate

Use the double-underscore helpers for emulating MSR reads and writes in
the no-underscore versions to better capture the relationship between the
two sets of APIs (the double-underscore versions don't honor userspace
MSR filters).

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/x86.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 09b106a5afdf..65c787bcfe8b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1932,11 +1932,24 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
 				     __kvm_get_msr);
 }

+int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+	return kvm_get_msr_ignored_check(vcpu, index, data, false);
+}
+EXPORT_SYMBOL_GPL(__kvm_emulate_msr_read);
+
+int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+	return kvm_set_msr_ignored_check(vcpu, index, data, false);
+}
+EXPORT_SYMBOL_GPL(__kvm_emulate_msr_write);
+
 int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
 		return KVM_MSR_RET_FILTERED;
-	return kvm_get_msr_ignored_check(vcpu, index, data, false);
+
+	return __kvm_emulate_msr_read(vcpu, index, data);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_msr_read);

@@ -1944,21 +1957,11 @@ int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
 	if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
 		return KVM_MSR_RET_FILTERED;
-	return kvm_set_msr_ignored_check(vcpu, index, data, false);
+
+	return __kvm_emulate_msr_write(vcpu, index, data);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_msr_write);

-int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
-{
-	return kvm_get_msr_ignored_check(vcpu, index, data, false);
-}
-EXPORT_SYMBOL_GPL(__kvm_emulate_msr_read);
-
-int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
-{
-	return kvm_set_msr_ignored_check(vcpu, index, data, false);
-}
-EXPORT_SYMBOL_GPL(__kvm_emulate_msr_write);

 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
 {

base-commit: 1877e7b0749cbaa2d2ba4056eeda93adb373f7d4
--
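
As a usage illustration of the proposed naming (hypothetical call sites, not
code from this series): a guest-triggered WRMSR goes through the filtered
helper, while an emulation path that loads architecturally mandated state
(e.g. a nested VM-Enter MSR load performed by KVM itself) would use the
unfiltered double-underscore variant:

	/* Guest WRMSR: honors userspace MSR filters. */
	r = kvm_emulate_msr_write(vcpu, ecx, data);
	if (r == KVM_MSR_RET_FILTERED)
		/* ... exit to userspace to handle the filtered access ... */

	/* Emulated-but-mandatory MSR load: bypasses filtering. */
	if (WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
						 vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;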
On Fri, 2025-07-04 at 01:49 -0700, Chao Gao wrote:
> From: Yang Weijiang <weijiang.yang@intel.com>
>
> Rename kvm_{g,s}et_msr()* to kvm_emulate_msr_{read,write}()* to make it
> more obvious that KVM uses these helpers to emulate guest behaviors,
> i.e., host_initiated == false in these helpers.
>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Suggested-by: Chao Gao <chao.gao@intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> Signed-off-by: Chao Gao <chao.gao@intel.com>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> Reviewed-by: Chao Gao <chao.gao@intel.com>

Nit: I don't think your Reviewed-by is needed if the chain already has
your SoB?
On Thu, Jul 24, 2025, Kai Huang wrote:
> On Fri, 2025-07-04 at 01:49 -0700, Chao Gao wrote:
> > From: Yang Weijiang <weijiang.yang@intel.com>
> >
> > Rename kvm_{g,s}et_msr()* to kvm_emulate_msr_{read,write}()* to make it
> > more obvious that KVM uses these helpers to emulate guest behaviors,
> > i.e., host_initiated == false in these helpers.
> >
> > Suggested-by: Sean Christopherson <seanjc@google.com>
> > Suggested-by: Chao Gao <chao.gao@intel.com>
> > Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> > Signed-off-by: Chao Gao <chao.gao@intel.com>
> > Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> > Reviewed-by: Chao Gao <chao.gao@intel.com>
>
> Nit: I don't think your Reviewed-by is needed if the chain already has
> your SoB?

Keep the Reviewed-by, it's still useful, e.g. to communicate that Chao has
done more than just shepherd the patch along.