From: Tianyu Lan <tiala@microsoft.com>
The wakeup_secondary_cpu callback was populated with
wakeup_cpu_via_vmgexit(), which doesn't work for Hyper-V: Hyper-V
requires its own hypercall (HvCallStartVirtualProcessor) to start
APs. Override the callback with a Hyper-V specific hook that builds
a sev_es_save_area for the AP and issues the hypercall.
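
For reference, the generic boot path consumes the callback roughly
like this (a simplified sketch of the arch/x86 smpboot logic, not
code added by this patch):

	if (apic->wakeup_secondary_cpu_64)
		return apic->wakeup_secondary_cpu_64(apicid, start_ip);
	/* otherwise fall back to the INIT/SIPI/SIPI sequence */
	return wakeup_secondary_cpu_via_init(apicid, start_ip);
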
Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
Change since RFC v5:
* Remove some redundant structure definitions

Change since RFC v3:
* Replace struct vmcb_save_area with struct sev_es_save_area
* Move code from mshyperv.c to ivm.c

Change since RFC v2:
* Add helper function to initialize segment
* Fix some coding style
---
arch/x86/hyperv/ivm.c | 98 +++++++++++++++++++++++++++++++
arch/x86/include/asm/mshyperv.h | 10 ++++
arch/x86/kernel/cpu/mshyperv.c | 13 +++-
include/asm-generic/hyperv-tlfs.h | 3 +-
4 files changed, 121 insertions(+), 3 deletions(-)
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 85e4378f052f..b7b8e1ba8223 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -22,11 +22,15 @@
#include <asm/sev.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
+#include <asm/desc.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define GHCB_USAGE_HYPERV_CALL 1
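+/*
+ * Hypercall input must sit in memory the hypervisor can read, so the
+ * input page lives in __bss_decrypted; the temporary AP bring-up
+ * stack stays in ordinary (encrypted) guest memory.
+ */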
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+
union hv_ghcb {
struct ghcb ghcb;
struct {
@@ -443,6 +447,100 @@ __init void hv_sev_init_mem_and_cpu(void)
}
}
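+/*
+ * Derive the VMSA segment attribute encoding from the live GDT entry:
+ * a 16-bit load at descriptor offset 5 picks up the access byte
+ * (attrib bits 0-7) plus descriptor byte 6 (bits 8-15); the flags
+ * nibble in bits 12-15 is then folded down into attrib bits 8-11,
+ * the packed layout the VMSA expects (AMD64 APM Vol. 2, VMCB layout).
+ */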
+#define hv_populate_vmcb_seg(seg, gdtr_base) \
+do { \
+ if (seg.selector) { \
+ seg.base = 0; \
+ seg.limit = HV_AP_SEGMENT_LIMIT; \
+ seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
+ seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+ } \
+} while (0)
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+ struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+ __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ struct desc_ptr gdtr;
+ u64 ret, rmp_adjust, retry = 5;
+ struct hv_enable_vp_vtl *start_vp_input;
+ unsigned long flags;
+
+	if (!vmsa)
+		return -ENOMEM;
+
+	native_store_gdt(&gdtr);
+
+ vmsa->gdtr.base = gdtr.address;
+ vmsa->gdtr.limit = gdtr.size;
+
+ asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+ hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+ asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+ hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+ asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+ hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+ asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+ hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+ vmsa->efer = native_read_msr(MSR_EFER);
+
+ asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+ asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+ asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
+ vmsa->xcr0 = 1;
+ vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
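+	/*
+	 * The AP starts directly in 64-bit mode with its state taken
+	 * from this VMSA, so use the secondary entry point that skips
+	 * the trampoline's CPU re-verification.
+	 */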
+ vmsa->rip = (u64)secondary_startup_64_no_verify;
+ vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+ /*
+ * Set the SNP-specific fields for this VMSA:
+ * VMPL level
+ * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+	 */
+ vmsa->vmpl = 0;
+ vmsa->sev_features = sev_status >> 2;
+
+ /*
+ * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+ * using the RMPADJUST instruction. However, for the instruction to
+ * succeed it must target the permissions of a lesser privileged
+ * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+ * instruction in the AMD64 APM Volume 3).
+ */
+ rmp_adjust = RMPADJUST_VMSA_PAGE_BIT | 1;
+ ret = rmpadjust((unsigned long)vmsa, RMP_PG_SIZE_4K,
+ rmp_adjust);
+ if (ret != 0) {
+ pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+ return ret;
+ }
+
+ local_irq_save(flags);
+ start_vp_input =
+ (struct hv_enable_vp_vtl *)ap_start_input_arg;
+ memset(start_vp_input, 0, sizeof(*start_vp_input));
+ start_vp_input->partition_id = -1;
+ start_vp_input->vp_index = cpu;
+ start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
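+	/*
+	 * Hand the hypervisor the VMSA as the initial VP context; bit 0
+	 * of the context field appears to tag the address as a VMSA page
+	 * rather than a struct hv_initial_vp_context (an assumption read
+	 * from this usage, not from a published TLFS definition).
+	 */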
+ *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+ do {
+ ret = hv_do_hypercall(HVCALL_START_VP,
+ start_vp_input, NULL);
+ } while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+ if (!hv_result_success(ret)) {
+ pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+ goto done;
+ }
+
+done:
+ local_irq_restore(flags);
+ return ret;
+}
+
void __init hv_vtom_init(void)
{
/*
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 84e024ffacd5..9ad2a0f21d68 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -65,6 +65,13 @@ struct memory_map_entry {
u32 reserved;
};
+/*
+ * Default GPAT (guest PAT) and segment limit values placed in the VMSA
+ * to start an AP in an enlightened SEV guest. The GPAT value is the
+ * architectural power-on default of the IA32_PAT MSR (PA0 = WB,
+ * PA1 = WT, PA2 = UC-, PA3 = UC, mirrored in the upper four entries).
+ */
+#define HV_AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL
+#define HV_AP_SEGMENT_LIMIT 0xffffffff
+
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
#ifdef CONFIG_AMD_MEM_ENCRYPT
void hv_ghcb_msr_write(u64 msr, u64 value);
@@ -271,6 +279,7 @@ bool hv_ghcb_negotiate_protocol(void);
void hv_ghcb_terminate(unsigned int set, unsigned int reason);
void hv_vtom_init(void);
void hv_sev_init_mem_and_cpu(void);
+int hv_snp_boot_ap(int cpu, unsigned long start_ip);
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
@@ -278,6 +287,7 @@ static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline void hv_vtom_init(void) {}
static inline void hv_sev_init_mem_and_cpu(void) {}
+static inline int hv_snp_boot_ap(int cpu, unsigned long start_ip) { return 0; }
#endif
extern bool hv_isolation_type_snp(void);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index dea9b881180b..0c5f9f7bd7ba 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -295,6 +295,16 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
native_smp_prepare_cpus(max_cpus);
+ /*
+ * Override wakeup_secondary_cpu_64 callback for SEV-SNP
+ * enlightened guest.
+ */
+ if (hv_isolation_type_en_snp())
+ apic->wakeup_secondary_cpu_64 = hv_snp_boot_ap;
+
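+	/* Everything past this point is root-partition-only VP setup. */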
+ if (!hv_root_partition)
+ return;
+
#ifdef CONFIG_X86_64
for_each_present_cpu(i) {
if (i == 0)
@@ -502,8 +512,7 @@ static void __init ms_hyperv_init_platform(void)
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
- if (hv_root_partition)
- smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
+ smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif
/*
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index f4e4cc4f965f..92dcc530350c 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -146,9 +146,9 @@ union hv_reference_tsc_msr {
/* Declare the various hypercall operations. */
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_ENABLE_VP_VTL 0x000f
#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_ENABLE_VP_VTL 0x000f
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
#define HVCALL_SEND_IPI_EX 0x0015
@@ -223,6 +223,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
--
2.25.1
> -----Original Message-----
> From: Tianyu Lan <ltykernel@gmail.com>
> Sent: Monday, May 15, 2023 10:29 PM
> Subject: [EXTERNAL] [RFC PATCH V6 13/14] x86/hyperv: Add smp support for
> sev-snp guest
>
> +	do {
> +		ret = hv_do_hypercall(HVCALL_START_VP,
> +				      start_vp_input, NULL);
> +	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

can we restore local_irq here ?

> +
> +	if (!hv_result_success(ret)) {
> +		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
> +		goto done;

No need of goto here.

Regards,
Saurabh
On 5/16/2023 1:16 PM, Saurabh Singh Sengar wrote:
>> +	start_vp_input =
>> +		(struct hv_enable_vp_vtl *)ap_start_input_arg;
>> +	memset(start_vp_input, 0, sizeof(*start_vp_input));
>> +	start_vp_input->partition_id = -1;
>> +	start_vp_input->vp_index = cpu;
>> +	start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
>> +	*(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
>> +
>> +	do {
>> +		ret = hv_do_hypercall(HVCALL_START_VP,
>> +				      start_vp_input, NULL);
>> +	} while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
>
> can we restore local_irq here ?
>
>> +
>> +	if (!hv_result_success(ret)) {
>> +		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
>> +		goto done;
>
> No need of goto here.
>

Nice catch. The goto label should be removed here. Will update in the
next version.
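
With the goto gone, the tail of hv_snp_boot_ap() would read
something like:

	if (!hv_result_success(ret))
		pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);

	local_irq_restore(flags);
	return ret;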