Hyper-V's input / output arguments must be 8-byte aligned and must not
cross a page boundary. The easiest way to satisfy those requirements is
to use a percpu page.
For the foreseeable future we only need to provide input for TLB
and APIC hypercalls, so skip setting up an output page.
The page tracking structure is not bound to hypercalls, because it is a
common pattern for Xen to pass the page's guest physical address to
Hyper-V while at the same time accessing the page's contents via a
pointer; the structure therefore records both (see the illustrative
sketch after the patch).
We also provide an ap_setup hook for secondary CPUs to set up their own
input pages.
Signed-off-by: Wei Liu <liuwe@microsoft.com>
Reviewed-by: Paul Durrant <paul@xen.org>
---
xen/arch/x86/guest/hyperv/hyperv.c | 26 ++++++++++++++++++++++++++
xen/include/asm-x86/guest/hyperv.h | 8 ++++++++
2 files changed, 34 insertions(+)
diff --git a/xen/arch/x86/guest/hyperv/hyperv.c b/xen/arch/x86/guest/hyperv/hyperv.c
index 381be2a68c..03027bd453 100644
--- a/xen/arch/x86/guest/hyperv/hyperv.c
+++ b/xen/arch/x86/guest/hyperv/hyperv.c
@@ -27,6 +27,7 @@
struct ms_hyperv_info __read_mostly ms_hyperv;
extern char hv_hypercall_page[];
+DEFINE_PER_CPU_READ_MOSTLY(struct hyperv_pcpu_page, hv_pcpu_input_arg);
static const struct hypervisor_ops ops;
const struct hypervisor_ops *__init hyperv_probe(void)
@@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
}
+static void setup_hypercall_pcpu_arg(void)
+{
+ struct page_info *pg;
+ void *mapping;
+ unsigned int cpu = smp_processor_id();
+
+ pg = alloc_domheap_page(NULL, 0);
+ if ( !pg )
+ panic("Failed to setup hypercall input page for %u\n", cpu);
+
+ mapping = __map_domain_page_global(pg);
+ if ( !mapping )
+ panic("Failed to map hypercall input page for %u\n", cpu);
+
+ this_cpu(hv_pcpu_input_arg).maddr = page_to_maddr(pg);
+ this_cpu(hv_pcpu_input_arg).mapping = mapping;
+}
+
static void __init setup(void)
{
setup_hypercall_page();
+ setup_hypercall_pcpu_arg();
+}
+
+static void ap_setup(void)
+{
+ setup_hypercall_pcpu_arg();
}
static const struct hypervisor_ops ops = {
.name = "Hyper-V",
.setup = setup,
+ .ap_setup = ap_setup,
};
/*
diff --git a/xen/include/asm-x86/guest/hyperv.h b/xen/include/asm-x86/guest/hyperv.h
index c7a7f32bd5..83f297468f 100644
--- a/xen/include/asm-x86/guest/hyperv.h
+++ b/xen/include/asm-x86/guest/hyperv.h
@@ -51,6 +51,8 @@ static inline uint64_t hv_scale_tsc(uint64_t tsc, uint64_t scale,
#ifdef CONFIG_HYPERV_GUEST
+#include <xen/percpu.h>
+
#include <asm/guest/hypervisor.h>
struct ms_hyperv_info {
@@ -63,6 +65,12 @@ struct ms_hyperv_info {
};
extern struct ms_hyperv_info ms_hyperv;
+struct hyperv_pcpu_page {
+ paddr_t maddr;
+ void *mapping;
+};
+DECLARE_PER_CPU(struct hyperv_pcpu_page, hv_pcpu_input_arg);
+
const struct hypervisor_ops *hyperv_probe(void);
#else
--
2.20.1
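
Illustrative sketch only, not part of the patch: the struct hv_tlb_flush
layout, the HV_FLUSH_* flags, the HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE code
and hv_do_hypercall() below are assumed names (following TLFS-style
naming) and are not introduced by this series. The point is merely to
show why both the machine address and the virtual mapping of the percpu
page are recorded.

    static uint64_t flush_all_example(void)
    {
        /* Fill in the hypercall input through the virtual mapping... */
        struct hv_tlb_flush *input = this_cpu(hv_pcpu_input_arg).mapping;

        input->address_space = 0;
        input->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
                       HV_FLUSH_ALL_PROCESSORS;

        /* ...but pass the page's machine address to Hyper-V. */
        return hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
                               this_cpu(hv_pcpu_input_arg).maddr, 0);
    }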
On 03/01/2020 16:08, Wei Liu wrote:
> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
> wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> }
>
> +static void setup_hypercall_pcpu_arg(void)
> +{
> + struct page_info *pg;
> + void *mapping;
> + unsigned int cpu = smp_processor_id();
> +
> + pg = alloc_domheap_page(NULL, 0);
> + if ( !pg )
> + panic("Failed to setup hypercall input page for %u\n", cpu);
> +
> + mapping = __map_domain_page_global(pg);
> + if ( !mapping )
> + panic("Failed to map hypercall input page for %u\n", cpu);
Sorry I didn't spot this before, but an always-mapped domheap page is
just alloc_xenheap_page() (give or take NUMA positioning above the 5T
boundary, which isn't used here).
~Andrew
On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
> On 03/01/2020 16:08, Wei Liu wrote:
> > @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
> > wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> > }
> >
> > +static void setup_hypercall_pcpu_arg(void)
> > +{
> > + struct page_info *pg;
> > + void *mapping;
> > + unsigned int cpu = smp_processor_id();
> > +
> > + pg = alloc_domheap_page(NULL, 0);
> > + if ( !pg )
> > + panic("Failed to setup hypercall input page for %u\n", cpu);
> > +
> > + mapping = __map_domain_page_global(pg);
> > + if ( !mapping )
> > + panic("Failed to map hypercall input page for %u\n", cpu);
>
> Sorry I didn't spot this before, but an always-mapped domheap page is
> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
> boundary, which isn't used here).
I had the (wrong) impression that using domheap was preferred.
I'm fine with switching to xenheap, of course.
Wei.
>
> ~Andrew
On 03/01/2020 16:55, Wei Liu wrote:
> On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
>> On 03/01/2020 16:08, Wei Liu wrote:
>>> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
>>> wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
>>> }
>>>
>>> +static void setup_hypercall_pcpu_arg(void)
>>> +{
>>> + struct page_info *pg;
>>> + void *mapping;
>>> + unsigned int cpu = smp_processor_id();
>>> +
>>> + pg = alloc_domheap_page(NULL, 0);
>>> + if ( !pg )
>>> + panic("Failed to setup hypercall input page for %u\n", cpu);
>>> +
>>> + mapping = __map_domain_page_global(pg);
>>> + if ( !mapping )
>>> + panic("Failed to map hypercall input page for %u\n", cpu);
>> Sorry I didn't spot this before, but an always-mapped domheap page is
>> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
>> boundary, which isn't used here).
> I had the (wrong) impression that using domheap was preferred.
>
> I'm fine with switching to xenheap, of course.
This is a frame which Xen needs to have a mapping to in perpetuity, to
make hypercalls.
Most examples in the code are regular domheap frames which, after some
guest action, require mapping in Xen for a period of time, or frames
which we want to have specific NUMA properties and may be beyond the
end of the directmap.
~Andrew
On Fri, Jan 03, 2020 at 04:57:11PM +0000, Andrew Cooper wrote:
> On 03/01/2020 16:55, Wei Liu wrote:
> > On Fri, Jan 03, 2020 at 04:30:49PM +0000, Andrew Cooper wrote:
> >> On 03/01/2020 16:08, Wei Liu wrote:
> >>> @@ -83,14 +84,39 @@ static void __init setup_hypercall_page(void)
> >>> wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
> >>> }
> >>>
> >>> +static void setup_hypercall_pcpu_arg(void)
> >>> +{
> >>> + struct page_info *pg;
> >>> + void *mapping;
> >>> + unsigned int cpu = smp_processor_id();
> >>> +
> >>> + pg = alloc_domheap_page(NULL, 0);
> >>> + if ( !pg )
> >>> + panic("Failed to setup hypercall input page for %u\n", cpu);
> >>> +
> >>> + mapping = __map_domain_page_global(pg);
> >>> + if ( !mapping )
> >>> + panic("Failed to map hypercall input page for %u\n", cpu);
> >> Sorry I didn't spot this before, but an always-mapped domheap page is
> >> just alloc_xenheap_page() (give or take NUMA positioning above the 5T
> >> boundary, which isn't used here).
> > I had the (wrong) impression that using domheap was preferred.
> >
> > I'm fine with switching to xenheap, of course.
>
> This is a frame which Xen needs to have a mapping to in perpetuity, to
> make hypercalls.
>
> Most examples in the code are regular domheap frames which, after some
> guest action, require mapping in Xen for a period of time, or frames
> which we want to have specific NUMA properties and may be beyond the
> end of the directmap.
Alright.
If we use xenheap here I can drop the tracking structure.
I will change that in v3.
Wei.
>
> ~Andrew
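
For reference, a minimal sketch of what the xenheap-based variant
discussed above might look like (the actual v3 patch may differ). A
xenheap page is always mapped in the directmap, so only the virtual
address needs to be stored; virt_to_maddr() can recover the machine
address when issuing a hypercall, which is why the tracking structure
can be dropped.

    DEFINE_PER_CPU_READ_MOSTLY(void *, hv_pcpu_input_arg);

    static void setup_hypercall_pcpu_arg(void)
    {
        void *mapping = alloc_xenheap_page();

        if ( !mapping )
            panic("Failed to allocate hypercall input page for CPU%u\n",
                  smp_processor_id());

        this_cpu(hv_pcpu_input_arg) = mapping;
    }

    /* At hypercall time: virt_to_maddr(this_cpu(hv_pcpu_input_arg)) */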