Patch 1 is really independent, but patch 2 relies on it being in place.
Patch 2 itself was added as a result of the discussion of v1 of patch 3
(which was previously standalone).

1: short-circuit HVM-only mode flags when !HVM
2: make guest_physmap_add_entry() HVM-only
3: subsume set_gpfn_from_mfn() into guest_physmap_add_page()

Jan
1: short-circuit HVM-only mode flags when !HVM

#define-ing them to zero allows better code generation in this case,
and paves the way for more dead code elimination (DCE), allowing certain
functions to be left merely declared, but never defined.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
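
To see the DCE point concretely — a minimal standalone sketch, not Xen
code; only the zero #define mirrors the patch, while hap_track_dirty()
and enable_mode() are made-up names:

    /* Built as a !HVM configuration: the flag is 0, not absent. */
    #define PG_HAP_enable 0

    int hap_track_dirty(void);  /* declared only; no definition anywhere */

    int enable_mode(unsigned int mode)
    {
        /*
         * "mode & PG_HAP_enable" folds to a compile-time zero, the branch
         * is dead code, and (at the optimisation levels hypervisor builds
         * use) the reference to the undefined hap_track_dirty() is
         * eliminated, so the link still succeeds.
         */
        if ( mode & PG_HAP_enable )
            return hap_track_dirty();

        return 0;
    }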
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -837,7 +837,9 @@ int paging_enable(struct domain *d, u32
     switch ( mode & (PG_external | PG_translate | PG_refcounts) )
     {
     case 0:
+#if PG_external | PG_translate | PG_refcounts
     case PG_external | PG_translate | PG_refcounts:
+#endif
         break;
     default:
         return -EINVAL;
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -46,19 +46,29 @@
 #define PG_SH_enable 0
 #define PG_SH_forced 0
 #endif
+#ifdef CONFIG_HVM
 #define PG_HAP_enable (1U << PG_HAP_shift)
+#else
+#define PG_HAP_enable 0
+#endif
 
 /* common paging mode bits */
 #define PG_mode_shift 10
+#ifdef CONFIG_HVM
 /* Refcounts based on shadow tables instead of guest tables */
 #define PG_refcounts (XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT << PG_mode_shift)
-/* Enable log dirty mode */
-#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift)
 /* Xen does p2m translation, not guest */
 #define PG_translate (XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE << PG_mode_shift)
 /* Xen does not steal address space from the domain for its own booking;
  * requires VT or similar mechanisms */
 #define PG_external (XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL << PG_mode_shift)
+#else
+#define PG_refcounts 0
+#define PG_translate 0
+#define PG_external 0
+#endif
+/* Enable log dirty mode */
+#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift)
 
 /* All paging modes. */
 #define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external)
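
Why the #if in the paging.c hunk is needed: in a !HVM build all three
flags expand to 0, so the second label would collapse onto "case 0:",
and duplicate case values are a compile error. A standalone sketch
(check() is a made-up name; the #define values mirror the !HVM case):

    #define PG_external  0
    #define PG_translate 0
    #define PG_refcounts 0

    int check(unsigned int mode)
    {
        switch ( mode & (PG_external | PG_translate | PG_refcounts) )
        {
        case 0:
    #if PG_external | PG_translate | PG_refcounts
        /* Compiled only when the combined mask is non-zero, i.e. never
         * a duplicate of "case 0:" above. */
        case PG_external | PG_translate | PG_refcounts:
    #endif
            break;
        default:
            return -1;
        }

        return 0;
    }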
On Thu, May 09, 2019 at 07:42:15AM -0600, Jan Beulich wrote:
> #define-ing them to zero allows better code generation in this case,
> and paves the way for more dead code elimination (DCE), allowing certain
> functions to be left merely declared, but never defined.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>
On 5/9/19 2:42 PM, Jan Beulich wrote:
> #define-ing them to zero allows better code generation in this case,
> and paves the way for more dead code elimination (DCE), allowing certain
> functions to be left merely declared, but never defined.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
2: make guest_physmap_add_entry() HVM-only

Lift its !paging_mode_translate() part into guest_physmap_add_page()
(which is what common code calls), eliminating the dummy use of a
(really HVM-only) P2M type in the PV case.
Suggested-by: George Dunlap <George.Dunlap@eu.citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
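
In outline, the split this patch produces — a simplified shape of the
diff below, reusing its declarations; the PV IOMMU loop and error
handling are elided:

    int guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
                               unsigned int page_order)
    {
        /* PV: IOMMU handled in get_page_type()/put_page(); nothing to do. */
        if ( !paging_mode_translate(d) )
            return 0;

        /* Translated (HVM) domains take the typed path, always plain RAM. */
        return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
    }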
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -813,22 +813,14 @@ guest_physmap_remove_page(struct domain
 }
 
 int
-guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
-                        unsigned int page_order, p2m_type_t t)
+guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                       unsigned int page_order)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    unsigned long i;
-    gfn_t ogfn;
-    p2m_type_t ot;
-    p2m_access_t a;
-    mfn_t omfn;
-    int pod_count = 0;
-    int rc = 0;
-
     /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
     if ( !paging_mode_translate(d) )
     {
         struct page_info *page = mfn_to_page(mfn);
+        unsigned long i;
 
         /*
          * Our interface for PV guests wrt IOMMU entries hasn't been very
@@ -841,7 +833,7 @@ guest_physmap_add_entry(struct domain *d
          * any guest-requested type changes succeed and remove the IOMMU
          * entry).
          */
-        if ( !need_iommu_pt_sync(d) || t != p2m_ram_rw )
+        if ( !need_iommu_pt_sync(d) )
             return 0;
 
         for ( i = 0; i < (1UL << page_order); ++i, ++page )
@@ -855,6 +847,29 @@ guest_physmap_add_entry(struct domain *d
         return 0;
     }
 
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+#ifdef CONFIG_HVM
+int
+guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
+                        unsigned int page_order, p2m_type_t t)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned long i;
+    gfn_t ogfn;
+    p2m_type_t ot;
+    p2m_access_t a;
+    mfn_t omfn;
+    int pod_count = 0;
+    int rc = 0;
+
+    if ( !paging_mode_translate(d) )
+    {
+        ASSERT_UNREACHABLE();
+        return -EPERM;
+    }
+
     /* foreign pages are added thru p2m_add_foreign */
     if ( p2m_is_foreign(t) )
         return -EINVAL;
@@ -978,7 +993,6 @@ guest_physmap_add_entry(struct domain *d
                  gfn_x(gfn), mfn_x(mfn));
         rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
                            p2m_invalid, p2m->default_access);
-#ifdef CONFIG_HVM
         if ( rc == 0 )
         {
             pod_lock(p2m);
@@ -986,7 +1000,6 @@ guest_physmap_add_entry(struct domain *d
             BUG_ON(p2m->pod.entry_count < 0);
             pod_unlock(p2m);
         }
-#endif
     }
 
 out:
@@ -994,7 +1007,7 @@ out:
 
     return rc;
 }
-
+#endif
 
 /*
  * Modify the p2m type of a single gfn from ot to nt.
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -587,14 +587,9 @@ int guest_physmap_add_entry(struct domai
                             mfn_t mfn, unsigned int page_order,
                             p2m_type_t t);
 
-/* Untyped version for RAM only, for compatibility */
-static inline int guest_physmap_add_page(struct domain *d,
-                                         gfn_t gfn,
-                                         mfn_t mfn,
-                                         unsigned int page_order)
-{
-    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
-}
+/* Untyped version for RAM only, for compatibility and PV. */
+int guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                           unsigned int page_order);
 
 /* Set a p2m range as populate-on-demand */
 int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
On 5/9/19 2:44 PM, Jan Beulich wrote:
> Lift its !paging_mode_translate() part into guest_physmap_add_page()
> (which is what common code calls), eliminating the dummy use of a
> (really HVM-only) P2M type in the PV case.
>
> Suggested-by: George Dunlap <George.Dunlap@eu.citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Thanks, looks good:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
On Thu, May 09, 2019 at 07:44:25AM -0600, Jan Beulich wrote:
> Lift its !paging_mode_translate() part into guest_physmap_add_page()
> (which is what common code calls), eliminating the dummy use of a
> (really HVM-only) P2M type in the PV case.
>
> Suggested-by: George Dunlap <George.Dunlap@eu.citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Wei Liu <wei.liu2@citrix.com>
3: subsume set_gpfn_from_mfn() into guest_physmap_add_page()

The two callers in common/memory.c currently call set_gpfn_from_mfn()
themselves, so moving the call into guest_physmap_add_page() helps tidy
their code.
The two callers in common/grant_table.c fail to make that call alongside
the one to guest_physmap_add_page(), so will actually get fixed by the
change.
Other (x86) callers are HVM-only and are hence unaffected by a change
to the function's !paging_mode_translate() part.
Sadly this isn't enough yet to drop Arm's dummy macro, as there's one
more use in page_alloc.c.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien.grall@arm.com>
---
v2: Re-base over added earlier patch. Re-write description.
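
For background, set_gpfn_from_mfn() records the machine-to-physical (M2P)
mapping that get_gpfn_from_mfn() reads back. A toy model of the
bookkeeping this patch centralises — all names and the table size here
are hypothetical, not Xen symbols:

    #define TOY_NR_FRAMES 1024UL

    static unsigned long toy_m2p[TOY_NR_FRAMES];

    /* Stand-in for set_gpfn_from_mfn(): remember which guest frame
     * number (gpfn) machine frame mfn currently backs. */
    static void toy_set_gpfn_from_mfn(unsigned long mfn, unsigned long gpfn)
    {
        toy_m2p[mfn] = gpfn;
    }

    /* What guest_physmap_add_page() now does for a 2^order contiguous
     * range, so no caller needs its own loop. */
    static void toy_add_range(unsigned long gfn, unsigned long mfn,
                              unsigned int order)
    {
        unsigned long i;

        for ( i = 0; i < (1UL << order); ++i )
            toy_set_gpfn_from_mfn(mfn + i, gfn + i);
    }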
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -833,15 +833,16 @@ guest_physmap_add_page(struct domain *d,
          * any guest-requested type changes succeed and remove the IOMMU
          * entry).
          */
-        if ( !need_iommu_pt_sync(d) )
-            return 0;
-
         for ( i = 0; i < (1UL << page_order); ++i, ++page )
         {
-            if ( get_page_and_type(page, d, PGT_writable_page) )
+            if ( !need_iommu_pt_sync(d) )
+                /* nothing */;
+            else if ( get_page_and_type(page, d, PGT_writable_page) )
                 put_page_and_type(page);
             else
                 return -EINVAL;
+
+            set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn) + i);
         }
 
         return 0;
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -270,16 +270,10 @@ static void populate_physmap(struct memo
 
                 guest_physmap_add_page(d, _gfn(gpfn), mfn, a->extent_order);
 
-                if ( !paging_mode_translate(d) )
-                {
-                    for ( j = 0; j < (1U << a->extent_order); j++ )
-                        set_gpfn_from_mfn(mfn_x(mfn_add(mfn, j)), gpfn + j);
-
-                    /* Inform the domain of the new page's machine address. */
-                    if ( unlikely(__copy_mfn_to_guest_offset(a->extent_list, i,
-                                                             mfn)) )
-                        goto out;
-                }
+                if ( !paging_mode_translate(d) &&
+                     /* Inform the domain of the new page's machine address. */
+                     unlikely(__copy_mfn_to_guest_offset(a->extent_list, i, mfn)) )
+                    goto out;
             }
         }
@@ -755,15 +749,11 @@ static long memory_exchange(XEN_GUEST_HA
                 guest_physmap_add_page(d, _gfn(gpfn), mfn,
                                        exch.out.extent_order);
 
-                if ( !paging_mode_translate(d) )
-                {
-                    for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
-                        set_gpfn_from_mfn(mfn_x(mfn_add(mfn, k)), gpfn + k);
-                    if ( __copy_mfn_to_guest_offset(exch.out.extent_start,
-                                                    (i << out_chunk_order) + j,
-                                                    mfn) )
-                        rc = -EFAULT;
-                }
+                if ( !paging_mode_translate(d) &&
+                     __copy_mfn_to_guest_offset(exch.out.extent_start,
+                                                (i << out_chunk_order) + j,
+                                                mfn) )
+                    rc = -EFAULT;
             }
 
             BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
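
One small idiom in the memory.c rework above is worth spelling out: once
the M2P update moves into guest_physmap_add_page(), each removed block's
only remaining work is a fallible copy-back, so the PV-only guard and the
copy fold into a single condition. In isolation, with made-up stand-ins
(copy_back_fails() is hypothetical, modelling a copy helper that reports
failure as non-zero):

    #include <errno.h>
    #include <stdbool.h>

    static bool copy_back_fails(unsigned long mfn)
    {
        (void)mfn;          /* a real helper would copy mfn out here */
        return false;
    }

    static int report_mfn(bool translated, unsigned long mfn)
    {
        int rc = 0;

        /* Guard and copy-back in one condition: a linear error path
         * instead of a nested block. */
        if ( !translated && copy_back_fails(mfn) )
            rc = -EFAULT;

        return rc;
    }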
On 5/9/19 2:45 PM, Jan Beulich wrote:
> The two callers in common/memory.c currently call set_gpfn_from_mfn()
> themselves, so moving the call into guest_physmap_add_page() helps tidy
> their code.
>
> The two callers in common/grant_table.c fail to make that call alongside
> the one to guest_physmap_add_page(), so will actually get fixed by the
> change.
>
> Other (x86) callers are HVM-only and are hence unaffected by a change
> to the function's !paging_mode_translate() part.
>
> Sadly this isn't enough yet to drop Arm's dummy macro, as there's one
> more use in page_alloc.c.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Acked-by: Julien Grall <julien.grall@arm.com>
> ---
> v2: Re-base over added earlier patch. Re-write description.

Thanks:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>