Split the code that detects whether the physical and linear addresses of a
mapping request are suitable to be used in an L3 or L2 slot.
No functional change intended.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
- Make the macros local to map_pages_to_xen().
- Some adjustments to macro logic.
---
xen/arch/x86/mm.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d537a799bced..8afb63c855b9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5232,6 +5232,12 @@ int map_pages_to_xen(
} \
} while (0)
+/* Check if a (virt, mfn) tuple is aligned for a given slot level. */
+#define IS_LnE_ALIGNED(v, m, n) \
+ IS_ALIGNED(PFN_DOWN(v) | mfn_x(m), (1UL << (PAGETABLE_ORDER * (n - 1))) - 1)
+#define IS_L2E_ALIGNED(v, m) IS_LnE_ALIGNED(v, m, 2)
+#define IS_L3E_ALIGNED(v, m) IS_LnE_ALIGNED(v, m, 3)
+
L3T_INIT(current_l3page);
while ( nr_mfns != 0 )
@@ -5249,9 +5255,7 @@ int map_pages_to_xen(
L3T_LOCK(current_l3page);
ol3e = *pl3e;
- if ( cpu_has_page1gb &&
- !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
- ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)) &&
+ if ( cpu_has_page1gb && IS_L3E_ALIGNED(virt, mfn) &&
nr_mfns >= (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) &&
!(flags & (_PAGE_PAT | MAP_SMALL_PAGES)) )
{
@@ -5370,8 +5374,7 @@ int map_pages_to_xen(
if ( !pl2e )
goto out;
- if ( ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
- ((1u << PAGETABLE_ORDER) - 1)) == 0) &&
+ if ( IS_L2E_ALIGNED(virt, mfn) &&
(nr_mfns >= (1u << PAGETABLE_ORDER)) &&
!(flags & (_PAGE_PAT|MAP_SMALL_PAGES)) )
{
@@ -5541,9 +5544,7 @@ int map_pages_to_xen(
check_l3:
if ( cpu_has_page1gb &&
(flags == PAGE_HYPERVISOR) &&
- ((nr_mfns == 0) ||
- !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
- ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1))) )
+ ((nr_mfns == 0) || IS_L3E_ALIGNED(virt, mfn)) )
{
unsigned long base_mfn;
const l2_pgentry_t *l2t;
@@ -5587,6 +5588,9 @@ int map_pages_to_xen(
}
}
+#undef IS_L3E_ALIGNED
+#undef IS_L2E_ALIGNED
+#undef IS_LnE_ALIGNED
#undef flush_flags
rc = 0;
--
2.46.0
On 08.11.2024 12:31, Roger Pau Monne wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5232,6 +5232,12 @@ int map_pages_to_xen(
> } \
> } while (0)
>
> +/* Check if a (virt, mfn) tuple is aligned for a given slot level. */
> +#define IS_LnE_ALIGNED(v, m, n) \
> + IS_ALIGNED(PFN_DOWN(v) | mfn_x(m), (1UL << (PAGETABLE_ORDER * (n - 1))) - 1)
> +#define IS_L2E_ALIGNED(v, m) IS_LnE_ALIGNED(v, m, 2)
> +#define IS_L3E_ALIGNED(v, m) IS_LnE_ALIGNED(v, m, 3)
> +
> L3T_INIT(current_l3page);
>
> while ( nr_mfns != 0 )
> @@ -5249,9 +5255,7 @@ int map_pages_to_xen(
> L3T_LOCK(current_l3page);
> ol3e = *pl3e;
>
> - if ( cpu_has_page1gb &&
> - !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> - ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1)) &&
> + if ( cpu_has_page1gb && IS_L3E_ALIGNED(virt, mfn) &&
> nr_mfns >= (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) &&
> !(flags & (_PAGE_PAT | MAP_SMALL_PAGES)) )
> {
> @@ -5370,8 +5374,7 @@ int map_pages_to_xen(
> if ( !pl2e )
> goto out;
>
> - if ( ((((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> - ((1u << PAGETABLE_ORDER) - 1)) == 0) &&
> + if ( IS_L2E_ALIGNED(virt, mfn) &&
> (nr_mfns >= (1u << PAGETABLE_ORDER)) &&
> !(flags & (_PAGE_PAT|MAP_SMALL_PAGES)) )
> {
> @@ -5541,9 +5544,7 @@ int map_pages_to_xen(
About 50 lines up from here there's another place where I think
IS_L2E_ALIGNED() wants using. That becomes noticeable with the remark
I'm about to make on patch 2, on this ...
> check_l3:
> if ( cpu_has_page1gb &&
> (flags == PAGE_HYPERVISOR) &&
> - ((nr_mfns == 0) ||
> - !(((virt >> PAGE_SHIFT) | mfn_x(mfn)) &
> - ((1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT)) - 1))) )
> + ((nr_mfns == 0) || IS_L3E_ALIGNED(virt, mfn)) )
> {
... similar construct (checking "flags == PAGE_HYPERVISOR"). Not sure
if I should also make this kind of an adjustment while committing ...
Jan
On 08.11.2024 12:31, Roger Pau Monne wrote:
> Split the code that detects whether the physical and linear address of a
> mapping request are suitable to be used in an L3 or L2 slot.
>
> No functional change intended.
>
> Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
with one further tweak:

> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5232,6 +5232,12 @@ int map_pages_to_xen(
>     }                                         \
> } while (0)
>
> +/* Check if a (virt, mfn) tuple is aligned for a given slot level. */
> +#define IS_LnE_ALIGNED(v, m, n) \
> +    IS_ALIGNED(PFN_DOWN(v) | mfn_x(m), (1UL << (PAGETABLE_ORDER * (n - 1))) - 1)

n wants parenthesizing here, for Misra's sake. I'll take the liberty to
do so while committing.

Jan
© 2016 - 2025 Red Hat, Inc.