Introduce helper functions for safely querying the P2M (physical-to-machine)
mapping:
- add p2m_read_lock(), p2m_read_unlock(), and p2m_is_locked() for managing
P2M lock state.
- Implement p2m_get_entry() to retrieve mapping details for a given GFN,
including MFN, page order, and validity.
- Add p2m_lookup() to encapsulate read-locked MFN retrieval.
- Introduce p2m_get_page_from_gfn() to convert a GFN into a page_info
pointer, acquiring a reference to the page if valid.
- Introduce get_page().
Implementations are based on Arm's functions with some minor modifications:
- p2m_get_entry():
- Reverse traversal of page tables, as RISC-V uses the opposite level
numbering compared to Arm.
- Removed the return of p2m_access_t from p2m_get_entry() since
mem_access_settings is not introduced for RISC-V.
- Updated BUILD_BUG_ON() to check using the level 0 mask, which corresponds
to Arm's THIRD_MASK.
- Replaced open-coded bit shifts with the BIT() macro.
- Other minor changes, such as using RISC-V-specific functions to validate
P2M PTEs, and replacing Arm-specific GUEST_* macros with their RISC-V
equivalents.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V3:
- Add is_p2m_foreign() macro and connected stuff.
- Change struct domain *d argument of p2m_get_page_from_gfn() to
struct p2m_domain.
- Update the comment above p2m_get_entry().
- s/_t/p2mt for local variable in p2m_get_entry().
- Drop local variable addr in p2m_get_entry() and use gfn_to_gaddr(gfn)
to define offsets array.
- Code style fixes.
- Update a check of rc code from p2m_next_level() in p2m_get_entry()
and drop "else" case.
- Do not call p2m_get_type() if p2m_get_entry()'s t argument is NULL.
- Use struct p2m_domain instead of struct domain for p2m_lookup() and
p2m_get_page_from_gfn().
 - Move definition of get_page() from "xen/riscv: implement mfn_valid() and page reference, ownership handling helpers"
---
Changes in V2:
- New patch.
---
xen/arch/riscv/include/asm/p2m.h | 18 ++++
xen/arch/riscv/mm.c | 13 +++
xen/arch/riscv/p2m.c | 136 +++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
index fbc73448a7..dc3a77cc15 100644
--- a/xen/arch/riscv/include/asm/p2m.h
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -202,6 +202,24 @@ static inline int p2m_is_write_locked(struct p2m_domain *p2m)
unsigned long construct_hgatp(struct p2m_domain *p2m, uint16_t vmid);
+static inline void p2m_read_lock(struct p2m_domain *p2m)
+{
+ read_lock(&p2m->lock);
+}
+
+static inline void p2m_read_unlock(struct p2m_domain *p2m)
+{
+ read_unlock(&p2m->lock);
+}
+
+static inline int p2m_is_locked(struct p2m_domain *p2m)
+{
+ return rw_is_locked(&p2m->lock);
+}
+
+struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
+ p2m_type_t *t);
+
#endif /* ASM__RISCV__P2M_H */
/*
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index 3ad2b9cf93..5e09d46a75 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -677,3 +677,16 @@ struct domain *page_get_owner_and_reference(struct page_info *page)
return owner;
}
+
+bool get_page(struct page_info *page, const struct domain *domain)
+{
+ const struct domain *owner = page_get_owner_and_reference(page);
+
+ if ( likely(owner == domain) )
+ return true;
+
+ if ( owner != NULL )
+ put_page(page);
+
+ return false;
+}
diff --git a/xen/arch/riscv/p2m.c b/xen/arch/riscv/p2m.c
index e9e6818da2..24a09d4537 100644
--- a/xen/arch/riscv/p2m.c
+++ b/xen/arch/riscv/p2m.c
@@ -852,3 +852,139 @@ int map_regions_p2mt(struct domain *d,
{
return p2m_insert_mapping(p2m_get_hostp2m(d), gfn, nr, mfn, p2mt);
}
+
+/*
+ * Get the details of a given gfn.
+ *
+ * If the entry is present, the associated MFN will be returned type filled up.
+ * The page_order will correspond to the order of the mapping in the page
+ * table (i.e it could be a superpage).
+ *
+ * If the entry is not present, INVALID_MFN will be returned and the
+ * page_order will be set according to the order of the invalid range.
+ *
+ * valid will contain the value of bit[0] (e.g valid bit) of the
+ * entry.
+ */
+static mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
+ p2m_type_t *t,
+ unsigned int *page_order,
+ bool *valid)
+{
+ unsigned int level = 0;
+ pte_t entry, *table;
+ int rc;
+ mfn_t mfn = INVALID_MFN;
+ DECLARE_OFFSETS(offsets, gfn_to_gaddr(gfn));
+
+ ASSERT(p2m_is_locked(p2m));
+ BUILD_BUG_ON(XEN_PT_LEVEL_MAP_MASK(0) != PAGE_MASK);
+
+ if ( valid )
+ *valid = false;
+
+ /* XXX: Check if the mapping is lower than the mapped gfn */
+
+ /* This gfn is higher than the highest the p2m map currently holds */
+ if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
+ {
+ for ( level = P2M_ROOT_LEVEL; level; level-- )
+ if ( (gfn_x(gfn) & (XEN_PT_LEVEL_MASK(level) >> PAGE_SHIFT)) >
+ gfn_x(p2m->max_mapped_gfn) )
+ break;
+
+ goto out;
+ }
+
+ table = p2m_get_root_pointer(p2m, gfn);
+
+ /*
+ * the table should always be non-NULL because the gfn is below
+ * p2m->max_mapped_gfn and the root table pages are always present.
+ */
+ if ( !table )
+ {
+ ASSERT_UNREACHABLE();
+ level = P2M_ROOT_LEVEL;
+ goto out;
+ }
+
+ for ( level = P2M_ROOT_LEVEL; level; level-- )
+ {
+ rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
+ if ( (rc == P2M_TABLE_MAP_NONE) || (rc == P2M_TABLE_MAP_NOMEM) )
+ goto out_unmap;
+
+ if ( rc != P2M_TABLE_NORMAL )
+ break;
+ }
+
+ entry = table[offsets[level]];
+
+ if ( pte_is_valid(entry) )
+ {
+ if ( t )
+ *t = p2m_get_type(entry);
+
+ mfn = pte_get_mfn(entry);
+ /*
+ * The entry may point to a superpage. Find the MFN associated
+ * to the GFN.
+ */
+ mfn = mfn_add(mfn,
+ gfn_x(gfn) & (BIT(XEN_PT_LEVEL_ORDER(level), UL) - 1));
+
+ if ( valid )
+ *valid = pte_is_valid(entry);
+ }
+
+ out_unmap:
+ unmap_domain_page(table);
+
+ out:
+ if ( page_order )
+ *page_order = XEN_PT_LEVEL_ORDER(level);
+
+ return mfn;
+}
+
+static mfn_t p2m_lookup(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t)
+{
+ mfn_t mfn;
+
+ p2m_read_lock(p2m);
+ mfn = p2m_get_entry(p2m, gfn, t, NULL, NULL);
+ p2m_read_unlock(p2m);
+
+ return mfn;
+}
+
+struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
+ p2m_type_t *t)
+{
+ struct page_info *page;
+ p2m_type_t p2mt = p2m_invalid;
+ mfn_t mfn = p2m_lookup(p2m, gfn, t);
+
+ if ( !mfn_valid(mfn) )
+ return NULL;
+
+ if ( t )
+ p2mt = *t;
+
+ page = mfn_to_page(mfn);
+
+ /*
+ * get_page won't work on foreign mapping because the page doesn't
+ * belong to the current domain.
+ */
+ if ( p2m_is_foreign(p2mt) )
+ {
+ struct domain *fdom = page_get_owner_and_reference(page);
+ ASSERT(fdom != NULL);
+ ASSERT(fdom != p2m->domain);
+ return page;
+ }
+
+ return get_page(page, p2m->domain) ? page : NULL;
+}
--
2.50.1
On 31.07.2025 17:58, Oleksii Kurochko wrote:
> Introduce helper functions for safely querying the P2M (physical-to-machine)
> mapping:
> - add p2m_read_lock(), p2m_read_unlock(), and p2m_is_locked() for managing
> P2M lock state.
> - Implement p2m_get_entry() to retrieve mapping details for a given GFN,
> including MFN, page order, and validity.
> - Add p2m_lookup() to encapsulate read-locked MFN retrieval.
> - Introduce p2m_get_page_from_gfn() to convert a GFN into a page_info
> pointer, acquiring a reference to the page if valid.
> - Introduce get_page().
>
> Implementations are based on Arm's functions with some minor modifications:
> - p2m_get_entry():
> - Reverse traversal of page tables, as RISC-V uses the opposite level
> numbering compared to Arm.
> - Removed the return of p2m_access_t from p2m_get_entry() since
> mem_access_settings is not introduced for RISC-V.
> - Updated BUILD_BUG_ON() to check using the level 0 mask, which corresponds
> to Arm's THIRD_MASK.
> - Replaced open-coded bit shifts with the BIT() macro.
> - Other minor changes, such as using RISC-V-specific functions to validate
> P2M PTEs, and replacing Arm-specific GUEST_* macros with their RISC-V
> equivalents.
>
> Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
> ---
> Changes in V3:
> - Add is_p2m_foreign() macro and connected stuff.
What is this about?
> --- a/xen/arch/riscv/include/asm/p2m.h
> +++ b/xen/arch/riscv/include/asm/p2m.h
> @@ -202,6 +202,24 @@ static inline int p2m_is_write_locked(struct p2m_domain *p2m)
>
> unsigned long construct_hgatp(struct p2m_domain *p2m, uint16_t vmid);
>
> +static inline void p2m_read_lock(struct p2m_domain *p2m)
> +{
> + read_lock(&p2m->lock);
> +}
> +
> +static inline void p2m_read_unlock(struct p2m_domain *p2m)
> +{
> + read_unlock(&p2m->lock);
> +}
> +
> +static inline int p2m_is_locked(struct p2m_domain *p2m)
bool return type (also for p2m_is_write_locked() in patch 11)? Also perhaps
pointer-to-const parameter?
> --- a/xen/arch/riscv/p2m.c
> +++ b/xen/arch/riscv/p2m.c
> @@ -852,3 +852,139 @@ int map_regions_p2mt(struct domain *d,
> {
> return p2m_insert_mapping(p2m_get_hostp2m(d), gfn, nr, mfn, p2mt);
> }
> +
> +/*
> + * Get the details of a given gfn.
> + *
> + * If the entry is present, the associated MFN will be returned type filled up.
This sentence doesn't really parse, perhaps due to missing words.
> + * The page_order will correspond to the order of the mapping in the page
> + * table (i.e it could be a superpage).
> + *
> + * If the entry is not present, INVALID_MFN will be returned and the
> + * page_order will be set according to the order of the invalid range.
> + *
> + * valid will contain the value of bit[0] (e.g valid bit) of the
> + * entry.
> + */
> +static mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
> + p2m_type_t *t,
> + unsigned int *page_order,
> + bool *valid)
> +{
> + unsigned int level = 0;
> + pte_t entry, *table;
> + int rc;
> + mfn_t mfn = INVALID_MFN;
> + DECLARE_OFFSETS(offsets, gfn_to_gaddr(gfn));
> +
> + ASSERT(p2m_is_locked(p2m));
> + BUILD_BUG_ON(XEN_PT_LEVEL_MAP_MASK(0) != PAGE_MASK);
What function-wide property is this check about? Even when moved ...
> + if ( valid )
> + *valid = false;
> +
> + /* XXX: Check if the mapping is lower than the mapped gfn */
(Nested: What is this about?)
> + /* This gfn is higher than the highest the p2m map currently holds */
> + if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
> + {
> + for ( level = P2M_ROOT_LEVEL; level; level-- )
> + if ( (gfn_x(gfn) & (XEN_PT_LEVEL_MASK(level) >> PAGE_SHIFT)) >
... into the more narrow scope where another XEN_PT_LEVEL_MASK() exists I
can't really spot what the check is to guard against.
> + gfn_x(p2m->max_mapped_gfn) )
> + break;
> +
> + goto out;
> + }
> +
> + table = p2m_get_root_pointer(p2m, gfn);
> +
> + /*
> + * the table should always be non-NULL because the gfn is below
> + * p2m->max_mapped_gfn and the root table pages are always present.
> + */
Nit: Style.
> + if ( !table )
> + {
> + ASSERT_UNREACHABLE();
> + level = P2M_ROOT_LEVEL;
> + goto out;
> + }
> +
> + for ( level = P2M_ROOT_LEVEL; level; level-- )
> + {
> + rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
Why would you blindly allocate a page table (hierarchy) here? If anything,
this may need doing upon caller request (as it's only up the call chain
where the necessary knowledge exists). For example, ...
> +static mfn_t p2m_lookup(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t)
> +{
> + mfn_t mfn;
> +
> + p2m_read_lock(p2m);
> + mfn = p2m_get_entry(p2m, gfn, t, NULL, NULL);
... this (by its name) pretty likely won't want allocation, while ...
> + p2m_read_unlock(p2m);
> +
> + return mfn;
> +}
> +
> +struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
> + p2m_type_t *t)
> +{
... this will. Yet then ...
> + struct page_info *page;
> + p2m_type_t p2mt = p2m_invalid;
> + mfn_t mfn = p2m_lookup(p2m, gfn, t);
... you use the earlier one here.
> + if ( !mfn_valid(mfn) )
> + return NULL;
> +
> + if ( t )
> + p2mt = *t;
> +
> + page = mfn_to_page(mfn);
> +
> + /*
> + * get_page won't work on foreign mapping because the page doesn't
> + * belong to the current domain.
> + */
> + if ( p2m_is_foreign(p2mt) )
> + {
> + struct domain *fdom = page_get_owner_and_reference(page);
> + ASSERT(fdom != NULL);
> + ASSERT(fdom != p2m->domain);
> + return page;
In a release build (with no assertions) this will be wrong if either of the
two condition would not be satisfied. See x86'es respective code.
Jan
On 8/11/25 3:25 PM, Jan Beulich wrote:
>> + * The page_order will correspond to the order of the mapping in the page
>> + * table (i.e it could be a superpage).
>> + *
>> + * If the entry is not present, INVALID_MFN will be returned and the
>> + * page_order will be set according to the order of the invalid range.
>> + *
>> + * valid will contain the value of bit[0] (e.g valid bit) of the
>> + * entry.
>> + */
>> +static mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
>> + p2m_type_t *t,
>> + unsigned int *page_order,
>> + bool *valid)
>> +{
>> + unsigned int level = 0;
>> + pte_t entry, *table;
>> + int rc;
>> + mfn_t mfn = INVALID_MFN;
>> + DECLARE_OFFSETS(offsets, gfn_to_gaddr(gfn));
>> +
>> + ASSERT(p2m_is_locked(p2m));
>> + BUILD_BUG_ON(XEN_PT_LEVEL_MAP_MASK(0) != PAGE_MASK);
> What function-wide property is this check about? Even when moved ...
>
>> + if ( valid )
>> + *valid = false;
>> +
>> + /* XXX: Check if the mapping is lower than the mapped gfn */
> (Nested: What is this about?)
>
>> + /* This gfn is higher than the highest the p2m map currently holds */
>> + if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
>> + {
>> + for ( level = P2M_ROOT_LEVEL; level; level-- )
>> + if ( (gfn_x(gfn) & (XEN_PT_LEVEL_MASK(level) >> PAGE_SHIFT)) >
> ... into the more narrow scope where another XEN_PT_LEVEL_MASK() exists I
> can't really spot what the check is to guard against.
I missed answering this in my previous reply and only noticed it while starting
to rework the patch.
I think it makes sense to update the comment above the if condition: the loop is
needed to find the highest possible order by checking whether the base of the block
mapping is greater than the max mapped gfn, so that, as mentioned in the function's
description, the order of the invalid range can be returned when the entry is not
present.
I expect it probably also makes sense to do something similar for ->lowest_mapped_gfn,
which is why the /* XXX: ... */ comment exists.
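So the comment above the check could become something like this (keeping the existing
loop as is):

    /*
     * The gfn is higher than the highest mapped gfn, so the mapping is not
     * present. Find the biggest order for which the base of the block mapping
     * is still above max_mapped_gfn, so that the order of the invalid range
     * can be reported.
     */
    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
    {
        for ( level = P2M_ROOT_LEVEL; level; level-- )
            if ( (gfn_x(gfn) & (XEN_PT_LEVEL_MASK(level) >> PAGE_SHIFT)) >
                 gfn_x(p2m->max_mapped_gfn) )
                break;

        goto out;
    }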
~ Oleksii
>
>> + gfn_x(p2m->max_mapped_gfn) )
>> + break;
>> +
>> + goto out;
>> + }
On 8/11/25 3:25 PM, Jan Beulich wrote:
> On 31.07.2025 17:58, Oleksii Kurochko wrote:
>> Introduce helper functions for safely querying the P2M (physical-to-machine)
>> mapping:
>> - add p2m_read_lock(), p2m_read_unlock(), and p2m_is_locked() for managing
>> P2M lock state.
>> - Implement p2m_get_entry() to retrieve mapping details for a given GFN,
>> including MFN, page order, and validity.
>> - Add p2m_lookup() to encapsulate read-locked MFN retrieval.
>> - Introduce p2m_get_page_from_gfn() to convert a GFN into a page_info
>> pointer, acquiring a reference to the page if valid.
>> - Introduce get_page().
>>
>> Implementations are based on Arm's functions with some minor modifications:
>> - p2m_get_entry():
>> - Reverse traversal of page tables, as RISC-V uses the opposite level
>> numbering compared to Arm.
>> - Removed the return of p2m_access_t from p2m_get_entry() since
>> mem_access_settings is not introduced for RISC-V.
>> - Updated BUILD_BUG_ON() to check using the level 0 mask, which corresponds
>> to Arm's THIRD_MASK.
>> - Replaced open-coded bit shifts with the BIT() macro.
>> - Other minor changes, such as using RISC-V-specific functions to validate
>> P2M PTEs, and replacing Arm-specific GUEST_* macros with their RISC-V
>> equivalents.
>>
>> Signed-off-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
>> ---
>> Changes in V3:
>> - Add is_p2m_foreign() macro and connected stuff.
> What is this about?
Sorry for that, it is a stale change. I will drop it in the next patch version.
>> --- a/xen/arch/riscv/include/asm/p2m.h
>> +++ b/xen/arch/riscv/include/asm/p2m.h
>> @@ -202,6 +202,24 @@ static inline int p2m_is_write_locked(struct p2m_domain *p2m)
>>
>> unsigned long construct_hgatp(struct p2m_domain *p2m, uint16_t vmid);
>>
>> +static inline void p2m_read_lock(struct p2m_domain *p2m)
>> +{
>> + read_lock(&p2m->lock);
>> +}
>> +
>> +static inline void p2m_read_unlock(struct p2m_domain *p2m)
>> +{
>> + read_unlock(&p2m->lock);
>> +}
>> +
>> +static inline int p2m_is_locked(struct p2m_domain *p2m)
> bool return type (also for p2m_is_write_locked() in patch 11)? Also perhaps
> pointer-to-const parameter?
I hadn't checked what the argument type of rw_is_locked() is, so I automatically used
a plain pointer parameter, but now I see it can indeed be const.
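Roughly, just a sketch (assuming rw_is_locked() indeed accepts a const pointer):

    static inline bool p2m_is_locked(const struct p2m_domain *p2m)
    {
        return rw_is_locked(&p2m->lock);
    }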
>> --- a/xen/arch/riscv/p2m.c
>> +++ b/xen/arch/riscv/p2m.c
>> @@ -852,3 +852,139 @@ int map_regions_p2mt(struct domain *d,
>> {
>> return p2m_insert_mapping(p2m_get_hostp2m(d), gfn, nr, mfn, p2mt);
>> }
>> +
>> +/*
>> + * Get the details of a given gfn.
>> + *
>> + * If the entry is present, the associated MFN will be returned type filled up.
> This sentence doesn't really parse, perhaps due to missing words.
IDK what happened, but it should be:
 ... the associated MFN will be returned and the type filled up ...
Perhaps it would be better to just say:
 ... the associated MFN will be returned and the p2m type of the mapping.
(or just the entry's type)
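I.e. the opening of the comment would then read something like:

    /*
     * Get the details of a given gfn.
     *
     * If the entry is present, the associated MFN will be returned and the
     * p2m type of the mapping (the entry's type) will be filled up.
     */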
>> + * The page_order will correspond to the order of the mapping in the page
>> + * table (i.e it could be a superpage).
>> + *
>> + * If the entry is not present, INVALID_MFN will be returned and the
>> + * page_order will be set according to the order of the invalid range.
>> + *
>> + * valid will contain the value of bit[0] (e.g valid bit) of the
>> + * entry.
>> + */
>> +static mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
>> + p2m_type_t *t,
>> + unsigned int *page_order,
>> + bool *valid)
>> +{
>> + unsigned int level = 0;
>> + pte_t entry, *table;
>> + int rc;
>> + mfn_t mfn = INVALID_MFN;
>> + DECLARE_OFFSETS(offsets, gfn_to_gaddr(gfn));
>> +
>> + ASSERT(p2m_is_locked(p2m));
>> + BUILD_BUG_ON(XEN_PT_LEVEL_MAP_MASK(0) != PAGE_MASK);
> What function-wide property is this check about? Even when moved ...
I think this check isn't needed anymore.
This check is/was needed to make sure that 4k page(s) are used for the L3 (in Arm terms)
mapping, as Arm can support 4k, 16k and 64k.
The check was originally introduced in:
https://lore.kernel.org/xen-devel/1402394278-9850-4-git-send-email-ian.campbell@citrix.com/
and was needed because of the way maddr is calculated there, which could be wrong if the
page size isn't 4k.
The check was later migrated to p2m_get_entry():
https://lore.kernel.org/xen-devel/1469717505-8026-13-git-send-email-julien.grall@arm.com/
But the way maddr is obtained here doesn't depend on the mask and PAGE_MASK, and I don't
see any other reason why the BUILD_BUG_ON() is still needed.
>
>> + if ( valid )
>> + *valid = false;
>> +
>> + /* XXX: Check if the mapping is lower than the mapped gfn */
> (Nested: What is this about?)
>
>> + /* This gfn is higher than the highest the p2m map currently holds */
>> + if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
>> + {
>> + for ( level = P2M_ROOT_LEVEL; level; level-- )
>> + if ( (gfn_x(gfn) & (XEN_PT_LEVEL_MASK(level) >> PAGE_SHIFT)) >
> ... into the more narrow scope where another XEN_PT_LEVEL_MASK() exists I
> can't really spot what the check is to guard against.
>
>> + gfn_x(p2m->max_mapped_gfn) )
>> + break;
>> +
>> + goto out;
>> + }
>> +
>> + table = p2m_get_root_pointer(p2m, gfn);
>> +
>> + /*
>> + * the table should always be non-NULL because the gfn is below
>> + * p2m->max_mapped_gfn and the root table pages are always present.
>> + */
> Nit: Style.
>
>> + if ( !table )
>> + {
>> + ASSERT_UNREACHABLE();
>> + level = P2M_ROOT_LEVEL;
>> + goto out;
>> + }
>> +
>> + for ( level = P2M_ROOT_LEVEL; level; level-- )
>> + {
>> + rc = p2m_next_level(p2m, true, level, &table, offsets[level]);
> Why would you blindly allocate a page table (hierarchy) here? If anything,
> this may need doing upon caller request (as it's only up the call chain
> where the necessary knowledge exists).
I wanted to set it to always false, as based on the name p2m_get_entry() it is
expected that the page tables are already allocated.
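I.e., roughly (a sketch of what the loop would look like with allocation disabled):

    for ( level = P2M_ROOT_LEVEL; level; level-- )
    {
        /* Do not allocate intermediate page tables on the lookup path. */
        rc = p2m_next_level(p2m, false, level, &table, offsets[level]);
        if ( (rc == P2M_TABLE_MAP_NONE) || (rc == P2M_TABLE_MAP_NOMEM) )
            goto out_unmap;

        if ( rc != P2M_TABLE_NORMAL )
            break;
    }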
> For example, ...
>
>> +static mfn_t p2m_lookup(struct p2m_domain *p2m, gfn_t gfn, p2m_type_t *t)
>> +{
>> + mfn_t mfn;
>> +
>> + p2m_read_lock(p2m);
>> + mfn = p2m_get_entry(p2m, gfn, t, NULL, NULL);
> ... this (by its name) pretty likely won't want allocation, while ...
>
>> + p2m_read_unlock(p2m);
>> +
>> + return mfn;
>> +}
>> +
>> +struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
>> + p2m_type_t *t)
>> +{
> ... this will. Yet then ...
I didn't really get why p2m_get_page_from_gfn() is expected to allocate page tables.
My understanding is that a GFN will point to a page only if a mapping was done for
that GFN before.
>
>> + struct page_info *page;
>> + p2m_type_t p2mt = p2m_invalid;
>> + mfn_t mfn = p2m_lookup(p2m, gfn, t);
> ... you use the earlier one here.
We don't need page_order and/or the valid bit in p2m_get_page_from_gfn().
>
>> + if ( !mfn_valid(mfn) )
>> + return NULL;
>> +
>> + if ( t )
>> + p2mt = *t;
>> +
>> + page = mfn_to_page(mfn);
>> +
>> + /*
>> + * get_page won't work on foreign mapping because the page doesn't
>> + * belong to the current domain.
>> + */
>> + if ( p2m_is_foreign(p2mt) )
>> + {
>> + struct domain *fdom = page_get_owner_and_reference(page);
>> + ASSERT(fdom != NULL);
>> + ASSERT(fdom != p2m->domain);
>> + return page;
> In a release build (with no assertions) this will be wrong if either of the
> two condition would not be satisfied. See x86'es respective code.
I will add the following then instead (adapted to the local names here, i.e. p2mt and
p2m->domain):

    if ( unlikely(p2m_is_foreign(p2mt)) )
    {
        const struct domain *fdom = page_get_owner_and_reference(page);

        if ( fdom )
        {
            if ( likely(fdom != p2m->domain) )
                return page;
            ASSERT_UNREACHABLE();
            put_page(page);
        }

        return NULL;
    }
I'm not sure the unlikely() is needed; x86 has it.
It seems Arm would then need such a change too.
Thanks.
~ Oleksii