From: Julien Grall <julien.grall@arm.com>
No functional change intended.
Only reasonable clean-ups are done in this patch. The rest will use _gfn
for the time being.
Signed-off-by: Julien Grall <julien.grall@arm.com>
---
get_page_from_gfn() is currently using an unsafe pattern as an MFN
should be validated via mfn_valid() before using mfn_to_page().
At Jan's request, this was dropped for this patch as this was unrelated.
If we want to fix it properly, then it should be done in a separate
patch along with the modifications of all the other callers using this
bad behavior.
This was originally sent as part of "More typesafe conversion of common
interface." [1].
Changes since the original patch:
- Use cr3_to_gfn()
- Remove the re-ordering of mfn_valid() and mfn_to_page() (see
above).
[1] <20190819142651.11058-1-julien.grall@arm.com>
---
xen/arch/arm/guestcopy.c | 2 +-
xen/arch/arm/mm.c | 2 +-
xen/arch/x86/cpu/vpmu.c | 2 +-
xen/arch/x86/domctl.c | 6 +++---
xen/arch/x86/hvm/dm.c | 2 +-
xen/arch/x86/hvm/domain.c | 6 ++++--
xen/arch/x86/hvm/hvm.c | 9 +++++----
xen/arch/x86/hvm/svm/svm.c | 8 ++++----
xen/arch/x86/hvm/viridian/viridian.c | 16 ++++++++--------
xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
xen/arch/x86/hvm/vmx/vvmx.c | 12 ++++++------
xen/arch/x86/mm.c | 24 ++++++++++++++----------
xen/arch/x86/mm/p2m.c | 2 +-
xen/arch/x86/mm/shadow/hvm.c | 6 +++---
xen/arch/x86/physdev.c | 3 ++-
xen/arch/x86/pv/descriptor-tables.c | 4 ++--
xen/arch/x86/pv/emul-priv-op.c | 6 +++---
xen/arch/x86/pv/mm.c | 2 +-
xen/arch/x86/traps.c | 11 ++++++-----
xen/common/domain.c | 2 +-
xen/common/event_fifo.c | 12 ++++++------
xen/common/memory.c | 4 ++--
xen/include/asm-arm/p2m.h | 6 +++---
xen/include/asm-x86/p2m.h | 12 ++++++++----
24 files changed, 88 insertions(+), 75 deletions(-)
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7a0f3e9d5f..55892062bb 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -37,7 +37,7 @@ static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
return get_page_from_gva(info.gva.v, addr,
write ? GV2M_WRITE : GV2M_READ);
- page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(info.gpa.d, gaddr_to_gfn(addr), &p2mt, P2M_ALLOC);
if ( !page )
return NULL;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 1075e5fcaf..d0ad06add4 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1446,7 +1446,7 @@ int xenmem_add_to_physmap_one(
/* Take reference to the foreign domain page.
* Reference will be released in XENMEM_remove_from_physmap */
- page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(od, _gfn(idx), &p2mt, P2M_ALLOC);
if ( !page )
{
put_pg_owner(od);
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index e50d478d23..9777efa4fb 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -617,7 +617,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
struct vcpu *v;
struct vpmu_struct *vpmu;
struct page_info *page;
- uint64_t gfn = params->val;
+ gfn_t gfn = _gfn(params->val);
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
return -EINVAL;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 02596c3810..8f5010fd58 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -391,7 +391,7 @@ long arch_do_domctl(
break;
}
- page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), &t, P2M_ALLOC);
if ( unlikely(!page) ||
unlikely(is_xen_heap_page(page)) )
@@ -461,11 +461,11 @@ long arch_do_domctl(
case XEN_DOMCTL_hypercall_init:
{
- unsigned long gmfn = domctl->u.hypercall_init.gmfn;
+ gfn_t gfn = _gfn(domctl->u.hypercall_init.gmfn);
struct page_info *page;
void *hypercall_page;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 96c5042b75..a09622007c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -188,7 +188,7 @@ static int modified_memory(struct domain *d,
{
struct page_info *page;
- page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+ page = get_page_from_gfn(d, _gfn(pfn), NULL, P2M_UNSHARE);
if ( page )
{
paging_mark_pfn_dirty(d, _pfn(pfn));
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 5d5a746a25..3c29ff86be 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
- struct page_info *page = get_page_from_gfn(v->domain,
- v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
+ struct page_info *page;
+
+ page = get_page_from_gfn(v->domain,
+ gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
NULL, P2M_ALLOC);
if ( !page )
{
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a3d115b650..9f720e7aa1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2216,7 +2216,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
{
struct vcpu *v = current;
struct domain *d = v->domain;
- unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
+ unsigned long old_value = v->arch.hvm.guest_cr[0];
struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
@@ -2271,7 +2271,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
if ( !paging_mode_hap(d) )
{
/* The guest CR3 must be pointing to the guest physical. */
- gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
+
page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
{
@@ -2363,7 +2364,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, cr3_to_gfn(value),
NULL, P2M_ALLOC);
if ( !page )
goto bad_cr3;
@@ -3191,7 +3192,7 @@ enum hvm_translation_result hvm_translate_get_page(
&& hvm_mmio_internal(gfn_to_gaddr(gfn)) )
return HVMTRANS_bad_gfn_to_mfn;
- page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
if ( !page )
return HVMTRANS_bad_gfn_to_mfn;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 32d8d847f2..a9abd6d3f1 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -299,7 +299,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
if ( c->cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, cr3_to_gfn(c->cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -2230,9 +2230,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
return NULL;
/* Need to translate L1-GPA to MPA */
- page = get_page_from_gfn(v->domain,
- nv->nv_vvmcxaddr >> PAGE_SHIFT,
- &p2mt, P2M_ALLOC | P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain,
+ gaddr_to_gfn(nv->nv_vvmcxaddr),
+ &p2mt, P2M_ALLOC | P2M_UNSHARE);
if ( !page )
return NULL;
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 977c1bc54f..3d75a0f133 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -242,16 +242,16 @@ static void dump_hypercall(const struct domain *d)
static void enable_hypercall_page(struct domain *d)
{
- unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.pfn;
- struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ gfn_t gfn = _gfn(d->arch.hvm.viridian->hypercall_gpa.pfn);
+ struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
uint8_t *p;
if ( !page || !get_page_type(page, PGT_writable_page) )
{
if ( page )
put_page(page);
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
@@ -719,13 +719,13 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
{
- unsigned long gmfn = vp->msr.pfn;
+ gfn_t gfn = _gfn(vp->msr.pfn);
struct page_info *page;
if ( vp->ptr )
return;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
if ( !page )
goto fail;
@@ -746,8 +746,8 @@ void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
return;
fail:
- gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+ gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}
void viridian_unmap_guest_page(struct viridian_page *vp)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a1e3a19c0a..f1898c63c5 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -681,7 +681,7 @@ static int vmx_restore_cr0_cr3(
{
if ( cr0 & X86_CR0_PG )
{
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3),
NULL, P2M_ALLOC);
if ( !page )
{
@@ -1321,7 +1321,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
goto crash;
- page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+ page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE);
if ( !page )
{
/* Ideally you don't want to crash but rather go into a wait
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 84b47ef277..eee4af3206 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -718,11 +718,11 @@ static void nvmx_update_apic_access_address(struct vcpu *v)
if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
{
p2m_type_t p2mt;
- unsigned long apic_gpfn;
+ gfn_t apic_gfn;
struct page_info *apic_pg;
- apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
- apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+ apic_gfn = gaddr_to_gfn(get_vvmcs(v, APIC_ACCESS_ADDR));
+ apic_pg = get_page_from_gfn(v->domain, apic_gfn, &p2mt, P2M_ALLOC);
ASSERT(apic_pg && !p2m_is_paging(p2mt));
__vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
put_page(apic_pg);
@@ -739,11 +739,11 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v)
if ( ctrl & CPU_BASED_TPR_SHADOW )
{
p2m_type_t p2mt;
- unsigned long vapic_gpfn;
+ gfn_t vapic_gfn;
struct page_info *vapic_pg;
- vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
- vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+ vapic_gfn = gaddr_to_gfn(get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR));
+ vapic_pg = get_page_from_gfn(v->domain, vapic_gfn, &p2mt, P2M_ALLOC);
ASSERT(vapic_pg && !p2m_is_paging(p2mt));
__vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
put_page(vapic_pg);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2feb7a5993..b9a656643b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2150,7 +2150,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ?
P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC;
- page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q);
+ page = get_page_from_gfn(pg_dom, _gfn(l1e_get_pfn(nl1e)), &p2mt, q);
if ( p2m_is_paged(p2mt) )
{
@@ -3433,7 +3433,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
rc = -EINVAL;
@@ -3499,7 +3500,8 @@ long do_mmuext_op(
if ( paging_mode_refcounts(pg_owner) )
break;
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL,
+ P2M_ALLOC);
if ( unlikely(!page) )
{
gdprintk(XENLOG_WARNING,
@@ -3724,7 +3726,8 @@ long do_mmuext_op(
}
case MMUEXT_CLEAR_PAGE:
- page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
+ P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && page )
{
put_page(page);
@@ -3752,7 +3755,7 @@ long do_mmuext_op(
{
struct page_info *src_page, *dst_page;
- src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt,
+ src_page = get_page_from_gfn(pg_owner, _gfn(op.arg2.src_mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && src_page )
{
@@ -3768,7 +3771,7 @@ long do_mmuext_op(
break;
}
- dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt,
+ dst_page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt,
P2M_ALLOC);
if ( unlikely(p2mt != p2m_ram_rw) && dst_page )
{
@@ -3856,7 +3859,8 @@ long do_mmu_update(
{
struct mmu_update req;
void *va = NULL;
- unsigned long gpfn, gmfn;
+ unsigned long gpfn;
+ gfn_t gfn;
struct page_info *page;
unsigned int cmd, i = 0, done = 0, pt_dom;
struct vcpu *curr = current, *v = curr;
@@ -3969,8 +3973,8 @@ long do_mmu_update(
rc = -EINVAL;
req.ptr -= cmd;
- gmfn = req.ptr >> PAGE_SHIFT;
- page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC);
+ gfn = gaddr_to_gfn(req.ptr);
+ page = get_page_from_gfn(pt_owner, gfn, &p2mt, P2M_ALLOC);
if ( unlikely(!page) || p2mt != p2m_ram_rw )
{
@@ -3978,7 +3982,7 @@ long do_mmu_update(
put_page(page);
if ( p2m_is_paged(p2mt) )
{
- p2m_mem_paging_populate(pt_owner, gmfn);
+ p2m_mem_paging_populate(pt_owner, gfn_x(gfn));
rc = -ENOENT;
}
else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 587c062481..1ce012600c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2967,7 +2967,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
* Take a refcnt on the mfn. NB: following supported for foreign mapping:
* ram_rw | ram_logdirty | ram_ro | paging_out.
*/
- page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
+ page = get_page_from_gfn(fdom, _gfn(fgfn), &p2mt, P2M_ALLOC);
if ( !page ||
!p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) )
{
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index 1e6024c71f..bb11f28531 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -398,15 +398,15 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
struct sh_emulate_ctxt *sh_ctxt)
{
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
mfn_t mfn;
p2m_type_t p2mt;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
/* Translate the VA to a GFN. */
- gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
- if ( gfn == gfn_x(INVALID_GFN) )
+ gfn = _gfn(paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec));
+ if ( gfn_eq(gfn, INVALID_GFN) )
{
x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt);
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 3a3c15890b..4f3f438614 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -229,7 +229,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
ret = -EINVAL;
- page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(current->domain, _gfn(info.gmfn),
+ NULL, P2M_ALLOC);
if ( !page )
break;
if ( !get_page_type(page, PGT_writable_page) )
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index f22beb1f3c..899ed45c6a 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -112,7 +112,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries)
{
struct page_info *page;
- page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(frames[i]), NULL, P2M_ALLOC);
if ( !page )
goto fail;
if ( !get_page_type(page, PGT_seg_desc_page) )
@@ -219,7 +219,7 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d)
if ( !IS_ALIGNED(gaddr, sizeof(d)) || !check_descriptor(currd, &d) )
return -EINVAL;
- page = get_page_from_gfn(currd, gfn_x(gfn), NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index e24b84f46a..552b669623 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -756,12 +756,12 @@ static int write_cr(unsigned int reg, unsigned long val,
case 3: /* Write CR3 */
{
struct domain *currd = curr->domain;
- unsigned long gfn;
+ gfn_t gfn;
struct page_info *page;
int rc;
- gfn = !is_pv_32bit_domain(currd)
- ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val);
+ gfn = _gfn(!is_pv_32bit_domain(currd)
+ ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val));
page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC);
if ( !page )
break;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index 2b0dadc8da..00df5edd6f 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -110,7 +110,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset)
if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
return false;
- page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+ page = get_page_from_gfn(currd, _gfn(l1e_get_pfn(gl1e)), NULL, P2M_ALLOC);
if ( unlikely(!page) )
return false;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4f524dc71e..e5de86845f 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -826,7 +826,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
case 0: /* Write hypercall page */
{
void *hypercall_page;
- unsigned long gmfn = val >> PAGE_SHIFT;
+ gfn_t gfn = gaddr_to_gfn(val);
unsigned int page_index = val & (PAGE_SIZE - 1);
struct page_info *page;
p2m_type_t t;
@@ -839,7 +839,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
return X86EMUL_EXCEPTION;
}
- page = get_page_from_gfn(d, gmfn, &t, P2M_ALLOC);
+ page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
if ( !page || !get_page_type(page, PGT_writable_page) )
{
@@ -848,13 +848,14 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val)
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, gmfn);
+ p2m_mem_paging_populate(d, gfn_x(gfn));
return X86EMUL_RETRY;
}
gdprintk(XENLOG_WARNING,
- "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
- gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
+ "Bad GFN %"PRI_gfn" (MFN %"PRI_mfn") to MSR %08x\n",
+ gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN),
+ base);
return X86EMUL_EXCEPTION;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index b4eb476a9c..8435528383 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1237,7 +1237,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
if ( (v != current) && !(v->pause_flags & VPF_down) )
return -EINVAL;
- page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(gfn), NULL, P2M_ALLOC);
if ( !page )
return -EINVAL;
diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c
index 230f440f14..073981ab43 100644
--- a/xen/common/event_fifo.c
+++ b/xen/common/event_fifo.c
@@ -361,7 +361,7 @@ static const struct evtchn_port_ops evtchn_port_ops_fifo =
.print_state = evtchn_fifo_print_state,
};
-static int map_guest_page(struct domain *d, uint64_t gfn, void **virt)
+static int map_guest_page(struct domain *d, gfn_t gfn, void **virt)
{
struct page_info *p;
@@ -422,7 +422,7 @@ static int setup_control_block(struct vcpu *v)
return 0;
}
-static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+static int map_control_block(struct vcpu *v, gfn_t gfn, uint32_t offset)
{
void *virt;
unsigned int i;
@@ -508,7 +508,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
{
struct domain *d = current->domain;
uint32_t vcpu_id;
- uint64_t gfn;
+ gfn_t gfn;
uint32_t offset;
struct vcpu *v;
int rc;
@@ -516,7 +516,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
vcpu_id = init_control->vcpu;
- gfn = init_control->control_gfn;
+ gfn = _gfn(init_control->control_gfn);
offset = init_control->offset;
if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
@@ -578,7 +578,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
return rc;
}
-static int add_page_to_event_array(struct domain *d, unsigned long gfn)
+static int add_page_to_event_array(struct domain *d, gfn_t gfn)
{
void *virt;
unsigned int slot;
@@ -628,7 +628,7 @@ int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array)
return -EOPNOTSUPP;
spin_lock(&d->event_lock);
- rc = add_page_to_event_array(d, expand_array->array_gfn);
+ rc = add_page_to_event_array(d, _gfn(expand_array->array_gfn));
spin_unlock(&d->event_lock);
return rc;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 6e4b85674d..7e3c3bb7af 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1388,7 +1388,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
- page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
+ page = get_page_from_gfn(d, _gfn(xrfp.gpfn), NULL, P2M_ALLOC);
if ( page )
{
rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
@@ -1659,7 +1659,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly,
p2m_type_t p2mt;
struct page_info *page;
- page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q);
+ page = get_page_from_gfn(d, gfn, &p2mt, q);
#ifdef CONFIG_HAS_MEM_PAGING
if ( p2m_is_paging(p2mt) )
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 5fdb6e8183..f1d01ceb3f 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -304,7 +304,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn,
p2m_type_t *t);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
mfn_t mfn;
p2m_type_t _t;
@@ -315,7 +315,7 @@ static inline struct page_info *get_page_from_gfn(
* not auto-translated.
*/
if ( likely(d != dom_xen) )
- return p2m_get_page_from_gfn(d, _gfn(gfn), t);
+ return p2m_get_page_from_gfn(d, gfn, t);
if ( !t )
t = &_t;
@@ -326,7 +326,7 @@ static inline struct page_info *get_page_from_gfn(
* DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the
* page.
*/
- mfn = _mfn(gfn);
+ mfn = _mfn(gfn_x(gfn));
page = mfn_to_page(mfn);
if ( !mfn_valid(mfn) || !get_page(page, d) )
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 39dae242b0..da842487bb 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -487,18 +487,22 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
p2m_query_t q);
static inline struct page_info *get_page_from_gfn(
- struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+ struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q)
{
struct page_info *page;
+ mfn_t mfn;
if ( paging_mode_translate(d) )
- return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q);
+ return p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, t, NULL, q);
/* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
if ( t )
*t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
- page = mfn_to_page(_mfn(gfn));
- return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
+
+ mfn = _mfn(gfn_x(gfn));
+
+ page = mfn_to_page(mfn);
+ return mfn_valid(mfn) && get_page(page, d) ? page : NULL;
}
/* General conversion function from mfn to gfn */
--
2.17.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
> -----Original Message-----
> From: julien@xen.org <julien@xen.org>
> Sent: 22 March 2020 16:14
> To: xen-devel@lists.xenproject.org
> Cc: julien@xen.org; Julien Grall <julien.grall@arm.com>; Stefano Stabellini <sstabellini@kernel.org>;
> Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>; Andrew Cooper <andrew.cooper3@citrix.com>; George
> Dunlap <george.dunlap@citrix.com>; Ian Jackson <ian.jackson@eu.citrix.com>; Jan Beulich
> <jbeulich@suse.com>; Wei Liu <wl@xen.org>; Roger Pau Monné <roger.pau@citrix.com>; Paul Durrant
> <paul@xen.org>; Jun Nakajima <jun.nakajima@intel.com>; Kevin Tian <kevin.tian@intel.com>; Tim Deegan
> <tim@xen.org>
> Subject: [PATCH 17/17] xen: Switch parameter in get_page_from_gfn to use typesafe gfn
>
> From: Julien Grall <julien.grall@arm.com>
>
> No functional change intended.
>
> Only reasonable clean-ups are done in this patch. The rest will use _gfn
> for the time being.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
Definitely an improvement so...
Reviewed-by: Paul Durrant <paul@xen.org>
But a couple of things I noticed...
[snip]
> diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
> index 5d5a746a25..3c29ff86be 100644
> --- a/xen/arch/x86/hvm/domain.c
> +++ b/xen/arch/x86/hvm/domain.c
> @@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
> if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
> {
> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
> - struct page_info *page = get_page_from_gfn(v->domain,
> - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
> + struct page_info *page;
> +
> + page = get_page_from_gfn(v->domain,
> + gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
Should this be cr3_to_gfn?
> NULL, P2M_ALLOC);
> if ( !page )
> {
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index a3d115b650..9f720e7aa1 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2216,7 +2216,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
> {
> struct vcpu *v = current;
> struct domain *d = v->domain;
> - unsigned long gfn, old_value = v->arch.hvm.guest_cr[0];
> + unsigned long old_value = v->arch.hvm.guest_cr[0];
> struct page_info *page;
>
> HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
> @@ -2271,7 +2271,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
> if ( !paging_mode_hap(d) )
> {
> /* The guest CR3 must be pointing to the guest physical. */
> - gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
> + gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]);
> +
Same here.
> page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
> if ( !page )
> {
> @@ -2363,7 +2364,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
> {
> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
> HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
> - page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
> + page = get_page_from_gfn(v->domain, cr3_to_gfn(value),
> NULL, P2M_ALLOC);
> if ( !page )
> goto bad_cr3;
> @@ -3191,7 +3192,7 @@ enum hvm_translation_result hvm_translate_get_page(
> && hvm_mmio_internal(gfn_to_gaddr(gfn)) )
> return HVMTRANS_bad_gfn_to_mfn;
>
> - page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
> + page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
>
> if ( !page )
> return HVMTRANS_bad_gfn_to_mfn;
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 32d8d847f2..a9abd6d3f1 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -299,7 +299,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
> {
> if ( c->cr0 & X86_CR0_PG )
> {
> - page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
> + page = get_page_from_gfn(v->domain, cr3_to_gfn(c->cr3),
> NULL, P2M_ALLOC);
> if ( !page )
> {
> @@ -2230,9 +2230,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr)
> return NULL;
>
> /* Need to translate L1-GPA to MPA */
> - page = get_page_from_gfn(v->domain,
> - nv->nv_vvmcxaddr >> PAGE_SHIFT,
> - &p2mt, P2M_ALLOC | P2M_UNSHARE);
> + page = get_page_from_gfn(v->domain,
> + gaddr_to_gfn(nv->nv_vvmcxaddr),
> + &p2mt, P2M_ALLOC | P2M_UNSHARE);
> if ( !page )
> return NULL;
>
> diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
> index 977c1bc54f..3d75a0f133 100644
> --- a/xen/arch/x86/hvm/viridian/viridian.c
> +++ b/xen/arch/x86/hvm/viridian/viridian.c
> @@ -242,16 +242,16 @@ static void dump_hypercall(const struct domain *d)
>
> static void enable_hypercall_page(struct domain *d)
> {
> - unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.pfn;
> - struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
> + gfn_t gfn = _gfn(d->arch.hvm.viridian->hypercall_gpa.pfn);
> + struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
> uint8_t *p;
>
> if ( !page || !get_page_type(page, PGT_writable_page) )
> {
> if ( page )
> put_page(page);
> - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
> + gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> + gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
> return;
> }
>
> @@ -719,13 +719,13 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name,
>
> void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
> {
> - unsigned long gmfn = vp->msr.pfn;
> + gfn_t gfn = _gfn(vp->msr.pfn);
> struct page_info *page;
>
> if ( vp->ptr )
> return;
>
> - page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
> + page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
> if ( !page )
> goto fail;
>
> @@ -746,8 +746,8 @@ void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
> return;
>
> fail:
> - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
> + gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
> + gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
> }
>
> void viridian_unmap_guest_page(struct viridian_page *vp)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index a1e3a19c0a..f1898c63c5 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -681,7 +681,7 @@ static int vmx_restore_cr0_cr3(
> {
> if ( cr0 & X86_CR0_PG )
> {
> - page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
> + page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3),
And here.
> NULL, P2M_ALLOC);
> if ( !page )
> {
> @@ -1321,7 +1321,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
> if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
> goto crash;
>
> - page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
> + page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE);
And here.
Paul
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Hi Paul,
On 23/03/2020 08:37, Paul Durrant wrote:
>> -----Original Message-----
>> From: julien@xen.org <julien@xen.org>
>> Sent: 22 March 2020 16:14
>> To: xen-devel@lists.xenproject.org
>> Cc: julien@xen.org; Julien Grall <julien.grall@arm.com>; Stefano Stabellini <sstabellini@kernel.org>;
>> Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>; Andrew Cooper <andrew.cooper3@citrix.com>; George
>> Dunlap <george.dunlap@citrix.com>; Ian Jackson <ian.jackson@eu.citrix.com>; Jan Beulich
>> <jbeulich@suse.com>; Wei Liu <wl@xen.org>; Roger Pau Monné <roger.pau@citrix.com>; Paul Durrant
>> <paul@xen.org>; Jun Nakajima <jun.nakajima@intel.com>; Kevin Tian <kevin.tian@intel.com>; Tim Deegan
>> <tim@xen.org>
>> Subject: [PATCH 17/17] xen: Switch parameter in get_page_from_gfn to use typesafe gfn
>>
>> From: Julien Grall <julien.grall@arm.com>
>>
>> No functional change intended.
>>
>> Only reasonable clean-ups are done in this patch. The rest will use _gfn
>> for the time being.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>
> Definitely an improvement so...
>
> Reviewed-by: Paul Durrant <paul@xen.org>
>
> But a couple of things I noticed...
>
> [snip]
>> diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
>> index 5d5a746a25..3c29ff86be 100644
>> --- a/xen/arch/x86/hvm/domain.c
>> +++ b/xen/arch/x86/hvm/domain.c
>> @@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
>> if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
>> {
>> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
>> - struct page_info *page = get_page_from_gfn(v->domain,
>> - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
>> + struct page_info *page;
>> +
>> + page = get_page_from_gfn(v->domain,
>> + gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
>
> Should this be cr3_to_gfn?
Definitely yes. I thought I spotted all the uses when introducing the new
helper but it looks like not. I will update the patch in the new version
to use cr3_to_gfn() everywhere you suggested.
Thanks.
Cheers,
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 22.03.2020 17:14, julien@xen.org wrote:
> --- a/xen/arch/x86/hvm/domain.c
> +++ b/xen/arch/x86/hvm/domain.c
> @@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
> if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
> {
> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
> - struct page_info *page = get_page_from_gfn(v->domain,
> - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
> + struct page_info *page;
> +
> + page = get_page_from_gfn(v->domain,
> + gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
My earlier comment on this remains - I think this conversion makes
the problem this expression has more hidden than with the shift.
This would better use a gfn_from_cr3() helper (or whatever it'll
be that it gets named). Same elsewhere in this patch then.
> @@ -2363,7 +2364,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
> {
> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
> HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
> - page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
> + page = get_page_from_gfn(v->domain, cr3_to_gfn(value),
Oh, seeing this I recall Paul did point out the above already.
> @@ -508,7 +508,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
> {
> struct domain *d = current->domain;
> uint32_t vcpu_id;
> - uint64_t gfn;
> + gfn_t gfn;
> uint32_t offset;
> struct vcpu *v;
> int rc;
> @@ -516,7 +516,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
> init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
>
> vcpu_id = init_control->vcpu;
> - gfn = init_control->control_gfn;
> + gfn = _gfn(init_control->control_gfn);
There's silent truncation here now for Arm32, afaict. Are we really
okay with this?
Jan
Hi Jan,
On 27/03/2020 13:50, Jan Beulich wrote:
> On 22.03.2020 17:14, julien@xen.org wrote:
>> --- a/xen/arch/x86/hvm/domain.c
>> +++ b/xen/arch/x86/hvm/domain.c
>> @@ -296,8 +296,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
>> if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
>> {
>> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
>> - struct page_info *page = get_page_from_gfn(v->domain,
>> - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT,
>> + struct page_info *page;
>> +
>> + page = get_page_from_gfn(v->domain,
>> + gaddr_to_gfn(v->arch.hvm.guest_cr[3]),
>
> My earlier comment on this remains - I think this conversion makes
> the problem this expression has more hidden than with the shift.
> This would better use a gfn_from_cr3() helper (or whatever it'll
> be that it gets named). Same elsewhere in this patch then.
I will have a closer look at the *cr3 helpers and reply with some suggestions.
>
>> @@ -2363,7 +2364,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer)
>> {
>> /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
>> HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
>> - page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
>> + page = get_page_from_gfn(v->domain, cr3_to_gfn(value),
>
> Oh, seeing this I recall Paul did point out the above already.
>
>> @@ -508,7 +508,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
>> {
>> struct domain *d = current->domain;
>> uint32_t vcpu_id;
>> - uint64_t gfn;
>> + gfn_t gfn;
>> uint32_t offset;
>> struct vcpu *v;
>> int rc;
>> @@ -516,7 +516,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control)
>> init_control->link_bits = EVTCHN_FIFO_LINK_BITS;
>>
>> vcpu_id = init_control->vcpu;
>> - gfn = init_control->control_gfn;
>> + gfn = _gfn(init_control->control_gfn);
>
> There's silent truncation here now for Arm32, afaict. Are we really
> okay with this?
Well, the truncation was already silently happening as we call
get_page_from_gfn() in map_guest_page(). So it is not worse than the
current situation.
Although, there is a slight advantage with the new code as you can more
easily spot potential truncation. Indeed, you could add some type check
in _gfn().
Cheers,
--
Julien Grall
© 2016 - 2026 Red Hat, Inc.