The primary change here is patch 2, with the others being cleanup
noticed to be worthwhile along the road.

1: don't ignore p2m_remove_page()'s return value
2: don't assert that the passed in MFN matches for a remove
3: make p2m_remove_page()'s parameters type-safe
4: drop pointless nested variable from guest_physmap_add_entry()
5: use available local variable in guest_physmap_add_entry()

Jan
It's not very nice to return from guest_physmap_add_entry() after
perhaps already having made some changes to the P2M, but this is pre-
existing practice in the function, and imo better than ignoring errors.
Take the liberty and replace an mfn_add() instance with a local variable
already holding the result (as proven by the check immediately ahead).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -767,8 +767,7 @@ void p2m_final_teardown(struct domain *d
     p2m_teardown_hostp2m(d);
 }
 
-
-static int
+static int __must_check
 p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
                 unsigned int page_order)
 {
@@ -973,9 +972,9 @@ guest_physmap_add_entry(struct domain *d
                 ASSERT(mfn_valid(omfn));
                 P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                           gfn_x(ogfn) , mfn_x(omfn));
-                if ( mfn_eq(omfn, mfn_add(mfn, i)) )
-                    p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(mfn_add(mfn, i)),
-                                    0);
+                if ( mfn_eq(omfn, mfn_add(mfn, i)) &&
+                     (rc = p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(omfn), 0)) )
+                    goto out;
             }
         }
     }
@@ -997,6 +996,7 @@ guest_physmap_add_entry(struct domain *d
         }
     }
 
+ out:
     p2m_unlock(p2m);
 
     return rc;
@@ -2705,9 +2705,9 @@ int p2m_change_altp2m_gfn(struct domain
     if ( gfn_eq(new_gfn, INVALID_GFN) )
     {
         mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
-        if ( mfn_valid(mfn) )
-            p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
-        rc = 0;
+        rc = mfn_valid(mfn)
+             ? p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K)
+             : 0;
         goto out;
     }
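A side note on the __must_check added above: Xen defines __must_check in
terms of GCC's warn_unused_result attribute, so any caller that silently
drops the return value now gets a build-time diagnostic, which is exactly
the bug class this patch closes. A minimal standalone sketch (hypothetical
remove_entry(), not Xen code):

#include <stdio.h>

/* Simplified stand-in for Xen's __must_check. */
#define __must_check __attribute__((warn_unused_result))

/* Hypothetical fallible removal, standing in for p2m_remove_page(). */
static int __must_check remove_entry(unsigned long gfn)
{
    return gfn ? 0 : -1;
}

int main(void)
{
    int rc;

    remove_entry(42);        /* GCC warns: ignoring return value */

    rc = remove_entry(42);   /* handled: no warning */
    if ( rc )
        fprintf(stderr, "removal failed: %d\n", rc);

    return 0;
}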
On 01/04/2020 12:38, Jan Beulich wrote:
> It's not very nice to return from guest_physmap_add_entry() after
> perhaps already having made some changes to the P2M, but this is pre-
> existing practice in the function, and imo better than ignoring errors.
>
> Take the liberty and replace an mfn_add() instance with a local variable
> already holding the result (as proven by the check immediately ahead).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
guest_physmap_remove_page() gets handed an MFN from the outside, yet
takes the necessary lock to prevent further changes to the GFN <-> MFN
mapping itself. While some callers, in particular guest_remove_page()
(by way of having called get_gfn_query()), hold the GFN lock already,
various others (most notably perhaps the 2nd instance in
xenmem_add_to_physmap_one()) don't. While it also is an option to fix
all the callers, deal with the issue in p2m_remove_page() instead:
Replace the ASSERT() by a conditional and split the loop into two, such
that all checking gets done before any modification would occur.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -773,7 +773,6 @@ p2m_remove_page(struct p2m_domain *p2m,
 {
     unsigned long i;
     gfn_t gfn = _gfn(gfn_l);
-    mfn_t mfn_return;
    p2m_type_t t;
    p2m_access_t a;
 
@@ -784,15 +783,26 @@ p2m_remove_page(struct p2m_domain *p2m,
     ASSERT(gfn_locked_by_me(p2m, gfn));
     P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
 
+    for ( i = 0; i < (1UL << page_order); )
+    {
+        unsigned int cur_order;
+        mfn_t mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
+                                          &cur_order, NULL);
+
+        if ( p2m_is_valid(t) &&
+             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
+            return -EILSEQ;
+
+        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
+    }
+
     if ( mfn_valid(_mfn(mfn)) )
     {
         for ( i = 0; i < (1UL << page_order); i++ )
         {
-            mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
-                                        NULL, NULL);
+            p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
-            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
     return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
                          p2m->default_access);
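The structure of the fix is worth seeing in isolation: a read-only
validation pass that may still fail with -EILSEQ, followed by a
modification pass that can no longer trip over a mismatch. A minimal
sketch against a hypothetical flat table (not the real P2M types, and
unlike the real loop it strides page by page instead of by each entry's
cur_order):

#include <errno.h>

#define TABLE_SIZE    256
#define INVALID_ENTRY (~0UL)

/* Hypothetical flat gfn -> mfn table standing in for the P2M. */
static unsigned long table[TABLE_SIZE];

/*
 * Two-pass removal in the spirit of the patch: verify that every slot
 * maps the expected MFN before touching anything, so that a mismatch
 * leaves the table entirely unmodified.
 */
static int remove_range(unsigned long gfn, unsigned long mfn,
                        unsigned int page_order)
{
    unsigned long i;

    for ( i = 0; i < (1UL << page_order); i++ )  /* pass 1: checks only */
        if ( table[gfn + i] != mfn + i )
            return -EILSEQ;                      /* nothing changed yet */

    for ( i = 0; i < (1UL << page_order); i++ )  /* pass 2: modification */
        table[gfn + i] = INVALID_ENTRY;

    return 0;
}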
On 01/04/2020 12:39, Jan Beulich wrote:
> guest_physmap_remove_page() gets handed an MFN from the outside, yet
> takes the necessary lock to prevent further changes to the GFN <-> MFN
> mapping itself. While some callers, in particular guest_remove_page()
> (by way of having called get_gfn_query()), hold the GFN lock already,
> various others (most notably perhaps the 2nd instance in
> xenmem_add_to_physmap_one()) don't. While it also is an option to fix
> all the callers, deal with the issue in p2m_remove_page() instead:
> Replace the ASSERT() by a conditional and split the loop into two, such
> that all checking gets done before any modification would occur.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Make p2m_remove_page()'s parameters type-safe (gfn_t/mfn_t instead of
raw unsigned long). Also add a couple of blank lines.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -768,11 +768,10 @@ void p2m_final_teardown(struct domain *d
 }
 
 static int __must_check
-p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
+p2m_remove_page(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
                 unsigned int page_order)
 {
     unsigned long i;
-    gfn_t gfn = _gfn(gfn_l);
     p2m_type_t t;
     p2m_access_t a;
 
@@ -781,7 +780,7 @@ p2m_remove_page(struct p2m_domain *p2m,
         return 0;
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
-    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
+    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_x(gfn), mfn_x(mfn));
 
     for ( i = 0; i < (1UL << page_order); )
     {
@@ -790,21 +789,23 @@ p2m_remove_page(struct p2m_domain *p2m,
                                           &cur_order, NULL);
 
         if ( p2m_is_valid(t) &&
-             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
+             (!mfn_valid(mfn) || !mfn_eq(mfn_add(mfn, i), mfn_return)) )
             return -EILSEQ;
 
-        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
+        i += (1UL << cur_order) -
+             (gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1));
     }
 
-    if ( mfn_valid(_mfn(mfn)) )
+    if ( mfn_valid(mfn) )
     {
         for ( i = 0; i < (1UL << page_order); i++ )
         {
             p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
             if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
-                set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
+                set_gpfn_from_mfn(mfn_x(mfn) + i, INVALID_M2P_ENTRY);
         }
     }
+
     return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
                          p2m->default_access);
 }
@@ -815,9 +816,11 @@ guest_physmap_remove_page(struct domain
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc;
+
     gfn_lock(p2m, gfn, page_order);
-    rc = p2m_remove_page(p2m, gfn_x(gfn), mfn_x(mfn), page_order);
+    rc = p2m_remove_page(p2m, gfn, mfn, page_order);
     gfn_unlock(p2m, gfn, page_order);
+
     return rc;
 }
 
@@ -983,7 +986,7 @@ guest_physmap_add_entry(struct domain *d
                 P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                           gfn_x(ogfn) , mfn_x(omfn));
                 if ( mfn_eq(omfn, mfn_add(mfn, i)) &&
-                     (rc = p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(omfn), 0)) )
+                     (rc = p2m_remove_page(p2m, ogfn, omfn, 0)) )
                     goto out;
             }
         }
@@ -2716,7 +2719,7 @@ int p2m_change_altp2m_gfn(struct domain
     {
         mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
         rc = mfn_valid(mfn)
-             ? p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K)
+             ? p2m_remove_page(ap2m, old_gfn, mfn, PAGE_ORDER_4K)
              : 0;
         goto out;
     }
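For readers who haven't met gfn_t/mfn_t: Xen wraps these integers in
single-member structs, so confusing a guest frame number with a machine
frame number becomes a compile error rather than a silent bug. A
simplified sketch of the mechanism (modelled on the TYPE_SAFE() macro in
Xen's typesafe.h; the real one degrades to plain typedefs in release
builds so the generated code is unchanged):

#include <stdbool.h>

/* Wrap the integer in a struct so gfn_t and mfn_t are distinct types. */
#define TYPE_SAFE(_type, _name)                              \
    typedef struct { _type _name; } _name ## _t;             \
    static inline _name ## _t _ ## _name(_type n)            \
    { return (_name ## _t) { n }; }                          \
    static inline _type _name ## _x(_name ## _t n)           \
    { return n._name; }

TYPE_SAFE(unsigned long, gfn);
TYPE_SAFE(unsigned long, mfn);

static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    return _mfn(mfn_x(mfn) + i);
}

static inline bool mfn_eq(mfn_t a, mfn_t b)
{
    return mfn_x(a) == mfn_x(b);
}

/* mfn_eq(_gfn(1), _mfn(1)) now fails to compile, which is the point. */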
On 01/04/2020 12:39, Jan Beulich wrote:
> @@ -790,21 +789,23 @@ p2m_remove_page(struct p2m_domain *p2m,
>                                            &cur_order, NULL);
>
>          if ( p2m_is_valid(t) &&
> -             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
> +             (!mfn_valid(mfn) || !mfn_eq(mfn_add(mfn, i), mfn_return)) )
>              return -EILSEQ;
>
> -        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
> +        i += (1UL << cur_order) -
> +             (gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1));

We're gaining a number of expressions starting to look like this, but
honestly, "gfn_x(gfn) + i" is equally typesafe, shorter, and easier to
read IMO.

~Andrew
On 03.04.2020 00:43, Andrew Cooper wrote:
> On 01/04/2020 12:39, Jan Beulich wrote:
>> @@ -790,21 +789,23 @@ p2m_remove_page(struct p2m_domain *p2m,
>>                                            &cur_order, NULL);
>>
>>          if ( p2m_is_valid(t) &&
>> -             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
>> +             (!mfn_valid(mfn) || !mfn_eq(mfn_add(mfn, i), mfn_return)) )
>>              return -EILSEQ;
>>
>> -        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
>> +        i += (1UL << cur_order) -
>> +             (gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1));
>
> We're gaining a number of expressions starting to look like this, but
> honestly, "gfn_x(gfn) + i" is equally typesafe, shorter, and easier to
> read IMO.

Good point - in recent reviews I've commented to the same effect on
patches from Julien. This patch is way too old for me to have recalled
that I did it like this here, too. Will switch (also elsewhere, in case
I find more that I introduced).

Jan
On 03.04.2020 00:43, Andrew Cooper wrote:
> On 01/04/2020 12:39, Jan Beulich wrote:
>> @@ -790,21 +789,23 @@ p2m_remove_page(struct p2m_domain *p2m,
>>                                            &cur_order, NULL);
>>
>>          if ( p2m_is_valid(t) &&
>> -             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
>> +             (!mfn_valid(mfn) || !mfn_eq(mfn_add(mfn, i), mfn_return)) )
>>              return -EILSEQ;
>>
>> -        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
>> +        i += (1UL << cur_order) -
>> +             (gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1));
>
> We're gaining a number of expressions starting to look like this, but
> honestly, "gfn_x(gfn) + i" is equally typesafe, shorter, and easier to
> read IMO.

May I, just like you said for patch 3, imply A-b with this adjusted?

Jan
On 03/04/2020 10:14, Jan Beulich wrote:
> On 03.04.2020 00:43, Andrew Cooper wrote:
>> On 01/04/2020 12:39, Jan Beulich wrote:
>>> @@ -790,21 +789,23 @@ p2m_remove_page(struct p2m_domain *p2m,
>>>                                            &cur_order, NULL);
>>>
>>>          if ( p2m_is_valid(t) &&
>>> -             (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
>>> +             (!mfn_valid(mfn) || !mfn_eq(mfn_add(mfn, i), mfn_return)) )
>>>              return -EILSEQ;
>>>
>>> -        i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
>>> +        i += (1UL << cur_order) -
>>> +             (gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1));
>> We're gaining a number of expressions starting to look like this, but
>> honestly, "gfn_x(gfn) + i" is equally typesafe, shorter, and easier to
>> read IMO.
> May I, just like you said for patch 3, imply A-b with this adjusted?

Yes. Sorry - it was late when I was reviewing.

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
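Concretely, assuming wrappers like those sketched after patch 3, the two
spellings under discussion type-check identically, since the masking
happens on a plain unsigned long either way (hypothetical helper):

/* gfn_t, gfn_x() and gfn_add() as in the earlier TYPE_SAFE() sketch. */
static unsigned long offset_into_entry(gfn_t gfn, unsigned long i,
                                       unsigned int cur_order)
{
    /* Wrapper-heavy spelling from the patch:
     *   gfn_x(gfn_add(gfn, i)) & ((1UL << cur_order) - 1)
     * Shorter, equally type-safe spelling preferred in review: */
    return (gfn_x(gfn) + i) & ((1UL << cur_order) - 1);
}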
There's an outer scope rc already, and its use for the mem-sharing logic
does not conflict with its use elsewhere in the function.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -905,7 +905,6 @@ guest_physmap_add_entry(struct domain *d
         if ( p2m_is_shared(ot) )
         {
             /* Do an unshare to cleanly take care of all corner cases. */
-            int rc;
             rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
             if ( rc )
             {
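The hazard behind nested declarations like the one removed here is
shadowing: the inner rc hides the outer one, so an error recorded inside
the block can be silently lost once the block ends. In this function the
inner block returned on error anyway, making the shadow merely redundant,
but the general failure mode looks like this (standalone illustration,
not Xen code):

#include <stdio.h>

static int fallible(void) { return -1; }

int main(void)
{
    int rc = 0;

    {
        int rc = fallible();   /* shadows the outer rc; -Wshadow warns */
        if ( rc )
            fprintf(stderr, "inner rc = %d\n", rc);
        /* Falling out of the block discards the inner rc... */
    }

    /* ...so the outer rc is still 0 and the failure is masked. */
    printf("outer rc = %d\n", rc);
    return rc;
}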
On 01/04/2020 12:40, Jan Beulich wrote:
> There's an outer scope rc already, and its use for the mem-sharing logic
> does not conflict with its use elsewhere in the function.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
The domain is being passed in - no need to obtain it from p2m->domain.
Also drop a pointless cast while touching this code anyway.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -905,7 +905,7 @@ guest_physmap_add_entry(struct domain *d
         if ( p2m_is_shared(ot) )
         {
             /* Do an unshare to cleanly take care of all corner cases. */
-            rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
+            rc = mem_sharing_unshare_page(d, gfn_x(gfn_add(gfn, i)));
             if ( rc )
             {
                 p2m_unlock(p2m);
@@ -922,8 +922,7 @@ guest_physmap_add_entry(struct domain *d
                  * Foreign domains are okay to place an event as they
                  * won't go to sleep.
                  */
-                (void)mem_sharing_notify_enomem(p2m->domain,
-                                                gfn_x(gfn_add(gfn, i)), false);
+                mem_sharing_notify_enomem(d, gfn_x(gfn_add(gfn, i)), false);
                 return rc;
             }
             omfn = p2m->get_entry(p2m, gfn_add(gfn, i),
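On the dropped cast: a (void) cast only documents intent where the
return value would otherwise be diagnosed, and GCC's warn_unused_result
deliberately cannot be silenced by such a cast anyway, so for a callee
not marked __must_check the cast is pure noise. A small sketch
(hypothetical functions):

#define __must_check __attribute__((warn_unused_result))

static int plain(void)                { return 0; }
static int __must_check checked(void) { return 0; }

static void demo(void)
{
    (void)plain();   /* pointless: there is no warning to suppress */
    plain();         /* equivalent, and what the patch switches to */

    (void)checked(); /* GCC still warns here: warn_unused_result is
                      * deliberately immune to (void) casts */
}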
On 01/04/2020 12:40, Jan Beulich wrote:
> The domain is being passed in - no need to obtain it from p2m->domain.
> Also drop a pointless cast while touching this code anyway.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -905,7 +905,7 @@ guest_physmap_add_entry(struct domain *d
> if ( p2m_is_shared(ot) )
> {
> /* Do an unshare to cleanly take care of all corner cases. */
> - rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)));
> + rc = mem_sharing_unshare_page(d, gfn_x(gfn_add(gfn, i)));
Same as patch 3. I'd recommend "gfn_x(gfn) + i" in preference (seeing
as you're cleaning up this line anyway).
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> seeing as you didn't
introduce it, but preferably with it changed.
~Andrew