When not holding the PoD lock across the entire region covering P2M
update and stats update, the entry count should indicate too large a
value in preference to too small a one, to avoid functions bailing early
when they find the count is zero. Hence increments should happen ahead
of P2M updates, while decrements should happen only after them. Deal with
the one place where this hasn't been the case yet.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1345,19 +1345,15 @@ mark_populate_on_demand(struct domain *d
         }
     }
 
+    pod_lock(p2m);
+    p2m->pod.entry_count += (1UL << order) - pod_count;
+    pod_unlock(p2m);
+
     /* Now, actually do the two-way mapping */
     rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,
                        p2m_populate_on_demand, p2m->default_access);
     if ( rc == 0 )
-    {
-        pod_lock(p2m);
-        p2m->pod.entry_count += 1UL << order;
-        p2m->pod.entry_count -= pod_count;
-        BUG_ON(p2m->pod.entry_count < 0);
-        pod_unlock(p2m);
-
         ioreq_request_mapcache_invalidate(d);
-    }
     else if ( order )
     {
         /*
@@ -1369,6 +1365,13 @@ mark_populate_on_demand(struct domain *d
                d, gfn_l, order, rc);
         domain_crash(d);
     }
+    else if ( !pod_count )
+    {
+        pod_lock(p2m);
+        BUG_ON(!p2m->pod.entry_count);
+        --p2m->pod.entry_count;
+        pod_unlock(p2m);
+    }
 
  out:
     gfn_unlock(p2m, gfn, order);
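
[Editorial note: to make the ordering rule from the description concrete:
moving the increment ahead of p2m_set_entry() means concurrent readers of
p2m->pod.entry_count can at worst see an overcount while the update is in
flight, and the new else-if branch rolls the increment back in exactly the
one failure case where it actually added something (an order-0 entry that
was not PoD before; failures at higher orders crash the domain instead).
Below is a minimal standalone C sketch of the pattern, for illustration
only -- the sketch_* names and mapping_update() are invented stand-ins,
not Xen APIs.]

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sketch_pod_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long sketch_entry_count; /* stands in for p2m->pod.entry_count */

/*
 * A consumer in the spirit of the PoD sweep/demand paths: it bails
 * early when the count reads zero.  An undercount makes it skip work
 * it should do; an overcount merely costs a wasted scan.
 */
static bool sketch_consumer_has_work(void)
{
    bool nonzero;

    pthread_mutex_lock(&sketch_pod_lock);
    nonzero = sketch_entry_count != 0;
    pthread_mutex_unlock(&sketch_pod_lock);

    return nonzero;
}

/* Stand-in for p2m_set_entry(); may fail and return nonzero. */
static int mapping_update(unsigned int order)
{
    (void)order;
    return 0;
}

/* pod_count: entries in the range that were already PoD beforehand. */
static int sketch_mark_pod(unsigned int order, unsigned long pod_count)
{
    int rc;

    /* Increment BEFORE the mapping update, so readers err high. */
    pthread_mutex_lock(&sketch_pod_lock);
    sketch_entry_count += (1UL << order) - pod_count;
    pthread_mutex_unlock(&sketch_pod_lock);

    rc = mapping_update(order);

    /* Decrement only AFTER a failed update, and only when the early
     * increment actually added an entry (order 0, not PoD before). */
    if ( rc != 0 && order == 0 && pod_count == 0 )
    {
        pthread_mutex_lock(&sketch_pod_lock);
        --sketch_entry_count;
        pthread_mutex_unlock(&sketch_pod_lock);
    }

    return rc;
}

[The asymmetry is the point of the patch: an overcount is self-correcting,
since a scan finds nothing and moves on, while an undercount can make
functions bail early even though PoD entries still exist.]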
On 01/12/2021 11:02, Jan Beulich wrote:
> When not holding the PoD lock across the entire region covering P2M
> update and stats update, the entry count should indicate too large a
> value in preference to too small a one, to avoid functions bailing early
> when they find the count is zero. Hence increments should happen ahead
> of P2M updates, while decrements should happen only after them. Deal with
> the one place where this hasn't been the case yet.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -1345,19 +1345,15 @@ mark_populate_on_demand(struct domain *d
>          }
>      }
>  
> +    pod_lock(p2m);
> +    p2m->pod.entry_count += (1UL << order) - pod_count;
> +    pod_unlock(p2m);
> +
>      /* Now, actually do the two-way mapping */
>      rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,
>                         p2m_populate_on_demand, p2m->default_access);
>      if ( rc == 0 )
> -    {
> -        pod_lock(p2m);
> -        p2m->pod.entry_count += 1UL << order;
> -        p2m->pod.entry_count -= pod_count;
> -        BUG_ON(p2m->pod.entry_count < 0);
> -        pod_unlock(p2m);
> -
>          ioreq_request_mapcache_invalidate(d);
> -    }
>      else if ( order )
>      {
>          /*
> @@ -1369,6 +1365,13 @@ mark_populate_on_demand(struct domain *d
>                 d, gfn_l, order, rc);
>          domain_crash(d);
>      }
> +    else if ( !pod_count )
> +    {
> +        pod_lock(p2m);
> +        BUG_ON(!p2m->pod.entry_count);
> +        --p2m->pod.entry_count;
> +        pod_unlock(p2m);
> +    }
>  
>   out:
>      gfn_unlock(p2m, gfn, order);
This email appears to contain the same patch twice, presumably split at
this point.
Which one should be reviewed?
~Andrew
On 01.12.2021 12:27, Andrew Cooper wrote:
> On 01/12/2021 11:02, Jan Beulich wrote:
>> [...]
>
> This email appears to contain the same patch twice, presumably split at
> this point.
Urgh - no idea how this has happened.
> Which one should be reviewed?
Just everything up from here. Or let me simply resend.
Jan