From: Paul Durrant <pdurrant@amazon.com>
Subject: [PATCH v5 6/8] common/grant_table: batch flush I/O TLB
This patch avoids calling iommu_iotlb_flush() for each individual GNTTABOP and
instead calls iommu_iotlb_flush_all() at the end of a batch. This should mean
non-singleton map/unmap operations perform better.
NOTE: A batch is the number of operations done before a pre-emption check and,
in the case of unmap, a TLB flush.
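[Editor's note: as an illustration of the pattern the patch adopts, here is a
minimal standalone sketch. It is not the patch code; the stub_*() functions
below are placeholders standing in for iommu_map(), iommu_iotlb_flush() and
iommu_iotlb_flush_all(), each producing/consuming flush flags as in the real
API.]

#define BATCH_SIZE 32

/* Trivial stubs so the sketch compiles; the real calls can fail. */
static int stub_map_op(unsigned int *flush_flags)
{ *flush_flags |= 1; return 0; }
static int stub_flush_one(unsigned int flush_flags)
{ (void)flush_flags; return 0; }
static int stub_flush_all(unsigned int flush_flags)
{ (void)flush_flags; return 0; }

static int do_batched_ops(unsigned int count)
{
    while ( count )
    {
        unsigned int i, c = count < BATCH_SIZE ? count : BATCH_SIZE;
        unsigned int flush_flags = 0;
        int rc = 0;

        for ( i = 0; i < c; i++ )
        {
            /* Each op accumulates what it needs into flush_flags... */
            rc = stub_map_op(&flush_flags);

            /* ...and only a singleton batch keeps the per-op flush. */
            if ( !rc && c == 1 )
                rc = stub_flush_one(flush_flags);

            if ( rc )
                break;
        }

        /* Non-singleton batches are flushed once, at the end. */
        if ( c > 1 )
        {
            int err = stub_flush_all(flush_flags);

            if ( !rc )
                rc = err;
        }

        if ( rc )
            return rc;

        count -= c;
        /* The real code checks for pre-emption between batches here. */
    }

    return 0;
}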
Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Julien Grall <julien@xen.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Wei Liu <wl@xen.org>
v5:
- Add batching to gnttab_map_grant_ref() to handle flushing before pre-
emption check
- Maintain per-op flushing in the case of singletons
v3:
- New in v3
---
xen/common/grant_table.c | 199 ++++++++++++++++++++++++++-------------
1 file changed, 134 insertions(+), 65 deletions(-)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index a844a94a5b..8c6d1dcea7 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -241,7 +241,13 @@ struct gnttab_unmap_common {
grant_ref_t ref;
};
-/* Number of unmap operations that are done between each tlb flush */
+/* Number of map operations that are done between each pre-emption check */
+#define GNTTAB_MAP_BATCH_SIZE 32
+
+/*
+ * Number of unmap operations that are done between each tlb flush and
+ * pre-emption check.
+ */
#define GNTTAB_UNMAP_BATCH_SIZE 32
@@ -979,7 +985,7 @@ static unsigned int mapkind(
static void
map_grant_ref(
- struct gnttab_map_grant_ref *op)
+ struct gnttab_map_grant_ref *op, bool do_flush, unsigned int *flush_flags)
{
struct domain *ld, *rd, *owner = NULL;
struct grant_table *lgt, *rgt;
@@ -1229,12 +1235,11 @@ map_grant_ref(
if ( kind )
{
dfn_t dfn = _dfn(mfn_x(mfn));
- unsigned int flush_flags = 0;
int err;
- err = iommu_map(ld, dfn, mfn, 1ul, kind, &flush_flags);
- if ( !err )
- err = iommu_iotlb_flush(ld, dfn, 1ul, flush_flags);
+ err = iommu_map(ld, dfn, mfn, 1ul, kind, flush_flags);
+ if ( do_flush && !err )
+ err = iommu_iotlb_flush(ld, dfn, 1ul, *flush_flags);
if ( err )
{
double_gt_unlock(lgt, rgt);
@@ -1319,29 +1324,60 @@ static long
gnttab_map_grant_ref(
XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count)
{
- int i;
- struct gnttab_map_grant_ref op;
+ struct domain *currd = current->domain;
+ unsigned int done = 0;
+ int rc = 0;
- for ( i = 0; i < count; i++ )
+ while ( count )
{
- if ( i && hypercall_preempt_check() )
- return i;
+ unsigned int i, c = min_t(unsigned int, count, GNTTAB_MAP_BATCH_SIZE);
+ unsigned int flush_flags = 0;
- if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
- return -EFAULT;
+ for ( i = 0; i < c; i++ )
+ {
+ struct gnttab_map_grant_ref op;
- map_grant_ref(&op);
+ if ( unlikely(__copy_from_guest(&op, uop, 1)) )
+ {
+ rc = -EFAULT;
+ break;
+ }
- if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
- return -EFAULT;
+ map_grant_ref(&op, c == 1, &flush_flags);
+
+ if ( unlikely(__copy_to_guest(uop, &op, 1)) )
+ {
+ rc = -EFAULT;
+ break;
+ }
+
+ guest_handle_add_offset(uop, 1);
+ }
+
+ if ( c > 1 )
+ {
+ int err = iommu_iotlb_flush_all(currd, flush_flags);
+
+ if ( !rc )
+ rc = err;
+ }
+
+ if ( rc )
+ break;
+
+ count -= c;
+ done += c;
+
+ if ( count && hypercall_preempt_check() )
+ return done;
}
- return 0;
+ return rc;
}
static void
unmap_common(
- struct gnttab_unmap_common *op)
+ struct gnttab_unmap_common *op, bool do_flush, unsigned int *flush_flags)
{
domid_t dom;
struct domain *ld, *rd;
@@ -1485,22 +1521,21 @@ unmap_common(
{
unsigned int kind;
dfn_t dfn = _dfn(mfn_x(op->mfn));
- unsigned int flush_flags = 0;
int err = 0;
double_gt_lock(lgt, rgt);
kind = mapkind(lgt, rd, op->mfn);
if ( !kind )
- err = iommu_unmap(ld, dfn, 1ul, &flush_flags);
+ err = iommu_unmap(ld, dfn, 1ul, flush_flags);
else if ( !(kind & MAPKIND_WRITE) )
err = iommu_map(ld, dfn, op->mfn, 1ul, IOMMUF_readable,
- &flush_flags);
+ flush_flags);
double_gt_unlock(lgt, rgt);
- if ( !err )
- err = iommu_iotlb_flush(ld, dfn, 1ul, flush_flags);
+ if ( do_flush && !err )
+ err = iommu_iotlb_flush(ld, dfn, 1ul, *flush_flags);
if ( err )
rc = GNTST_general_error;
}
@@ -1599,8 +1634,8 @@ unmap_common_complete(struct gnttab_unmap_common *op)
static void
unmap_grant_ref(
- struct gnttab_unmap_grant_ref *op,
- struct gnttab_unmap_common *common)
+ struct gnttab_unmap_grant_ref *op, struct gnttab_unmap_common *common,
+ bool do_flush, unsigned int *flush_flags)
{
common->host_addr = op->host_addr;
common->dev_bus_addr = op->dev_bus_addr;
@@ -1612,7 +1647,7 @@ unmap_grant_ref(
common->rd = NULL;
common->mfn = INVALID_MFN;
- unmap_common(common);
+ unmap_common(common, do_flush, flush_flags);
op->status = common->status;
}
@@ -1621,31 +1656,55 @@ static long
gnttab_unmap_grant_ref(
XEN_GUEST_HANDLE_PARAM(gnttab_unmap_grant_ref_t) uop, unsigned int count)
{
- int i, c, partial_done, done = 0;
- struct gnttab_unmap_grant_ref op;
- struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+ struct domain *currd = current->domain;
+ unsigned int done = 0;
+ int rc = 0;
- while ( count != 0 )
+ while ( count )
{
- c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
- partial_done = 0;
+ struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+ unsigned int i, c, partial_done = 0;
+ unsigned int flush_flags = 0;
+
+ c = min_t(unsigned int, count, GNTTAB_UNMAP_BATCH_SIZE);
for ( i = 0; i < c; i++ )
{
+ struct gnttab_unmap_grant_ref op;
+
if ( unlikely(__copy_from_guest(&op, uop, 1)) )
- goto fault;
- unmap_grant_ref(&op, &common[i]);
+ {
+ rc = -EFAULT;
+ break;
+ }
+
+ unmap_grant_ref(&op, &common[i], c == 1, &flush_flags);
++partial_done;
+
if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
- goto fault;
+ {
+ rc = -EFAULT;
+ break;
+ }
+
guest_handle_add_offset(uop, 1);
}
- gnttab_flush_tlb(current->domain);
+ gnttab_flush_tlb(currd);
+ if ( c > 1 )
+ {
+ int err = iommu_iotlb_flush_all(currd, flush_flags);
+
+ if ( !rc )
+ rc = err;
+ }
for ( i = 0; i < partial_done; i++ )
unmap_common_complete(&common[i]);
+ if ( rc )
+ break;
+
count -= c;
done += c;
@@ -1653,20 +1712,13 @@ gnttab_unmap_grant_ref(
return done;
}
- return 0;
-
-fault:
- gnttab_flush_tlb(current->domain);
-
- for ( i = 0; i < partial_done; i++ )
- unmap_common_complete(&common[i]);
- return -EFAULT;
+ return rc;
}
static void
unmap_and_replace(
- struct gnttab_unmap_and_replace *op,
- struct gnttab_unmap_common *common)
+ struct gnttab_unmap_and_replace *op, struct gnttab_unmap_common *common,
+ bool do_flush, unsigned int *flush_flags)
{
common->host_addr = op->host_addr;
common->new_addr = op->new_addr;
@@ -1678,7 +1730,7 @@ unmap_and_replace(
common->rd = NULL;
common->mfn = INVALID_MFN;
- unmap_common(common);
+ unmap_common(common, do_flush, flush_flags);
op->status = common->status;
}
@@ -1686,31 +1738,55 @@ static long
gnttab_unmap_and_replace(
XEN_GUEST_HANDLE_PARAM(gnttab_unmap_and_replace_t) uop, unsigned int count)
{
- int i, c, partial_done, done = 0;
- struct gnttab_unmap_and_replace op;
- struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+ struct domain *currd = current->domain;
+ unsigned int done = 0;
+ int rc = 0;
- while ( count != 0 )
+ while ( count )
{
- c = min(count, (unsigned int)GNTTAB_UNMAP_BATCH_SIZE);
- partial_done = 0;
+ struct gnttab_unmap_common common[GNTTAB_UNMAP_BATCH_SIZE];
+ unsigned int i, c, partial_done = 0;
+ unsigned int flush_flags = 0;
+
+ c = min_t(unsigned int, count, GNTTAB_UNMAP_BATCH_SIZE);
for ( i = 0; i < c; i++ )
{
+ struct gnttab_unmap_and_replace op;
+
if ( unlikely(__copy_from_guest(&op, uop, 1)) )
- goto fault;
- unmap_and_replace(&op, &common[i]);
+ {
+ rc = -EFAULT;
+ break;
+ }
+
+ unmap_and_replace(&op, &common[i], c == 1, &flush_flags);
++partial_done;
+
if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
- goto fault;
+ {
+ rc = -EFAULT;
+ break;
+ }
+
guest_handle_add_offset(uop, 1);
}
- gnttab_flush_tlb(current->domain);
+ gnttab_flush_tlb(currd);
+ if ( c > 1 )
+ {
+ int err = iommu_iotlb_flush_all(currd, flush_flags);
+
+ if ( !rc )
+ rc = err;
+ }
for ( i = 0; i < partial_done; i++ )
unmap_common_complete(&common[i]);
+ if ( rc )
+ break;
+
count -= c;
done += c;
@@ -1718,14 +1794,7 @@ gnttab_unmap_and_replace(
return done;
}
- return 0;
-
-fault:
- gnttab_flush_tlb(current->domain);
-
- for ( i = 0; i < partial_done; i++ )
- unmap_common_complete(&common[i]);
- return -EFAULT;
+ return rc;
}
static int
--
2.20.1
On 07.09.2020 09:40, Paul Durrant wrote:
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -241,7 +241,13 @@ struct gnttab_unmap_common {
> grant_ref_t ref;
> };
>
> -/* Number of unmap operations that are done between each tlb flush */
> +/* Number of map operations that are done between each pre-emption check */
> +#define GNTTAB_MAP_BATCH_SIZE 32
> +
> +/*
> + * Number of unmap operations that are done between each tlb flush and
> + * pre-emption check.
> + */
> #define GNTTAB_UNMAP_BATCH_SIZE 32
Interesting - I don't think I've ever seen preemption spelled like
this (with a hyphen). In the interest of grep-ability, would you
mind changing to the more "conventional" spelling? Albeit I now
notice we have two such spellings in the tree already ...
> @@ -979,7 +985,7 @@ static unsigned int mapkind(
>
> static void
> map_grant_ref(
> - struct gnttab_map_grant_ref *op)
> + struct gnttab_map_grant_ref *op, bool do_flush, unsigned int *flush_flags)
Why two parameters? Simply pass NULL for singletons instead? Although,
you'd need another local variable then, which maybe isn't overly much
nicer.
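[Editor's note: for concreteness, the alternative being suggested would look
roughly like the sketch below -- a hypothetical reshaping, not what the patch
does. flush_flags == NULL would mean "singleton: flush per op", and the extra
local variable mentioned is singleton_flags here.]

static void
map_grant_ref(
    struct gnttab_map_grant_ref *op, unsigned int *flush_flags)
{
    unsigned int singleton_flags = 0;
    bool do_flush = !flush_flags;

    if ( do_flush )
        flush_flags = &singleton_flags;

    /* ... existing mapping logic, accumulating into *flush_flags ... */

    if ( do_flush )
    {
        /* Singleton: flush here rather than in the caller, e.g.
         * err = iommu_iotlb_flush(ld, dfn, 1ul, *flush_flags); */
    }
}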
> @@ -1319,29 +1324,60 @@ static long
> gnttab_map_grant_ref(
> XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count)
> {
> - int i;
> - struct gnttab_map_grant_ref op;
> + struct domain *currd = current->domain;
Is this a worthwhile variable to have in this case? It's used
exactly once, granted in the loop body, but still not the inner
one.
> + unsigned int done = 0;
> + int rc = 0;
>
> - for ( i = 0; i < count; i++ )
> + while ( count )
> {
> - if ( i && hypercall_preempt_check() )
> - return i;
> + unsigned int i, c = min_t(unsigned int, count, GNTTAB_MAP_BATCH_SIZE);
> + unsigned int flush_flags = 0;
>
> - if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
> - return -EFAULT;
> + for ( i = 0; i < c; i++ )
> + {
> + struct gnttab_map_grant_ref op;
>
> - map_grant_ref(&op);
> + if ( unlikely(__copy_from_guest(&op, uop, 1)) )
> + {
> + rc = -EFAULT;
> + break;
> + }
>
> - if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
> - return -EFAULT;
> + map_grant_ref(&op, c == 1, &flush_flags);
> +
> + if ( unlikely(__copy_to_guest(uop, &op, 1)) )
> + {
> + rc = -EFAULT;
> + break;
> + }
> +
> + guest_handle_add_offset(uop, 1);
> + }
> +
> + if ( c > 1 )
> + {
> + int err = iommu_iotlb_flush_all(currd, flush_flags);
There's still some double flushing involved in the error case here.
Trying to eliminate this (by having map_grant_ref() write zero
into *flush_flags) may not be overly important, but I thought I'd
still mention it.
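[Editor's note: the suggestion amounts to something like the following in
map_grant_ref()'s IOMMU section -- a sketch only, assuming the existing unwind
path performs its own flushing as described above:]

        err = iommu_map(ld, dfn, mfn, 1ul, kind, flush_flags);
        if ( do_flush && !err )
            err = iommu_iotlb_flush(ld, dfn, 1ul, *flush_flags);

        if ( err )
        {
            /*
             * Zero the accumulated flags so the caller's batch-end
             * iommu_iotlb_flush_all() degenerates to a no-op instead
             * of flushing a second time. Note this also discards
             * flags accumulated by earlier, successful ops in the
             * batch -- see the follow-up below.
             */
            *flush_flags = 0;

            double_gt_unlock(lgt, rgt);
            /* ... existing unwind continues ... */
        }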
Jan
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 10 September 2020 14:49
> To: Paul Durrant <paul@xen.org>
> Cc: xen-devel@lists.xenproject.org; Paul Durrant <pdurrant@amazon.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; George Dunlap <george.dunlap@citrix.com>; Ian Jackson
> <ian.jackson@eu.citrix.com>; Julien Grall <julien@xen.org>; Stefano Stabellini
> <sstabellini@kernel.org>; Wei Liu <wl@xen.org>
> Subject: Re: [PATCH v5 6/8] common/grant_table: batch flush I/O TLB
>
> On 07.09.2020 09:40, Paul Durrant wrote:
> > --- a/xen/common/grant_table.c
> > +++ b/xen/common/grant_table.c
> > @@ -241,7 +241,13 @@ struct gnttab_unmap_common {
> > grant_ref_t ref;
> > };
> >
> > -/* Number of unmap operations that are done between each tlb flush */
> > +/* Number of map operations that are done between each pre-emption check */
> > +#define GNTTAB_MAP_BATCH_SIZE 32
> > +
> > +/*
> > + * Number of unmap operations that are done between each tlb flush and
> > + * pre-emption check.
> > + */
> > #define GNTTAB_UNMAP_BATCH_SIZE 32
>
> Interesting - I don't think I've ever seen preemption spelled like
> this (with a hyphen). In the interest of grep-ability, would you
> mind changing to the more "conventional" spelling? Albeit I now
> notice we have two such spellings in the tree already ...
>
Sure, I'll change it.
> > @@ -979,7 +985,7 @@ static unsigned int mapkind(
> >
> > static void
> > map_grant_ref(
> > - struct gnttab_map_grant_ref *op)
> > + struct gnttab_map_grant_ref *op, bool do_flush, unsigned int *flush_flags)
>
> Why two parameters? Simply pass NULL for singletons instead? Although,
> you'd need another local variable then, which maybe isn't overly much
> nicer.
>
No, I think the extra arg is clearer.
> > @@ -1319,29 +1324,60 @@ static long
> > gnttab_map_grant_ref(
> > XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count)
> > {
> > - int i;
> > - struct gnttab_map_grant_ref op;
> > + struct domain *currd = current->domain;
>
> Is this a worthwhile variable to have in this case? It's used
> exactly once, granted in the loop body, but still not the inner
> one.
>
I thought it was nicer for consistency with the unmap function (where currd is used more than once), but I'll drop it.
> > + unsigned int done = 0;
> > + int rc = 0;
> >
> > - for ( i = 0; i < count; i++ )
> > + while ( count )
> > {
> > - if ( i && hypercall_preempt_check() )
> > - return i;
> > + unsigned int i, c = min_t(unsigned int, count, GNTTAB_MAP_BATCH_SIZE);
> > + unsigned int flush_flags = 0;
> >
> > - if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
> > - return -EFAULT;
> > + for ( i = 0; i < c; i++ )
> > + {
> > + struct gnttab_map_grant_ref op;
> >
> > - map_grant_ref(&op);
> > + if ( unlikely(__copy_from_guest(&op, uop, 1)) )
> > + {
> > + rc = -EFAULT;
> > + break;
> > + }
> >
> > - if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
> > - return -EFAULT;
> > + map_grant_ref(&op, c == 1, &flush_flags);
> > +
> > + if ( unlikely(__copy_to_guest(uop, &op, 1)) )
> > + {
> > + rc = -EFAULT;
> > + break;
> > + }
> > +
> > + guest_handle_add_offset(uop, 1);
> > + }
> > +
> > + if ( c > 1 )
> > + {
> > + int err = iommu_iotlb_flush_all(currd, flush_flags);
>
> There's still some double flushing involved in the error case here.
> Trying to eliminate this (by having map_grant_ref() write zero
> into *flush_flags) may not be overly important, but I thought I'd
> still mention it.
>
This only kicks in if there's more than one operation, and is it safe to squash the flush_flags if some ops succeed and others fail? If all ops fail, then flush_flags should still be zero, because the call to iommu_map() is the last failure point in map_grant_ref() anyway.
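[Editor's note: i.e., referring back to the hunk above -- and assuming
iommu_map() leaves *flush_flags untouched when a single-page mapping fails:]

        err = iommu_map(ld, dfn, mfn, 1ul, kind, flush_flags); /* last failure point */
        if ( do_flush && !err ) /* singleton batches only */
            err = iommu_iotlb_flush(ld, dfn, 1ul, *flush_flags);

        /* If every op in a batch fails at iommu_map(), flush_flags stays
         * zero and the batch-end iommu_iotlb_flush_all() has nothing to
         * flush. */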
Paul
> Jan
On 10.09.2020 16:17, Paul Durrant wrote:
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: 10 September 2020 14:49
>>
>> On 07.09.2020 09:40, Paul Durrant wrote:
>>> --- a/xen/common/grant_table.c
>>> +++ b/xen/common/grant_table.c
>>> @@ -241,7 +241,13 @@ struct gnttab_unmap_common {
>>> grant_ref_t ref;
>>> };
>>>
>>> -/* Number of unmap operations that are done between each tlb flush */
>>> +/* Number of map operations that are done between each pre-emption check */
>>> +#define GNTTAB_MAP_BATCH_SIZE 32
>>> +
>>> +/*
>>> + * Number of unmap operations that are done between each tlb flush and
>>> + * pre-emption check.
>>> + */
>>> #define GNTTAB_UNMAP_BATCH_SIZE 32
>>
>> Interesting - I don't think I've ever seen preemption spelled like
>> this (with a hyphen). In the interest of grep-ability, would you
>> mind changing to the more "conventional" spelling? Albeit I now
>> notice we have two such spellings in the tree already ...
>>
>
> Sure, I'll change it.
>
>>> @@ -979,7 +985,7 @@ static unsigned int mapkind(
>>>
>>> static void
>>> map_grant_ref(
>>> - struct gnttab_map_grant_ref *op)
>>> + struct gnttab_map_grant_ref *op, bool do_flush, unsigned int *flush_flags)
>>
>> Why two parameters? Simply pass NULL for singletons instead? Although,
>> you'd need another local variable then, which maybe isn't overly much
>> nicer.
>>
>
> No, I think the extra arg is clearer.
>
>>> @@ -1319,29 +1324,60 @@ static long
>>> gnttab_map_grant_ref(
>>> XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count)
>>> {
>>> - int i;
>>> - struct gnttab_map_grant_ref op;
>>> + struct domain *currd = current->domain;
>>
>> Is this a worthwhile variable to have in this case? It's used
>> exactly once, granted in the loop body, but still not the inner
>> one.
>>
>
> I thought it was nicer for consistency with the unmap function (where currd is used more than once), but I'll drop it.
>
>>> + unsigned int done = 0;
>>> + int rc = 0;
>>>
>>> - for ( i = 0; i < count; i++ )
>>> + while ( count )
>>> {
>>> - if ( i && hypercall_preempt_check() )
>>> - return i;
>>> + unsigned int i, c = min_t(unsigned int, count, GNTTAB_MAP_BATCH_SIZE);
>>> + unsigned int flush_flags = 0;
>>>
>>> - if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
>>> - return -EFAULT;
>>> + for ( i = 0; i < c; i++ )
>>> + {
>>> + struct gnttab_map_grant_ref op;
>>>
>>> - map_grant_ref(&op);
>>> + if ( unlikely(__copy_from_guest(&op, uop, 1)) )
>>> + {
>>> + rc = -EFAULT;
>>> + break;
>>> + }
>>>
>>> - if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
>>> - return -EFAULT;
>>> + map_grant_ref(&op, c == 1, &flush_flags);
>>> +
>>> + if ( unlikely(__copy_to_guest(uop, &op, 1)) )
>>> + {
>>> + rc = -EFAULT;
>>> + break;
>>> + }
>>> +
>>> + guest_handle_add_offset(uop, 1);
>>> + }
>>> +
>>> + if ( c > 1 )
>>> + {
>>> + int err = iommu_iotlb_flush_all(currd, flush_flags);
>>
>> There's still some double flushing involved in the error case here.
>> Trying to eliminate this (by having map_grant_ref() write zero
>> into *flush_flags) may not be overly important, but I thought I'd
>> still mention it.
>>
>
> This only kicks in if there's more than one operation, and is it safe to squash the flush_flags if some ops succeed and others fail? If all ops fail, then flush_flags should still be zero, because the call to iommu_map() is the last failure point in map_grant_ref() anyway.
Well, yes, not a common thing to run into. With the small changes
further up
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Jan
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: 10 September 2020 15:39
> To: paul@xen.org
> Cc: xen-devel@lists.xenproject.org; 'Paul Durrant' <pdurrant@amazon.com>; 'Andrew Cooper'
> <andrew.cooper3@citrix.com>; 'George Dunlap' <george.dunlap@citrix.com>; 'Ian Jackson'
> <ian.jackson@eu.citrix.com>; 'Julien Grall' <julien@xen.org>; 'Stefano Stabellini'
> <sstabellini@kernel.org>; 'Wei Liu' <wl@xen.org>
> Subject: Re: [PATCH v5 6/8] common/grant_table: batch flush I/O TLB
>
> On 10.09.2020 16:17, Paul Durrant wrote:
> >> From: Jan Beulich <jbeulich@suse.com>
> >> Sent: 10 September 2020 14:49
> >>
> >> On 07.09.2020 09:40, Paul Durrant wrote:
> >>> --- a/xen/common/grant_table.c
> >>> +++ b/xen/common/grant_table.c
> >>> @@ -241,7 +241,13 @@ struct gnttab_unmap_common {
> >>> grant_ref_t ref;
> >>> };
> >>>
> >>> -/* Number of unmap operations that are done between each tlb flush */
> >>> +/* Number of map operations that are done between each pre-emption check */
> >>> +#define GNTTAB_MAP_BATCH_SIZE 32
> >>> +
> >>> +/*
> >>> + * Number of unmap operations that are done between each tlb flush and
> >>> + * pre-emption check.
> >>> + */
> >>> #define GNTTAB_UNMAP_BATCH_SIZE 32
> >>
> >> Interesting - I don't think I've ever seen preemption spelled like
> >> this (with a hyphen). In the interest of grep-ability, would you
> >> mind changing to the more "conventional" spelling? Albeit I now
> >> notice we have two such spellings in the tree already ...
> >>
> >
> > Sure, I'll change it.
> >
> >>> @@ -979,7 +985,7 @@ static unsigned int mapkind(
> >>>
> >>> static void
> >>> map_grant_ref(
> >>> - struct gnttab_map_grant_ref *op)
> >>> + struct gnttab_map_grant_ref *op, bool do_flush, unsigned int *flush_flags)
> >>
> >> Why two parameters? Simply pass NULL for singletons instead? Although,
> >> you'd need another local variable then, which maybe isn't overly much
> >> nicer.
> >>
> >
> > No, I think the extra arg is clearer.
> >
> >>> @@ -1319,29 +1324,60 @@ static long
> >>> gnttab_map_grant_ref(
> >>> XEN_GUEST_HANDLE_PARAM(gnttab_map_grant_ref_t) uop, unsigned int count)
> >>> {
> >>> - int i;
> >>> - struct gnttab_map_grant_ref op;
> >>> + struct domain *currd = current->domain;
> >>
> >> Is this a worthwhile variable to have in this case? It's used
> >> exactly once, granted in the loop body, but still not the inner
> >> one.
> >>
> >
> > I thought it was nicer for consistency with the unmap function (where currd is used more than once),
> > but I'll drop it.
> >
> >>> + unsigned int done = 0;
> >>> + int rc = 0;
> >>>
> >>> - for ( i = 0; i < count; i++ )
> >>> + while ( count )
> >>> {
> >>> - if ( i && hypercall_preempt_check() )
> >>> - return i;
> >>> + unsigned int i, c = min_t(unsigned int, count, GNTTAB_MAP_BATCH_SIZE);
> >>> + unsigned int flush_flags = 0;
> >>>
> >>> - if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
> >>> - return -EFAULT;
> >>> + for ( i = 0; i < c; i++ )
> >>> + {
> >>> + struct gnttab_map_grant_ref op;
> >>>
> >>> - map_grant_ref(&op);
> >>> + if ( unlikely(__copy_from_guest(&op, uop, 1)) )
> >>> + {
> >>> + rc = -EFAULT;
> >>> + break;
> >>> + }
> >>>
> >>> - if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
> >>> - return -EFAULT;
> >>> + map_grant_ref(&op, c == 1, &flush_flags);
> >>> +
> >>> + if ( unlikely(__copy_to_guest(uop, &op, 1)) )
> >>> + {
> >>> + rc = -EFAULT;
> >>> + break;
> >>> + }
> >>> +
> >>> + guest_handle_add_offset(uop, 1);
> >>> + }
> >>> +
> >>> + if ( c > 1 )
> >>> + {
> >>> + int err = iommu_iotlb_flush_all(currd, flush_flags);
> >>
> >> There's still some double flushing involved in the error case here.
> >> Trying to eliminate this (by having map_grant_ref() write zero
> >> into *flush_flags) may not be overly important, but I thought I'd
> >> still mention it.
> >>
> >
> > This only kicks in if there's more than one operation, and is it safe to squash the flush_flags if
> > some ops succeed and others fail? If all ops fail, then flush_flags should still be zero, because the
> > call to iommu_map() is the last failure point in map_grant_ref() anyway.
>
> Well, yes, not a common thing to run into. With the small changes
> further up
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
>
Thanks. Will send v6 shortly.
Paul
> Jan