There are exactly 3 callers of sort() in the hypervisor.
Both arm callers pass in NULL for the swap function. While this might seem
like an attractive option at first, it causes generic_swap() to be used which
forces a byte-wise copy. Provide real swap functions which the compiler can
optimise sensibly.
Furthermore, use of function pointers in tight loops like that can be very bad
for performance. Implement sort() as extern inline, so the optimiser can
judge whether to inline things or not.
On x86, the diffstat shows how much of a better job the compiler can do when
it is able to see the cmp/swap implementations.
$ ../scripts/bloat-o-meter xen-syms-before xen-syms-after
add/remove: 0/5 grow/shrink: 1/1 up/down: 505/-735 (-230)
Function old new delta
sort_exception_table 31 536 +505
u32_swap 9 - -9
generic_swap 34 - -34
cmp_ex 36 - -36
swap_ex 41 - -41
sort_exception_tables 90 38 -52
sort 563 - -563
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien@xen.org>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
CC: Bertrand Marquis <bertrand.marquis@arm.com>
---
xen/arch/arm/bootfdt.c | 9 +++++-
xen/arch/arm/io.c | 9 +++++-
xen/include/xen/sort.h | 55 +++++++++++++++++++++++++++++++++-
xen/lib/sort.c | 80 ++------------------------------------------------
4 files changed, 72 insertions(+), 81 deletions(-)
diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
index afaa0e249b71..e318ef960386 100644
--- a/xen/arch/arm/bootfdt.c
+++ b/xen/arch/arm/bootfdt.c
@@ -448,6 +448,13 @@ static int __init cmp_memory_node(const void *key, const void *elem)
return 0;
}
+static void __init swap_memory_node(void *_a, void *_b, size_t size)
+{
+ struct membank *a = _a, *b = _b;
+
+ SWAP(*a, *b);
+}
+
/**
* boot_fdt_info - initialize bootinfo from a DTB
* @fdt: flattened device tree binary
@@ -472,7 +479,7 @@ size_t __init boot_fdt_info(const void *fdt, paddr_t paddr)
* the banks sorted in ascending order. So sort them through.
*/
sort(bootinfo.mem.bank, bootinfo.mem.nr_banks, sizeof(struct membank),
- cmp_memory_node, NULL);
+ cmp_memory_node, swap_memory_node);
early_print_info();
diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 729287e37c59..1a066f9ae502 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -80,6 +80,13 @@ static int cmp_mmio_handler(const void *key, const void *elem)
return 0;
}
+static void swap_mmio_handler(void *_a, void *_b, size_t size)
+{
+ struct mmio_handler *a = _a, *b = _b;
+
+ SWAP(*a, *b);
+}
+
static const struct mmio_handler *find_mmio_handler(struct domain *d,
paddr_t gpa)
{
@@ -170,7 +177,7 @@ void register_mmio_handler(struct domain *d,
/* Sort mmio handlers in ascending order based on base address */
sort(vmmio->handlers, vmmio->num_entries, sizeof(struct mmio_handler),
- cmp_mmio_handler, NULL);
+ cmp_mmio_handler, swap_mmio_handler);
write_unlock(&vmmio->lock);
}
diff --git a/xen/include/xen/sort.h b/xen/include/xen/sort.h
index a403652948e7..01479ea44606 100644
--- a/xen/include/xen/sort.h
+++ b/xen/include/xen/sort.h
@@ -3,8 +3,61 @@
#include <xen/types.h>
+/*
+ * sort - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ * @swap: pointer to swap function or NULL
+ *
+ * This function does a heapsort on the given array. You may provide a
+ * swap function optimized to your element type.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * qsort is about 20% faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+#ifndef SORT_IMPLEMENTATION
+extern gnu_inline
+#endif
void sort(void *base, size_t num, size_t size,
int (*cmp)(const void *, const void *),
- void (*swap)(void *, void *, size_t));
+ void (*swap)(void *, void *, size_t))
+{
+ /* pre-scale counters for performance */
+ size_t i = (num / 2) * size, n = num * size, c, r;
+
+ /* heapify */
+ while ( i > 0 )
+ {
+ for ( r = i -= size; r * 2 + size < n; r = c )
+ {
+ c = r * 2 + size;
+ if ( (c < n - size) && (cmp(base + c, base + c + size) < 0) )
+ c += size;
+ if ( cmp(base + r, base + c) >= 0 )
+ break;
+ swap(base + r, base + c, size);
+ }
+ }
+
+ /* sort */
+ for ( i = n; i > 0; )
+ {
+ i -= size;
+ swap(base, base + i, size);
+ for ( r = 0; r * 2 + size < i; r = c )
+ {
+ c = r * 2 + size;
+ if ( (c < i - size) && (cmp(base + c, base + c + size) < 0) )
+ c += size;
+ if ( cmp(base + r, base + c) >= 0 )
+ break;
+ swap(base + r, base + c, size);
+ }
+ }
+}
#endif /* __XEN_SORT_H__ */
diff --git a/xen/lib/sort.c b/xen/lib/sort.c
index 35ce0d7abdec..b7e78cc0e8d2 100644
--- a/xen/lib/sort.c
+++ b/xen/lib/sort.c
@@ -4,81 +4,5 @@
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
-#include <xen/types.h>
-
-static void u32_swap(void *a, void *b, size_t size)
-{
- uint32_t t = *(uint32_t *)a;
-
- *(uint32_t *)a = *(uint32_t *)b;
- *(uint32_t *)b = t;
-}
-
-static void generic_swap(void *a, void *b, size_t size)
-{
- char t;
-
- do {
- t = *(char *)a;
- *(char *)a++ = *(char *)b;
- *(char *)b++ = t;
- } while ( --size > 0 );
-}
-
-/*
- * sort - sort an array of elements
- * @base: pointer to data to sort
- * @num: number of elements
- * @size: size of each element
- * @cmp: pointer to comparison function
- * @swap: pointer to swap function or NULL
- *
- * This function does a heapsort on the given array. You may provide a
- * swap function optimized to your element type.
- *
- * Sorting time is O(n log n) both on average and worst-case. While
- * qsort is about 20% faster on average, it suffers from exploitable
- * O(n*n) worst-case behavior and extra memory requirements that make
- * it less suitable for kernel use.
- */
-
-void sort(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *),
- void (*swap)(void *, void *, size_t size))
-{
- /* pre-scale counters for performance */
- size_t i = (num / 2) * size, n = num * size, c, r;
-
- if ( !swap )
- swap = (size == 4 ? u32_swap : generic_swap);
-
- /* heapify */
- while ( i > 0 )
- {
- for ( r = i -= size; r * 2 + size < n; r = c )
- {
- c = r * 2 + size;
- if ( (c < n - size) && (cmp(base + c, base + c + size) < 0) )
- c += size;
- if ( cmp(base + r, base + c) >= 0 )
- break;
- swap(base + r, base + c, size);
- }
- }
-
- /* sort */
- for ( i = n; i > 0; )
- {
- i -= size;
- swap(base, base + i, size);
- for ( r = 0; r * 2 + size < i; r = c )
- {
- c = r * 2 + size;
- if ( (c < i - size) && (cmp(base + c, base + c + size) < 0) )
- c += size;
- if ( cmp(base + r, base + c) >= 0 )
- break;
- swap(base + r, base + c, size);
- }
- }
-}
+#define SORT_IMPLEMENTATION
+#include <xen/sort.h>
--
2.11.0
On 11.11.2021 18:57, Andrew Cooper wrote:
> There are exactly 3 callers of sort() in the hypervisor.
>
> Both arm callers pass in NULL for the swap function. While this might seem
> like an attractive option at first, it causes generic_swap() to be used which
> forced a byte-wise copy. Provide real swap functions which the compiler can
> optimise sensibly.
>
> Furthermore, use of function pointers in tight loops like that can be very bad
> for performance. Implement sort() as extern inline, so the optimiser can
> judge whether to inline things or not.
>
> On x86, the diffstat shows how much of a better job the compiler can do when
> it is able to see the cmp/swap implementations.
>
> $ ../scripts/bloat-o-meter xen-syms-before xen-syms-after
> add/remove: 0/5 grow/shrink: 1/1 up/down: 505/-735 (-230)
> Function old new delta
> sort_exception_table 31 536 +505
> u32_swap 9 - -9
> generic_swap 34 - -34
> cmp_ex 36 - -36
> swap_ex 41 - -41
> sort_exception_tables 90 38 -52
> sort 563 - -563
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Technically
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Yet again without the intention of overriding Julien's concerns in any
way. To address one of them, how about retaining generic_swap() (as an
inline function), ...
> --- a/xen/include/xen/sort.h
> +++ b/xen/include/xen/sort.h
> @@ -3,8 +3,61 @@
>
> #include <xen/types.h>
>
> +/*
> + * sort - sort an array of elements
> + * @base: pointer to data to sort
> + * @num: number of elements
> + * @size: size of each element
> + * @cmp: pointer to comparison function
> + * @swap: pointer to swap function or NULL
> + *
> + * This function does a heapsort on the given array. You may provide a
> + * swap function optimized to your element type.
> + *
> + * Sorting time is O(n log n) both on average and worst-case. While
> + * qsort is about 20% faster on average, it suffers from exploitable
> + * O(n*n) worst-case behavior and extra memory requirements that make
> + * it less suitable for kernel use.
> + */
> +#ifndef SORT_IMPLEMENTATION
> +extern gnu_inline
> +#endif
> void sort(void *base, size_t num, size_t size,
> int (*cmp)(const void *, const void *),
> - void (*swap)(void *, void *, size_t));
> + void (*swap)(void *, void *, size_t))
> +{
> + /* pre-scale counters for performance */
> + size_t i = (num / 2) * size, n = num * size, c, r;
> +
> + /* heapify */
> + while ( i > 0 )
> + {
> + for ( r = i -= size; r * 2 + size < n; r = c )
> + {
> + c = r * 2 + size;
> + if ( (c < n - size) && (cmp(base + c, base + c + size) < 0) )
> + c += size;
> + if ( cmp(base + r, base + c) >= 0 )
> + break;
> + swap(base + r, base + c, size);
... doing
if ( swap )
swap(base + r, base + c, size);
else
generic_swap(base + r, base + c, size);
here and below. The compiler would then still be able to eliminate the
indirect calls (as well as the added conditional), I think.
Jan
Hi Andrew, On 11/11/2021 17:57, Andrew Cooper wrote: > There are exactly 3 callers of sort() in the hypervisor. > > Both arm callers pass in NULL for the swap function. While this might seem > like an attractive option at first, it causes generic_swap() to be used which > forced a byte-wise copy. Provide real swap functions which the compiler can > optimise sensibly. > > Furthermore, use of function pointers in tight loops like that can be very bad > for performance. Implement sort() as extern inline, so the optimiser can > judge whether to inline things or not. > > On x86, the diffstat shows how much of a better job the compiler can do when > it is able to see the cmp/swap implementations. For completness, here the Arm bloat-o-meter: add/remove: 0/5 grow/shrink: 2/0 up/down: 928/-660 (268) Function old new delta boot_fdt_info 640 1132 +492 register_mmio_handler 292 728 +436 u32_swap 20 - -20 generic_swap 40 - -40 cmp_mmio_handler 44 - -44 cmp_memory_node 44 - -44 sort 512 - -512 Total: Before=966915, After=967183, chg +0.03% Cheers, -- Julien Grall
Hi Andrew, On 11/11/2021 17:57, Andrew Cooper wrote: > There are exactly 3 callers of sort() in the hypervisor. > > Both arm callers pass in NULL for the swap function. While this might seem > like an attractive option at first, it causes generic_swap() to be used which > forced a byte-wise copy. Provide real swap functions which the compiler can > optimise sensibly. I understand the theory, but none of the two calls are in hot paths or deal with large set on Arm. So I am rather hesitant to introduce specialised swap in each case as it doesn't seem we will gain much from this change. Cheers, -- Julien Grall
On Thu, 11 Nov 2021, Julien Grall wrote:
> On 11/11/2021 17:57, Andrew Cooper wrote:
> > There are exactly 3 callers of sort() in the hypervisor.
> >
> > Both arm callers pass in NULL for the swap function. While this might seem
> > like an attractive option at first, it causes generic_swap() to be used
> > which
> > forced a byte-wise copy. Provide real swap functions which the compiler can
> > optimise sensibly.
>
> I understand the theory, but none of the two calls are in hot paths or deal
> with large set on Arm. So I am rather hesitant to introduce specialised swap
> in each case as it doesn't seem we will gain much from this change.
While I like Andrew's optimization, like Julien, I would also rather not
have to introduce specialized swap functions any time a sort() is
called. Why not keeping around generic_swap as extern gnu_inline in
sort.h as well so that the ARM callers can simply:
sort(bootinfo.mem.bank, bootinfo.mem.nr_banks, sizeof(struct membank),
cmp_memory_node, generic_swap);
?
That looks like a good option, although it would still result in a
small increase in bloat.
On 16/11/2021 00:36, Stefano Stabellini wrote: > On Thu, 11 Nov 2021, Julien Grall wrote: >> On 11/11/2021 17:57, Andrew Cooper wrote: >>> There are exactly 3 callers of sort() in the hypervisor. >>> >>> Both arm callers pass in NULL for the swap function. While this might seem >>> like an attractive option at first, it causes generic_swap() to be used >>> which >>> forced a byte-wise copy. Provide real swap functions which the compiler can >>> optimise sensibly. >> I understand the theory, but none of the two calls are in hot paths or deal >> with large set on Arm. So I am rather hesitant to introduce specialised swap >> in each case as it doesn't seem we will gain much from this change. > While I like Andrew's optimization, like Julien, I would also rather not > have to introduce specialized swap functions any time a sort() is > called. Why not keeping around generic_swap as extern gnu_inline in > sort.h as well so that the ARM callers can simply: > > sort(bootinfo.mem.bank, bootinfo.mem.nr_banks, sizeof(struct membank), > cmp_memory_node, generic_swap); > > ? > > That looks like a good option, although it would still result in a > small increase in bloat. That is basically what Jan suggested. I'm still unconvinced that you actually want to be doing byte-wise swapping, even if it isn't a hotpath. A custom swap function will compile to less code than using generic_swap() like this, and run faster. ~Andrew
On 16/11/2021 00:41, Andrew Cooper wrote: > On 16/11/2021 00:36, Stefano Stabellini wrote: >> On Thu, 11 Nov 2021, Julien Grall wrote: >>> On 11/11/2021 17:57, Andrew Cooper wrote: >>>> There are exactly 3 callers of sort() in the hypervisor. >>>> >>>> Both arm callers pass in NULL for the swap function. While this might seem >>>> like an attractive option at first, it causes generic_swap() to be used >>>> which >>>> forced a byte-wise copy. Provide real swap functions which the compiler can >>>> optimise sensibly. >>> I understand the theory, but none of the two calls are in hot paths or deal >>> with large set on Arm. So I am rather hesitant to introduce specialised swap >>> in each case as it doesn't seem we will gain much from this change. >> While I like Andrew's optimization, like Julien, I would also rather not >> have to introduce specialized swap functions any time a sort() is >> called. Why not keeping around generic_swap as extern gnu_inline in >> sort.h as well so that the ARM callers can simply: >> >> sort(bootinfo.mem.bank, bootinfo.mem.nr_banks, sizeof(struct membank), >> cmp_memory_node, generic_swap); >> >> ? >> >> That looks like a good option, although it would still result in a >> small increase in bloat. > That is basically what Jan suggested. > > I'm still unconvinced that you actually want to be doing byte-wise > swapping, even if it isn't a hotpath. A custom swap function will > compile to less code than using generic_swap() like this, and run faster. ARM Ping. I really think this is a bad course of action. If you're going to insist on it, I want something in writing. ~Andrew
Hi Andrew, On 17/12/2021 15:56, Andrew Cooper wrote: > On 16/11/2021 00:41, Andrew Cooper wrote: >> On 16/11/2021 00:36, Stefano Stabellini wrote: >>> On Thu, 11 Nov 2021, Julien Grall wrote: >>>> On 11/11/2021 17:57, Andrew Cooper wrote: >>>>> There are exactly 3 callers of sort() in the hypervisor. >>>>> >>>>> Both arm callers pass in NULL for the swap function. While this might seem >>>>> like an attractive option at first, it causes generic_swap() to be used >>>>> which >>>>> forced a byte-wise copy. Provide real swap functions which the compiler can >>>>> optimise sensibly. >>>> I understand the theory, but none of the two calls are in hot paths or deal >>>> with large set on Arm. So I am rather hesitant to introduce specialised swap >>>> in each case as it doesn't seem we will gain much from this change. >>> While I like Andrew's optimization, like Julien, I would also rather not >>> have to introduce specialized swap functions any time a sort() is >>> called. Why not keeping around generic_swap as extern gnu_inline in >>> sort.h as well so that the ARM callers can simply: >>> >>> sort(bootinfo.mem.bank, bootinfo.mem.nr_banks, sizeof(struct membank), >>> cmp_memory_node, generic_swap); >>> >>> ? >>> >>> That looks like a good option, although it would still result in a >>> small increase in bloat. >> That is basically what Jan suggested. >> >> I'm still unconvinced that you actually want to be doing byte-wise >> swapping, even if it isn't a hotpath. A custom swap function will >> compile to less code than using generic_swap() like this, and run faster. > > ARM Ping. > > I really think this is a bad course of action. If you're going to > insist on it, I want something in writing. I agree with all the points you wrote. However, you also have to put into perspective how this is used. On Arm, the two callers happen either during boot a domain creation. Neither of them have a significant impact on the time spent. 
In fact, I would be surprised if you notice the change. So unless there are other (good) reasons to suppress generic_swap(), I still want to be able to use generic_swap() when the performance doesn't matter (like the two Arm callers). Cheers, -- Julien Grall
© 2016 - 2026 Red Hat, Inc.