Take care of the multiplication(s) involved in determining overall size
in the macros themselves, saturating to ULONG_MAX. This way on 64-bit
systems the subsequent check against UINT_MAX will fail, while on 32-bit
systems allocations of this size simply cannot be fulfilled anyway (such
an allocation would consume the entire address space).

The only place where we truly consume guest input (but constrained to
hwdom) is cpufreq_statistic_init(). Play it safe, however, and convert
the other three instances where a multiplication is involved as well.

While touching those sites, also switch to xv*alloc_array(), following
what was settled upon when those were introduced. Don't bother extending
x*alloc_array() the same way.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
To my surprise, code generation for do_memory_op() even improves a tiny
bit in the surrounding code (with gcc14).
---
v2: Drop (unused) logic for 3- and 4-dim arrays. Re-base.
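
For reviewers who want to see the intended interaction in isolation, here
is a minimal, compilable sketch of the idea described above (built outside
Xen; dim_mul2() and size_ok() are illustrative stand-ins of my own, not
the real DIM_MUL2() macro or the allocator's internal size check):

/*
 * Standalone sketch of the saturation idea; not hypervisor code.
 * dim_mul2() and size_ok() are made-up stand-ins for DIM_MUL2() and the
 * allocator's size check mentioned in the description.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Two-factor multiply which saturates to ULONG_MAX on overflow. */
static unsigned long dim_mul2(unsigned long n1, unsigned long n2)
{
    unsigned long res;

    return __builtin_umull_overflow(n1, n2, &res) ? ULONG_MAX : res;
}

/*
 * Stand-in for the allocator's size check: requests above UINT_MAX are
 * refused.  On 64-bit a saturated product therefore can never pass; on
 * 32-bit ULONG_MAX == UINT_MAX, but an allocation of that size cannot be
 * fulfilled anyway.
 */
static bool size_ok(unsigned long nr, unsigned long size)
{
    return dim_mul2(nr, size) <= UINT_MAX;
}

int main(void)
{
    printf("%d\n", size_ok(1024, sizeof(unsigned int)));  /* 1 - sane request */
    printf("%d\n", size_ok(ULONG_MAX / 2, 16));           /* 0 on 64-bit - saturated */

    return 0;
}

The two-argument xvmalloc_array() call sites in the diff below go through
the equivalent path via DIM_MUL2().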
--- a/xen/arch/x86/cpu/mcheck/mctelem.c
+++ b/xen/arch/x86/cpu/mcheck/mctelem.c
@@ -20,6 +20,7 @@
#include <xen/sched.h>
#include <xen/cpumask.h>
#include <xen/event.h>
+#include <xen/xvmalloc.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -340,7 +341,7 @@ void __init mctelem_init(unsigned int da
if ((mctctl.mctc_elems = xmalloc_array(struct mctelem_ent,
MC_NENT)) == NULL ||
- (datarr = xmalloc_bytes(MC_NENT * datasz)) == NULL) {
+ (datarr = xvmalloc_array(char, MC_NENT, datasz)) == NULL) {
xfree(mctctl.mctc_elems);
printk("Allocations for MCA telemetry failed\n");
return;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -26,6 +26,8 @@
#include <xen/hypercall.h>
#include <xen/vm_event.h>
#include <xen/monitor.h>
+#include <xen/xvmalloc.h>
+
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -160,7 +162,7 @@ void vnuma_destroy(struct vnuma_info *vn
{
xfree(vnuma->vmemrange);
xfree(vnuma->vcpu_to_vnode);
- xfree(vnuma->vdistance);
+ xvfree(vnuma->vdistance);
xfree(vnuma->vnode_to_pnode);
xfree(vnuma);
}
@@ -197,7 +199,7 @@ static struct vnuma_info *vnuma_alloc(un
if ( !vnuma )
return ERR_PTR(-ENOMEM);
- vnuma->vdistance = xmalloc_array(unsigned int, nr_vnodes * nr_vnodes);
+ vnuma->vdistance = xvmalloc_array(unsigned int, nr_vnodes, nr_vnodes);
vnuma->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus);
vnuma->vnode_to_pnode = xmalloc_array(nodeid_t, nr_vnodes);
vnuma->vmemrange = xmalloc_array(xen_vmemrange_t, nr_ranges);
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -26,6 +26,8 @@
#include <xen/sections.h>
#include <xen/trace.h>
#include <xen/types.h>
+#include <xen/xvmalloc.h>
+
#include <asm/current.h>
#include <asm/hardirq.h>
#include <asm/p2m.h>
@@ -1750,7 +1752,7 @@ long do_memory_op(unsigned long cmd, XEN
read_unlock(&d->vnuma_rwlock);
- tmp.vdistance = xmalloc_array(unsigned int, dom_vnodes * dom_vnodes);
+ tmp.vdistance = xvmalloc_array(unsigned int, dom_vnodes, dom_vnodes);
tmp.vmemrange = xmalloc_array(xen_vmemrange_t, dom_vranges);
tmp.vcpu_to_vnode = xmalloc_array(unsigned int, dom_vcpus);
@@ -1813,7 +1815,7 @@ long do_memory_op(unsigned long cmd, XEN
vnumainfo_out:
rcu_unlock_domain(d);
- xfree(tmp.vdistance);
+ xvfree(tmp.vdistance);
xfree(tmp.vmemrange);
xfree(tmp.vcpu_to_vnode);
break;
--- a/xen/drivers/acpi/pmstat.c
+++ b/xen/drivers/acpi/pmstat.c
@@ -36,6 +36,7 @@
#include <xen/percpu.h>
#include <xen/domain.h>
#include <xen/acpi.h>
+#include <xen/xvmalloc.h>
#include <public/sysctl.h>
#include <acpi/cpufreq/cpufreq.h>
@@ -125,7 +126,7 @@ int cpufreq_statistic_init(unsigned int
return -ENOMEM;
}
- pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
+ pxpt->u.trans_pt = xvzalloc_array(uint64_t, count, count);
if ( !pxpt->u.trans_pt )
{
xfree(pxpt);
@@ -136,7 +137,7 @@ int cpufreq_statistic_init(unsigned int
pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
if ( !pxpt->u.pt )
{
- xfree(pxpt->u.trans_pt);
+ xvfree(pxpt->u.trans_pt);
xfree(pxpt);
spin_unlock(cpufreq_statistic_lock);
return -ENOMEM;
@@ -172,7 +173,7 @@ void cpufreq_statistic_exit(unsigned int
return;
}
- xfree(pxpt->u.trans_pt);
+ xvfree(pxpt->u.trans_pt);
xfree(pxpt->u.pt);
xfree(pxpt);
per_cpu(cpufreq_statistic_data, cpu) = NULL;
--- a/xen/include/xen/xvmalloc.h
+++ b/xen/include/xen/xvmalloc.h
@@ -22,11 +22,21 @@
(typeof(*(ptr)) *)p_; \
})
+#define DIM_MUL1(n) (n)
+#define DIM_MUL2(n1, n2) ({ \
+ unsigned long res_; \
+ __builtin_umull_overflow(n1, n2, &res_) ? ULONG_MAX : res_; \
+})
+#define DIM_MUL_(n, nums...) DIM_MUL##n(nums)
+#define DIM_MUL(n, nums...) DIM_MUL_(n, ## nums)
+
/* Allocate space for array of typed objects. */
-#define xvmalloc_array(_type, _num) \
- ((_type *)_xvmalloc_array(sizeof(_type), __alignof__(_type), _num))
-#define xvzalloc_array(_type, _num) \
- ((_type *)_xvzalloc_array(sizeof(_type), __alignof__(_type), _num))
+#define xvmalloc_array(type, num, nums...) \
+ ((type *)_xvmalloc_array(sizeof(type), __alignof__(type), \
+ DIM_MUL(count_args(num, ## nums), num, ## nums)))
+#define xvzalloc_array(type, num, nums...) \
+ ((type *)_xvzalloc_array(sizeof(type), __alignof__(type), \
+ DIM_MUL(count_args(num, ## nums), num, ## nums)))
/* Allocate space for a structure with a flexible array of typed objects. */
#define xvzalloc_flex_struct(type, field, nr) \
Hi Jan,

On 10/07/2025 12:02, Jan Beulich wrote:
> Take care of the multiplication(s) involved in determining overall size
> in the macros themselves, saturating to ULONG_MAX. This way on 64-bit
> systems the subsequent check against UINT_MAX will fail, while on 32-bit
> systems allocations of this size simply cannot be fulfilled anyway
> (such an allocation would consume the entire address space).
> [...]
> Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Julien Grall <jgrall@amazon.com>

Cheers,

--
Julien Grall