[PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area

Pasha Tatashin posted 3 patches 3 months, 3 weeks ago
[PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Pasha Tatashin 3 months, 3 weeks ago
It is invalid for KHO metadata or preserved memory regions to be located
within the KHO scratch area, as this area is overwritten when the next
kernel is loaded, and used early in boot by the next kernel. This can
lead to memory corruption.

Add checks to kho_preserve_* and KHO's internal metadata allocators
(xa_load_or_alloc, new_chunk) to verify that the physical address of the
memory does not overlap with any defined scratch region. If an overlap
is detected, the operation will fail and a WARN_ON is triggered. To
avoid performance overhead in production kernels, these checks are
enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 kernel/Kconfig.kexec             |  9 ++++++
 kernel/Makefile                  |  1 +
 kernel/kexec_handover.c          | 53 ++++++++++++++++++++++----------
 kernel/kexec_handover_debug.c    | 25 +++++++++++++++
 kernel/kexec_handover_internal.h | 16 ++++++++++
 5 files changed, 87 insertions(+), 17 deletions(-)
 create mode 100644 kernel/kexec_handover_debug.c
 create mode 100644 kernel/kexec_handover_internal.h

diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
index 422270d64820..c94d36b5fcd9 100644
--- a/kernel/Kconfig.kexec
+++ b/kernel/Kconfig.kexec
@@ -109,6 +109,15 @@ config KEXEC_HANDOVER
 	  to keep data or state alive across the kexec. For this to work,
 	  both source and target kernels need to have this option enabled.
 
+config KEXEC_HANDOVER_DEBUG
+	bool "Enable Kexec Handover debug checks"
+	depends on KEXEC_HANDOVER_DEBUGFS
+	help
+	  This option enables extra sanity checks for the Kexec Handover
+	  subsystem. Since KHO performance is crucial in live update
+	  scenarios and the extra code might add overhead, it is only
+	  optionally enabled.
+
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	default ARCH_DEFAULT_CRASH_DUMP
diff --git a/kernel/Makefile b/kernel/Makefile
index df3dd8291bb6..9fe722305c9b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
 obj-$(CONFIG_KEXEC_ELF) += kexec_elf.o
 obj-$(CONFIG_KEXEC_HANDOVER) += kexec_handover.o
+obj-$(CONFIG_KEXEC_HANDOVER_DEBUG) += kexec_handover_debug.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup/
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 76f0940fb485..7b460806ef4f 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -8,6 +8,7 @@
 
 #define pr_fmt(fmt) "KHO: " fmt
 
+#include <linux/cleanup.h>
 #include <linux/cma.h>
 #include <linux/count_zeros.h>
 #include <linux/debugfs.h>
@@ -22,6 +23,7 @@
 
 #include <asm/early_ioremap.h>
 
+#include "kexec_handover_internal.h"
 /*
  * KHO is tightly coupled with mm init and needs access to some of mm
  * internal APIs.
@@ -133,26 +135,26 @@ static struct kho_out kho_out = {
 
 static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
 {
-	void *elm, *res;
+	void *res = xa_load(xa, index);
 
-	elm = xa_load(xa, index);
-	if (elm)
-		return elm;
+	if (res)
+		return res;
+
+	void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
 
-	elm = kzalloc(sz, GFP_KERNEL);
 	if (!elm)
 		return ERR_PTR(-ENOMEM);
 
+	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), sz)))
+		return ERR_PTR(-EINVAL);
+
 	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
 	if (xa_is_err(res))
-		res = ERR_PTR(xa_err(res));
-
-	if (res) {
-		kfree(elm);
+		return ERR_PTR(xa_err(res));
+	else if (res)
 		return res;
-	}
 
-	return elm;
+	return no_free_ptr(elm);
 }
 
 static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
@@ -345,15 +347,19 @@ static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
 static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
 					  unsigned long order)
 {
-	struct khoser_mem_chunk *chunk;
+	struct khoser_mem_chunk *chunk __free(kfree) = NULL;
 
 	chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!chunk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
+
+	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
+		return ERR_PTR(-EINVAL);
+
 	chunk->hdr.order = order;
 	if (cur_chunk)
 		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
-	return chunk;
+	return no_free_ptr(chunk);
 }
 
 static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
@@ -374,14 +380,17 @@ static int kho_mem_serialize(struct kho_serialization *ser)
 	struct khoser_mem_chunk *chunk = NULL;
 	struct kho_mem_phys *physxa;
 	unsigned long order;
+	int err = -ENOMEM;
 
 	xa_for_each(&ser->track.orders, order, physxa) {
 		struct kho_mem_phys_bits *bits;
 		unsigned long phys;
 
 		chunk = new_chunk(chunk, order);
-		if (!chunk)
+		if (IS_ERR(chunk)) {
+			err = PTR_ERR(chunk);
 			goto err_free;
+		}
 
 		if (!first_chunk)
 			first_chunk = chunk;
@@ -391,8 +400,10 @@ static int kho_mem_serialize(struct kho_serialization *ser)
 
 			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
 				chunk = new_chunk(chunk, order);
-				if (!chunk)
+				if (IS_ERR(chunk)) {
+					err = PTR_ERR(chunk);
 					goto err_free;
+				}
 			}
 
 			elm = &chunk->bitmaps[chunk->hdr.num_elms];
@@ -409,7 +420,7 @@ static int kho_mem_serialize(struct kho_serialization *ser)
 
 err_free:
 	kho_mem_ser_free(first_chunk);
-	return -ENOMEM;
+	return err;
 }
 
 static void __init deserialize_bitmap(unsigned int order,
@@ -752,6 +763,9 @@ int kho_preserve_folio(struct folio *folio)
 	const unsigned int order = folio_order(folio);
 	struct kho_mem_track *track = &kho_out.ser.track;
 
+	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
+		return -EINVAL;
+
 	return __kho_preserve_order(track, pfn, order);
 }
 EXPORT_SYMBOL_GPL(kho_preserve_folio);
@@ -775,6 +789,11 @@ int kho_preserve_pages(struct page *page, unsigned int nr_pages)
 	unsigned long failed_pfn = 0;
 	int err = 0;
 
+	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
+					nr_pages << PAGE_SHIFT))) {
+		return -EINVAL;
+	}
+
 	while (pfn < end_pfn) {
 		const unsigned int order =
 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
diff --git a/kernel/kexec_handover_debug.c b/kernel/kexec_handover_debug.c
new file mode 100644
index 000000000000..6efb696f5426
--- /dev/null
+++ b/kernel/kexec_handover_debug.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kexec_handover_debug.c - kexec handover optional debug functionality
+ * Copyright (C) 2025 Google LLC, Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#define pr_fmt(fmt) "KHO: " fmt
+
+#include "kexec_handover_internal.h"
+
+bool kho_scratch_overlap(phys_addr_t phys, size_t size)
+{
+	phys_addr_t scratch_start, scratch_end;
+	unsigned int i;
+
+	for (i = 0; i < kho_scratch_cnt; i++) {
+		scratch_start = kho_scratch[i].addr;
+		scratch_end = kho_scratch[i].addr + kho_scratch[i].size;
+
+		if (phys < scratch_end && (phys + size) > scratch_start)
+			return true;
+	}
+
+	return false;
+}
diff --git a/kernel/kexec_handover_internal.h b/kernel/kexec_handover_internal.h
new file mode 100644
index 000000000000..05e9720ba7b9
--- /dev/null
+++ b/kernel/kexec_handover_internal.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_KEXEC_HANDOVER_INTERNAL_H
+#define LINUX_KEXEC_HANDOVER_INTERNAL_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KEXEC_HANDOVER_DEBUG
+bool kho_scratch_overlap(phys_addr_t phys, size_t size);
+#else
+static inline bool kho_scratch_overlap(phys_addr_t phys, size_t size)
+{
+	return false;
+}
+#endif /* CONFIG_KEXEC_HANDOVER_DEBUG */
+
+#endif /* LINUX_KEXEC_HANDOVER_INTERNAL_H */
-- 
2.51.0.869.ge66316f041-goog
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Mike Rapoport 3 months, 2 weeks ago
Hi Pasha,

On Mon, Oct 20, 2025 at 08:08:50PM -0400, Pasha Tatashin wrote:
> It is invalid for KHO metadata or preserved memory regions to be located
> within the KHO scratch area, as this area is overwritten when the next
> kernel is loaded, and used early in boot by the next kernel. This can
> lead to memory corruption.
> 
> Adds checks to kho_preserve_* and KHO's internal metadata allocators
> (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> memory does not overlap with any defined scratch region. If an overlap
> is detected, the operation will fail and a WARN_ON is triggered. To
> avoid performance overhead in production kernels, these checks are
> enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
> 
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> ---
>  kernel/Kconfig.kexec             |  9 ++++++
>  kernel/Makefile                  |  1 +
>  kernel/kexec_handover.c          | 53 ++++++++++++++++++++++----------
>  kernel/kexec_handover_debug.c    | 25 +++++++++++++++
>  kernel/kexec_handover_internal.h | 16 ++++++++++
>  5 files changed, 87 insertions(+), 17 deletions(-)
>  create mode 100644 kernel/kexec_handover_debug.c
>  create mode 100644 kernel/kexec_handover_internal.h
> 
> diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
> index 422270d64820..c94d36b5fcd9 100644
> --- a/kernel/Kconfig.kexec
> +++ b/kernel/Kconfig.kexec
> @@ -109,6 +109,15 @@ config KEXEC_HANDOVER
>  	  to keep data or state alive across the kexec. For this to work,
>  	  both source and target kernels need to have this option enabled.
>  
> +config KEXEC_HANDOVER_DEBUG
> +	bool "Enable Kexec Handover debug checks"
> +	depends on KEXEC_HANDOVER_DEBUGFS

I missed that in the earlier review, should be "depends on KEXEC_HANDOVER"

@Andrew, can you please fold this into what's now commit 0e0faeffd144
("kho: warn and fail on metadata or preserved memory in scratch area")

diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
index c94d36b5fcd9..54e581072617 100644
--- a/kernel/Kconfig.kexec
+++ b/kernel/Kconfig.kexec
@@ -111,7 +111,7 @@ config KEXEC_HANDOVER
 
 config KEXEC_HANDOVER_DEBUG
 	bool "Enable Kexec Handover debug checks"
-	depends on KEXEC_HANDOVER_DEBUGFS
+	depends on KEXEC_HANDOVER
 	help
 	  This option enables extra sanity checks for the Kexec Handover
 	  subsystem. Since, KHO performance is crucial in live update

> +	help
> +	  This option enables extra sanity checks for the Kexec Handover
> +	  subsystem. Since, KHO performance is crucial in live update
> +	  scenarios and the extra code might be adding overhead it is
> +	  only optionally enabled.
> +
>  config CRASH_DUMP
>  	bool "kernel crash dumps"
>  	default ARCH_DEFAULT_CRASH_DUMP

-- 
Sincerely yours,
Mike.
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Pasha Tatashin 3 months, 2 weeks ago
Hi Andrew,

Would you like me to resend the series with the "+       depends on
KEXEC_HANDOVER" fix from Mike, or would you apply it into your tree
directly?

Thank you,
Pasha

On Wed, Oct 29, 2025 at 4:48 AM Mike Rapoport <rppt@kernel.org> wrote:
>
> Hi Pasha,
>
> On Mon, Oct 20, 2025 at 08:08:50PM -0400, Pasha Tatashin wrote:
> > It is invalid for KHO metadata or preserved memory regions to be located
> > within the KHO scratch area, as this area is overwritten when the next
> > kernel is loaded, and used early in boot by the next kernel. This can
> > lead to memory corruption.
> >
> > Adds checks to kho_preserve_* and KHO's internal metadata allocators
> > (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> > memory does not overlap with any defined scratch region. If an overlap
> > is detected, the operation will fail and a WARN_ON is triggered. To
> > avoid performance overhead in production kernels, these checks are
> > enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
> >
> > Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> > ---
> >  kernel/Kconfig.kexec             |  9 ++++++
> >  kernel/Makefile                  |  1 +
> >  kernel/kexec_handover.c          | 53 ++++++++++++++++++++++----------
> >  kernel/kexec_handover_debug.c    | 25 +++++++++++++++
> >  kernel/kexec_handover_internal.h | 16 ++++++++++
> >  5 files changed, 87 insertions(+), 17 deletions(-)
> >  create mode 100644 kernel/kexec_handover_debug.c
> >  create mode 100644 kernel/kexec_handover_internal.h
> >
> > diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
> > index 422270d64820..c94d36b5fcd9 100644
> > --- a/kernel/Kconfig.kexec
> > +++ b/kernel/Kconfig.kexec
> > @@ -109,6 +109,15 @@ config KEXEC_HANDOVER
> >         to keep data or state alive across the kexec. For this to work,
> >         both source and target kernels need to have this option enabled.
> >
> > +config KEXEC_HANDOVER_DEBUG
> > +     bool "Enable Kexec Handover debug checks"
> > +     depends on KEXEC_HANDOVER_DEBUGFS
>
> I missed that in the earlier review, should be "depends on KEXEC_HANDOVER"
>
> @Andrew, can you please fold this into what's now commit 0e0faeffd144
> ("kho: warn and fail on metadata or preserved memory in scratch area")
>
> diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
> index c94d36b5fcd9..54e581072617 100644
> --- a/kernel/Kconfig.kexec
> +++ b/kernel/Kconfig.kexec
> @@ -111,7 +111,7 @@ config KEXEC_HANDOVER
>
>  config KEXEC_HANDOVER_DEBUG
>         bool "Enable Kexec Handover debug checks"
> -       depends on KEXEC_HANDOVER_DEBUGFS
> +       depends on KEXEC_HANDOVER
>         help
>           This option enables extra sanity checks for the Kexec Handover
>           subsystem. Since, KHO performance is crucial in live update
>
> > +     help
> > +       This option enables extra sanity checks for the Kexec Handover
> > +       subsystem. Since, KHO performance is crucial in live update
> > +       scenarios and the extra code might be adding overhead it is
> > +       only optionally enabled.
> > +
> >  config CRASH_DUMP
> >       bool "kernel crash dumps"
> >       default ARCH_DEFAULT_CRASH_DUMP
>
> --
> Sincerely yours,
> Mike.
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Andrew Morton 3 months, 1 week ago
On Wed, 29 Oct 2025 18:22:46 -0400 Pasha Tatashin <pasha.tatashin@soleen.com> wrote:

> Would you like me to resend the series with the "+       depends on
> KEXEC_HANDOVER" fix from Mike, or would you apply it into your tree
> directly?

Let's not be resending a patch series for a one-line fix!  I'll add
Mike's fix as a -fix patch.
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by David Matlack 3 months, 2 weeks ago
On Mon, Oct 20, 2025 at 5:08 PM Pasha Tatashin
<pasha.tatashin@soleen.com> wrote:
>
> It is invalid for KHO metadata or preserved memory regions to be located
> within the KHO scratch area, as this area is overwritten when the next
> kernel is loaded, and used early in boot by the next kernel. This can
> lead to memory corruption.
>
> Adds checks to kho_preserve_* and KHO's internal metadata allocators
> (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> memory does not overlap with any defined scratch region. If an overlap
> is detected, the operation will fail and a WARN_ON is triggered. To
> avoid performance overhead in production kernels, these checks are
> enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.

How many scratch regions are there in practice? Checking
unconditionally seems like a small price to pay to avoid possible
memory corruption. Especially since most KHO preservation should
happen while the VM is still running (so does not have to be
hyper-optimized).

>  static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
>  {
> -       void *elm, *res;
> +       void *res = xa_load(xa, index);
>
> -       elm = xa_load(xa, index);
> -       if (elm)
> -               return elm;
> +       if (res)
> +               return res;
> +
> +       void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);

nit: This breaks the local style of always declaring variables at the
beginning of blocks.
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Pasha Tatashin 3 months, 2 weeks ago
On Mon, Oct 27, 2025 at 6:29 PM David Matlack <dmatlack@google.com> wrote:
>
> On Mon, Oct 20, 2025 at 5:08 PM Pasha Tatashin
> <pasha.tatashin@soleen.com> wrote:
> >
> > It is invalid for KHO metadata or preserved memory regions to be located
> > within the KHO scratch area, as this area is overwritten when the next
> > kernel is loaded, and used early in boot by the next kernel. This can
> > lead to memory corruption.
> >
> > Adds checks to kho_preserve_* and KHO's internal metadata allocators
> > (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> > memory does not overlap with any defined scratch region. If an overlap
> > is detected, the operation will fail and a WARN_ON is triggered. To
> > avoid performance overhead in production kernels, these checks are
> > enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
>
> How many scratch regions are there in practice? Checking
> unconditionally seems like a small price to pay to avoid possible
> memory corruption. Especially since most KHO preservation should
> happen while the VM is still running (so does not have to by
> hyper-optimized).

The debug option can be enabled on production systems as well; we have
some debug options enabled, but I do not see a reason to make this a
fixed cost that can add up; the runtime cost scares me, as we might be
using KHO preserve/unpreserve often once stateless KHO + slab
preservation is implemented during some allocations paths. Let's keep
it optional.

>
> >  static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
> >  {
> > -       void *elm, *res;
> > +       void *res = xa_load(xa, index);
> >
> > -       elm = xa_load(xa, index);
> > -       if (elm)
> > -               return elm;
> > +       if (res)
> > +               return res;
> > +
> > +       void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
>
> nit: This breaks the local style of always declaring variables at the
> beginning of blocks.

I think this suggestion came from Mike, to me it looks alright, as it
is only part of the clean-up path.

Pasha
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Jason Gunthorpe 3 months, 2 weeks ago
On Mon, Oct 27, 2025 at 08:01:59PM -0400, Pasha Tatashin wrote:
> > >  static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
> > >  {
> > > -       void *elm, *res;
> > > +       void *res = xa_load(xa, index);
> > >
> > > -       elm = xa_load(xa, index);
> > > -       if (elm)
> > > -               return elm;
> > > +       if (res)
> > > +               return res;
> > > +
> > > +       void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
> >
> > nit: This breaks the local style of always declaring variables at the
> > beginning of blocks.
> 
> I think this suggestion came from Mike, to me it looks alright, as it
> is only part of the clean-up path.

It is the recommended style for using cleanup.h stuff, declare and
assign in one statement.

Jason
Re: [PATCH v3 1/3] liveupdate: kho: warn and fail on metadata or preserved memory in scratch area
Posted by Pratyush Yadav 3 months, 3 weeks ago
On Mon, Oct 20 2025, Pasha Tatashin wrote:

> It is invalid for KHO metadata or preserved memory regions to be located
> within the KHO scratch area, as this area is overwritten when the next
> kernel is loaded, and used early in boot by the next kernel. This can
> lead to memory corruption.
>
> Adds checks to kho_preserve_* and KHO's internal metadata allocators
> (xa_load_or_alloc, new_chunk) to verify that the physical address of the
> memory does not overlap with any defined scratch region. If an overlap
> is detected, the operation will fail and a WARN_ON is triggered. To
> avoid performance overhead in production kernels, these checks are
> enabled only when CONFIG_KEXEC_HANDOVER_DEBUG is selected.
>
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
[...]
> @@ -133,26 +135,26 @@ static struct kho_out kho_out = {
>  
>  static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
>  {
> -	void *elm, *res;
> +	void *res = xa_load(xa, index);
>  
> -	elm = xa_load(xa, index);
> -	if (elm)
> -		return elm;
> +	if (res)
> +		return res;
> +
> +	void *elm __free(kfree) = kzalloc(sz, GFP_KERNEL);
>  
> -	elm = kzalloc(sz, GFP_KERNEL);
>  	if (!elm)
>  		return ERR_PTR(-ENOMEM);
>  
> +	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), sz)))
> +		return ERR_PTR(-EINVAL);
> +
>  	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
>  	if (xa_is_err(res))
> -		res = ERR_PTR(xa_err(res));
> -
> -	if (res) {
> -		kfree(elm);
> +		return ERR_PTR(xa_err(res));
> +	else if (res)
>  		return res;
> -	}
>  
> -	return elm;
> +	return no_free_ptr(elm);

Super small nit: there exists return_ptr(p) which is a tiny bit neater
IMO but certainly not worth doing a new revision over. So,

Reviewed-by: Pratyush Yadav <pratyush@kernel.org>

[...]

-- 
Regards,
Pratyush Yadav