Kexec does not accept segments for which the destination address is not
page aligned. Therefore there is no need for page offset handling when
loading segments.
Signed-off-by: Justinien Bouron <jbouron@amazon.com>
Reviewed-by: Gunnar Kudrjavets <gunnarku@amazon.com>
---
kernel/kexec_core.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 31203f0bacaf..7d4c9eebea79 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -761,9 +761,7 @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
 	while (mbytes) {
 		size_t uchunk, mchunk;
 
-		ptr += maddr & ~PAGE_MASK;
-		mchunk = min_t(size_t, mbytes,
-				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
 		uchunk = min(ubytes, mchunk);
 
 		if (uchunk) {
@@ -815,6 +813,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx)
 	mbytes = segment->memsz;
 	maddr = segment->mem;
 
+
 	if (image->segment_cma[idx])
 		return kimage_load_cma_segment(image, idx);
 
@@ -840,9 +839,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx)
 		ptr = kmap_local_page(page);
 		/* Start with a clear page */
 		clear_page(ptr);
-		ptr += maddr & ~PAGE_MASK;
-		mchunk = min_t(size_t, mbytes,
-				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
 		uchunk = min(ubytes, mchunk);
 
 		if (uchunk) {
@@ -905,9 +902,7 @@ static int kimage_load_crash_segment(struct kimage *image, int idx)
 		}
 		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 		ptr = kmap_local_page(page);
-		ptr += maddr & ~PAGE_MASK;
-		mchunk = min_t(size_t, mbytes,
-				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
 		uchunk = min(ubytes, mchunk);
 		if (mchunk > uchunk) {
 			/* Zero the trailing part of the page */
--
2.43.0
Hi Justinien,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on linus/master v6.17-rc5 next-20250911]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Justinien-Bouron/kexec_core-Remove-superfluous-page-offset-handling-in-segment-loading/20250911-003354
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250910163116.49148-1-jbouron%40amazon.com
patch subject: [PATCH] kexec_core: Remove superfluous page offset handling in segment loading
config: x86_64-buildonly-randconfig-004-20250911 (https://download.01.org/0day-ci/archive/20250911/202509112118.1NElSKNk-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250911/202509112118.1NElSKNk-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509112118.1NElSKNk-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> kernel/kexec_core.c:745:16: warning: variable 'maddr' set but not used [-Wunused-but-set-variable]
745 | unsigned long maddr;
| ^
1 warning generated.
vim +/maddr +745 kernel/kexec_core.c
2965faa5e03d1e Dave Young 2015-09-09 739
07d24902977e47 Alexander Graf 2025-06-10 740 static int kimage_load_cma_segment(struct kimage *image, int idx)
07d24902977e47 Alexander Graf 2025-06-10 741 {
07d24902977e47 Alexander Graf 2025-06-10 742 struct kexec_segment *segment = &image->segment[idx];
07d24902977e47 Alexander Graf 2025-06-10 743 struct page *cma = image->segment_cma[idx];
07d24902977e47 Alexander Graf 2025-06-10 744 char *ptr = page_address(cma);
07d24902977e47 Alexander Graf 2025-06-10 @745 unsigned long maddr;
07d24902977e47 Alexander Graf 2025-06-10 746 size_t ubytes, mbytes;
07d24902977e47 Alexander Graf 2025-06-10 747 int result = 0;
07d24902977e47 Alexander Graf 2025-06-10 748 unsigned char __user *buf = NULL;
07d24902977e47 Alexander Graf 2025-06-10 749 unsigned char *kbuf = NULL;
07d24902977e47 Alexander Graf 2025-06-10 750
07d24902977e47 Alexander Graf 2025-06-10 751 if (image->file_mode)
07d24902977e47 Alexander Graf 2025-06-10 752 kbuf = segment->kbuf;
07d24902977e47 Alexander Graf 2025-06-10 753 else
07d24902977e47 Alexander Graf 2025-06-10 754 buf = segment->buf;
07d24902977e47 Alexander Graf 2025-06-10 755 ubytes = segment->bufsz;
07d24902977e47 Alexander Graf 2025-06-10 756 mbytes = segment->memsz;
07d24902977e47 Alexander Graf 2025-06-10 757 maddr = segment->mem;
07d24902977e47 Alexander Graf 2025-06-10 758
07d24902977e47 Alexander Graf 2025-06-10 759 /* Then copy from source buffer to the CMA one */
07d24902977e47 Alexander Graf 2025-06-10 760 while (mbytes) {
07d24902977e47 Alexander Graf 2025-06-10 761 size_t uchunk, mchunk;
07d24902977e47 Alexander Graf 2025-06-10 762
b4cdefb6ef1453 Justinien Bouron 2025-09-10 763 mchunk = min_t(size_t, mbytes, PAGE_SIZE);
07d24902977e47 Alexander Graf 2025-06-10 764 uchunk = min(ubytes, mchunk);
07d24902977e47 Alexander Graf 2025-06-10 765
07d24902977e47 Alexander Graf 2025-06-10 766 if (uchunk) {
07d24902977e47 Alexander Graf 2025-06-10 767 /* For file based kexec, source pages are in kernel memory */
07d24902977e47 Alexander Graf 2025-06-10 768 if (image->file_mode)
07d24902977e47 Alexander Graf 2025-06-10 769 memcpy(ptr, kbuf, uchunk);
07d24902977e47 Alexander Graf 2025-06-10 770 else
07d24902977e47 Alexander Graf 2025-06-10 771 result = copy_from_user(ptr, buf, uchunk);
07d24902977e47 Alexander Graf 2025-06-10 772 ubytes -= uchunk;
07d24902977e47 Alexander Graf 2025-06-10 773 if (image->file_mode)
07d24902977e47 Alexander Graf 2025-06-10 774 kbuf += uchunk;
07d24902977e47 Alexander Graf 2025-06-10 775 else
07d24902977e47 Alexander Graf 2025-06-10 776 buf += uchunk;
07d24902977e47 Alexander Graf 2025-06-10 777 }
07d24902977e47 Alexander Graf 2025-06-10 778
07d24902977e47 Alexander Graf 2025-06-10 779 if (result) {
07d24902977e47 Alexander Graf 2025-06-10 780 result = -EFAULT;
07d24902977e47 Alexander Graf 2025-06-10 781 goto out;
07d24902977e47 Alexander Graf 2025-06-10 782 }
07d24902977e47 Alexander Graf 2025-06-10 783
07d24902977e47 Alexander Graf 2025-06-10 784 ptr += mchunk;
07d24902977e47 Alexander Graf 2025-06-10 785 maddr += mchunk;
07d24902977e47 Alexander Graf 2025-06-10 786 mbytes -= mchunk;
07d24902977e47 Alexander Graf 2025-06-10 787
07d24902977e47 Alexander Graf 2025-06-10 788 cond_resched();
07d24902977e47 Alexander Graf 2025-06-10 789 }
07d24902977e47 Alexander Graf 2025-06-10 790
07d24902977e47 Alexander Graf 2025-06-10 791 /* Clear any remainder */
07d24902977e47 Alexander Graf 2025-06-10 792 memset(ptr, 0, mbytes);
07d24902977e47 Alexander Graf 2025-06-10 793
07d24902977e47 Alexander Graf 2025-06-10 794 out:
07d24902977e47 Alexander Graf 2025-06-10 795 return result;
07d24902977e47 Alexander Graf 2025-06-10 796 }
07d24902977e47 Alexander Graf 2025-06-10 797
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
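For context, the warning above is a direct consequence of the change: with the
offset handling removed, maddr in kimage_load_cma_segment() is only assigned
and incremented, never read. A minimal follow-up would drop the variable from
that function entirely; the hunks below are only a sketch on top of the posted
patch (hunk ranges elided), not something that was actually submitted:

--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ ... @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
 	char *ptr = page_address(cma);
-	unsigned long maddr;
 	size_t ubytes, mbytes;
@@ ... @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
 	ubytes = segment->bufsz;
 	mbytes = segment->memsz;
-	maddr = segment->mem;
@@ ... @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
 		ptr += mchunk;
-		maddr += mchunk;
 		mbytes -= mchunk;

The normal and crash loaders still read maddr (they use it to look up the
destination page on every iteration), so only the CMA path is affected.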
On 09/10/25 at 09:31am, Justinien Bouron wrote:
> Kexec does not accept segments for which the destination address is not
> page aligned. Therefore there is no need for page offset handling when
> loading segments.
Do you mean we will adjust the memsz and buf_align to be PAGE_SIZE aligned
in kexec_add_buffer()? That better be explained in the log.
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	......
	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
	kbuf->cma = NULL;
	......
}
>
> Signed-off-by: Justinien Bouron <jbouron@amazon.com>
> Reviewed-by: Gunnar Kudrjavets <gunnarku@amazon.com>
> ---
> kernel/kexec_core.c | 13 ++++---------
> 1 file changed, 4 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> index 31203f0bacaf..7d4c9eebea79 100644
> --- a/kernel/kexec_core.c
> +++ b/kernel/kexec_core.c
> @@ -761,9 +761,7 @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
> while (mbytes) {
> size_t uchunk, mchunk;
>
> - ptr += maddr & ~PAGE_MASK;
> - mchunk = min_t(size_t, mbytes,
> - PAGE_SIZE - (maddr & ~PAGE_MASK));
> + mchunk = min_t(size_t, mbytes, PAGE_SIZE);
I am not so eager to remove it, since keeping it makes some sense as
defensive programming. That said, I am not opposing the change, as it is
truly not necessary for now.
> uchunk = min(ubytes, mchunk);
>
> if (uchunk) {
> @@ -815,6 +813,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx)
> mbytes = segment->memsz;
> maddr = segment->mem;
>
> +
> if (image->segment_cma[idx])
> return kimage_load_cma_segment(image, idx);
>
> @@ -840,9 +839,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx)
> ptr = kmap_local_page(page);
> /* Start with a clear page */
> clear_page(ptr);
> - ptr += maddr & ~PAGE_MASK;
> - mchunk = min_t(size_t, mbytes,
> - PAGE_SIZE - (maddr & ~PAGE_MASK));
> + mchunk = min_t(size_t, mbytes, PAGE_SIZE);
> uchunk = min(ubytes, mchunk);
>
> if (uchunk) {
> @@ -905,9 +902,7 @@ static int kimage_load_crash_segment(struct kimage *image, int idx)
> }
> arch_kexec_post_alloc_pages(page_address(page), 1, 0);
> ptr = kmap_local_page(page);
> - ptr += maddr & ~PAGE_MASK;
> - mchunk = min_t(size_t, mbytes,
> - PAGE_SIZE - (maddr & ~PAGE_MASK));
> + mchunk = min_t(size_t, mbytes, PAGE_SIZE);
> uchunk = min(ubytes, mchunk);
> if (mchunk > uchunk) {
> /* Zero the trailing part of the page */
> --
> 2.43.0
>
>
On 9/11/25, 02:42, "Baoquan He" <bhe@redhat.com> wrote:
> Do you mean we will adjust the memsz and buf_align to be PAGE_SIZE aligned
> in kexec_add_buffer()?
That, and mostly the fact that `sanity_check_segment_list()` explicitly rejects
any segment that does not both start and end on a page boundary:
int sanity_check_segment_list(struct kimage *image)
{
	// ...
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}
	// ...
}
> That better be explained in the log.
Does it warrant a second revision to change the log?
Thanks,
Justinien
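As a quick numeric illustration of the check quoted above, assuming 4 KiB pages
(PAGE_SIZE == 0x1000, so ~PAGE_MASK == 0xfff; the addresses are made up):

	mstart = 0x10010;	/* 0x10010 & ~PAGE_MASK == 0x010 -> rejected, -EADDRNOTAVAIL */
	mstart = 0x10000;	/* 0x10000 & ~PAGE_MASK == 0x000 -> passes the alignment check */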
On 09/11/25 at 03:30pm, Bouron, Justinien wrote:
> On 9/11/25, 02:42, "Baoquan He" <bhe@redhat.com> wrote:
> > Do you mean we will adjust the memsz and buf_align to be PAGE_SIZE aligned
> > in kexec_add_buffer()?
> That, and mostly the fact that `sanity_check_segment_list()` explicitly rejects
> any segment that does not both start and end on a page boundary:
Ah, yes. I missed this one.
>
> int sanity_check_segment_list(struct kimage *image)
> {
> // ...
> for (i = 0; i < nr_segments; i++) {
> unsigned long mstart, mend;
>
> mstart = image->segment[i].mem;
> mend = mstart + image->segment[i].memsz;
> if (mstart > mend)
> return -EADDRNOTAVAIL;
> if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
> return -EADDRNOTAVAIL;
> if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
> return -EADDRNOTAVAIL;
> }
> // ...
> }
>
> > That better be explained in the log.
> Does it warrant a second revision to change the log?
I would appreciate it if we could explain the reason a little in the patch
log, because we have code as below where people assign a non-PAGE_SIZE value
to kbuf.buf_align. With only a general conclusion, people need to explore the
code to find out. At least that's what I did when checking this patch.
arch/x86/kernel/kexec-bzimage64.c:
static void *bzImage64_load(struct kimage *image, char *kernel,
			    unsigned long kernel_len, char *initrd,
			    unsigned long initrd_len, char *cmdline,
			    unsigned long cmdline_len)
{
	......
	kbuf.buffer = params;
	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = 16;
	kbuf.buf_min = MIN_BOOTPARAM_ADDR;
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_free_params;
	......
}
Thanks
Baoquan
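Tying the two observations in this thread together: even when a caller such as
bzImage64_load() asks for only 16-byte alignment, kexec_add_buffer() raises
both the segment size and the alignment before placing the segment, which is
why the resulting destination still passes the page-alignment check in
sanity_check_segment_list(). The two lines already quoted earlier are repeated
here purely as a reminder, not as new code:

	/* in kexec_add_buffer() */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);		/* memsz rounded up to a full page */
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);	/* buf_align of 16 raised to PAGE_SIZE */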
On Fri, 12 Sep 2025 14:11:04 +0800 Baoquan He <bhe@redhat.com> wrote:
> > int sanity_check_segment_list(struct kimage *image)
> > {
> > // ...
> > for (i = 0; i < nr_segments; i++) {
> > unsigned long mstart, mend;
> >
> > mstart = image->segment[i].mem;
> > mend = mstart + image->segment[i].memsz;
> > if (mstart > mend)
> > return -EADDRNOTAVAIL;
> > if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
> > return -EADDRNOTAVAIL;
> > if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
> > return -EADDRNOTAVAIL;
> > }
> > // ...
> > }
> >
> > > That better be explained in the log.
> > Does it warrant a second revision to change the log?
>
> I would appreciate it if we could explain the reason a little in the patch
> log, because we have code as below where people assign a non-PAGE_SIZE value
> to kbuf.buf_align. With only a general conclusion, people need to explore the
> code to find out. At least that's what I did when checking this patch.
>
> arch/x86/kernel/kexec-bzimage64.c:
> static void *bzImage64_load(struct kimage *image, char *kernel,
> unsigned long kernel_len, char *initrd,
> unsigned long initrd_len, char *cmdline,
> unsigned long cmdline_len)
> {
I'll drop this patch. Justinien, please send along an updated version
for the next -rc cycle, thanks.