Use the struct boot_module fields, start and size, when calculating the
relocation address and size. This also ensures that the early_mod
references are kept in sync.
Signed-off-by: Daniel P. Smith <dpsmith@apertussolutions.com>
---
Changes since v5:
- removed an unnecessary paddr_to_pfn(), allowing the condition to collapse to one line
- corrected a missed conversion from .mod->mod_start to .start
---
xen/arch/x86/setup.c | 38 +++++++++++++++++---------------------
1 file changed, 17 insertions(+), 21 deletions(-)
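
[Editor's note, below the cut and not part of the commit: for context, a
minimal sketch of the struct boot_module fields this patch relies on. The
field types are assumptions inferred from their use in the diff; the real
definition lives in the x86 bootinfo header and carries more members.]

    struct boot_module {
        module_t *mod;          /* legacy MB1 module entry, kept in sync */
        paddr_t start;          /* physical load address in bytes, not a PFN */
        size_t size;            /* module size in bytes */
        unsigned long headroom; /* extra space reserved ahead of mod[0] */
        uint32_t flags;         /* e.g. BOOTMOD_FLAG_X86_RELOCATED */
    };

[The invariant the relocation loop maintains is that the legacy MB1 view
stays in sync with the new fields: mod_start holds the same location as a
PFN, i.e. paddr_to_pfn(bm->start), and mod_end holds the byte size,
i.e. bm->size.]
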
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index d51b0b03bc97..44cec48fc877 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1476,7 +1476,7 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
* move mod[0], we incorporate this as extra space at the start.
*/
struct boot_module *bm = &bi->mods[j];
- unsigned long size = PAGE_ALIGN(bm->headroom + bm->mod->mod_end);
+ unsigned long size = PAGE_ALIGN(bm->headroom + bm->size);
if ( bi->mods[j].flags & BOOTMOD_FLAG_X86_RELOCATED )
continue;
@@ -1488,15 +1488,13 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
if ( highmem_start && end > highmem_start )
continue;
- if ( s < end &&
- (bm->headroom ||
- ((end - size) >> PAGE_SHIFT) > bm->mod->mod_start) )
+ if ( s < end && (bm->headroom || (end - size) > bm->start) )
{
- move_memory(end - size + bm->headroom,
- (uint64_t)bm->mod->mod_start << PAGE_SHIFT,
- bm->mod->mod_end);
- bm->mod->mod_start = (end - size) >> PAGE_SHIFT;
- bm->mod->mod_end += bm->headroom;
+ move_memory(end - size + bm->headroom, bm->start, bm->size);
+ bm->start = end - size;
+ bm->mod->mod_start = paddr_to_pfn(bm->start);
+ bm->size += bm->headroom;
+ bm->mod->mod_end = bm->size;
bm->flags |= BOOTMOD_FLAG_X86_RELOCATED;
}
}
@@ -1528,11 +1526,10 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
panic("Not enough memory to relocate the dom0 kernel image\n");
for ( i = 0; i < bi->nr_modules; ++i )
{
- uint64_t s = (uint64_t)bi->mods[i].mod->mod_start
- << PAGE_SHIFT;
+ uint64_t s = bi->mods[i].start;
reserve_e820_ram(&boot_e820, s,
- s + PAGE_ALIGN(bi->mods[i].mod->mod_end));
+ s + PAGE_ALIGN(bi->mods[i].size));
}
if ( !xen_phys_start )
@@ -1610,9 +1607,8 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
map_e = boot_e820.map[j].addr + boot_e820.map[j].size;
for ( j = 0; j < bi->nr_modules; ++j )
{
- uint64_t end = pfn_to_paddr(
- bi->mods[j].mod->mod_start) +
- bi->mods[j].mod->mod_end;
+ uint64_t end = bi->mods[j].start +
+ bi->mods[j].size;
if ( map_e < end )
map_e = end;
@@ -1686,12 +1682,12 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
for ( i = 0; i < bi->nr_modules; ++i )
{
- set_pdx_range(bi->mods[i].mod->mod_start,
- bi->mods[i].mod->mod_start +
- PFN_UP(bi->mods[i].mod->mod_end));
- map_pages_to_xen((unsigned long)mfn_to_virt(bi->mods[i].mod->mod_start),
- _mfn(bi->mods[i].mod->mod_start),
- PFN_UP(bi->mods[i].mod->mod_end), PAGE_HYPERVISOR);
+ set_pdx_range(paddr_to_pfn(bi->mods[i].start),
+ paddr_to_pfn(bi->mods[i].start) +
+ PFN_UP(bi->mods[i].size));
+ map_pages_to_xen((unsigned long)maddr_to_virt(bi->mods[i].start),
+ maddr_to_mfn(bi->mods[i].start),
+ PFN_UP(bi->mods[i].size), PAGE_HYPERVISOR);
}
#ifdef CONFIG_KEXEC
--
2.30.2