Use the struct boot_module fields, start and size, when calculating the
relocation address and size. This also ensures that the early_mod references
are kept in sync.
Signed-off-by: Daniel P. Smith <dpsmith@apertussolutions.com>
---
Changes since v6:
- introduce local s and l refs for start and size, shortening the lines while
correcting the indentation style
Changes since v5:
- removed unnecessary paddr_to_pfn, allowing condition to collapse to one line
- correct a missed conversion from .mod->mod_start to .start
---
xen/arch/x86/setup.c | 34 ++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 4e5d4055e7dd..b02391b887b1 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1507,7 +1507,7 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
* move mod[0], we incorporate this as extra space at the start.
*/
struct boot_module *bm = &bi->mods[j];
- unsigned long size = PAGE_ALIGN(bm->headroom + bm->mod->mod_end);
+ unsigned long size = PAGE_ALIGN(bm->headroom + bm->size);
if ( bm->relocated )
continue;
@@ -1519,15 +1519,13 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
if ( highmem_start && end > highmem_start )
continue;
- if ( s < end &&
- (bm->headroom ||
- ((end - size) >> PAGE_SHIFT) > bm->mod->mod_start) )
+ if ( s < end && (bm->headroom || (end - size) > bm->start) )
{
- move_memory(end - size + bm->headroom,
- (uint64_t)bm->mod->mod_start << PAGE_SHIFT,
- bm->mod->mod_end);
- bm->mod->mod_start = (end - size) >> PAGE_SHIFT;
- bm->mod->mod_end += bm->headroom;
+ move_memory(end - size + bm->headroom, bm->start, bm->size);
+ bm->start = (end - size);
+ bm->mod->mod_start = paddr_to_pfn(bm->start);
+ bm->size += bm->headroom;
+ bm->mod->mod_end = bm->size;
bm->relocated = true;
}
}
@@ -1558,10 +1556,9 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
panic("Not enough memory to relocate the dom0 kernel image\n");
for ( i = 0; i < bi->nr_modules; ++i )
{
- uint64_t s = (uint64_t)bi->mods[i].mod->mod_start << PAGE_SHIFT;
+ uint64_t s = bi->mods[i].start;
- reserve_e820_ram(&boot_e820, s,
- s + PAGE_ALIGN(bi->mods[i].mod->mod_end));
+ reserve_e820_ram(&boot_e820, s, s + PAGE_ALIGN(bi->mods[i].size));
}
if ( !xen_phys_start )
@@ -1639,8 +1636,7 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
map_e = boot_e820.map[j].addr + boot_e820.map[j].size;
for ( j = 0; j < bi->nr_modules; ++j )
{
- uint64_t end = pfn_to_paddr(bi->mods[j].mod->mod_start) +
- bi->mods[j].mod->mod_end;
+ uint64_t end = bi->mods[j].start + bi->mods[j].size;
if ( map_e < end )
map_e = end;
@@ -1714,12 +1710,10 @@ void asmlinkage __init noreturn __start_xen(unsigned long mbi_p)
for ( i = 0; i < bi->nr_modules; ++i )
{
- set_pdx_range(bi->mods[i].mod->mod_start,
- bi->mods[i].mod->mod_start +
- PFN_UP(bi->mods[i].mod->mod_end));
- map_pages_to_xen((unsigned long)mfn_to_virt(bi->mods[i].mod->mod_start),
- _mfn(bi->mods[i].mod->mod_start),
- PFN_UP(bi->mods[i].mod->mod_end), PAGE_HYPERVISOR);
+ unsigned long s = bi->mods[i].start, l = bi->mods[i].size;
+ set_pdx_range(paddr_to_pfn(s), paddr_to_pfn(s) + PFN_UP(l));
+ map_pages_to_xen((unsigned long)maddr_to_virt(s), maddr_to_mfn(s),
+ PFN_UP(l), PAGE_HYPERVISOR);
}
#ifdef CONFIG_KEXEC
--
2.30.2