Use virt_to_maddr() instead of LINK_TO_LOAD(), as virt_to_maddr()
covers all the cases where LINK_TO_LOAD() is used.
LINK_TO_LOAD() is therefore dropped.
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
xen/arch/riscv/mm.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index 7a1919e07e..59da61716c 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -29,7 +29,6 @@ struct mmu_desc {
unsigned long __ro_after_init phys_offset;
#define LOAD_TO_LINK(addr) ((unsigned long)(addr) - phys_offset)
-#define LINK_TO_LOAD(addr) ((unsigned long)(addr) + phys_offset)
/*
* It is expected that Xen won't be more then 2 MB.
@@ -122,7 +121,7 @@ static void __init setup_initial_mapping(struct mmu_desc *mmu_desc,
unsigned long paddr = (page_addr - map_start) + pa_start;
unsigned int permissions = PTE_LEAF_DEFAULT;
unsigned long addr = is_identity_mapping
- ? page_addr : LINK_TO_LOAD(page_addr);
+ ? page_addr : virt_to_maddr(page_addr);
pte_t pte_to_be_written;
index = pt_index(0, page_addr);
@@ -225,7 +224,7 @@ void __init setup_fixmap_mappings(void)
BUG_ON(pte_is_valid(*pte));
- tmp = paddr_to_pte(LINK_TO_LOAD((unsigned long)&xen_fixmap), PTE_TABLE);
+ tmp = paddr_to_pte(virt_to_maddr(&xen_fixmap), PTE_TABLE);
write_pte(pte, tmp);
RISCV_FENCE(rw, rw);
@@ -312,7 +311,7 @@ void __init remove_identity_mapping(void)
pte_t *pgtbl;
unsigned int index, xen_index;
unsigned long ident_start =
- LINK_TO_LOAD(turn_on_mmu) & XEN_PT_LEVEL_MAP_MASK(0);
+ virt_to_maddr((unsigned long)turn_on_mmu) & XEN_PT_LEVEL_MAP_MASK(0);
for ( pgtbl = stage1_pgtbl_root, i = CONFIG_PAGING_LEVELS; i; i-- )
{
--
2.46.1
On 30.09.2024 17:08, Oleksii Kurochko wrote: > Except for switching LINK_TO_LOAD() to virt_to_maddr(), > LINK_TO_LOAD() is dropped, as virt_to_maddr() covers all > the cases where LINK_TO_LOAD() is used. Why "Except for ..."? I'm afraid I can't make sense of this. > @@ -225,7 +224,7 @@ void __init setup_fixmap_mappings(void) > > BUG_ON(pte_is_valid(*pte)); > > - tmp = paddr_to_pte(LINK_TO_LOAD((unsigned long)&xen_fixmap), PTE_TABLE); > + tmp = paddr_to_pte(virt_to_maddr(&xen_fixmap), PTE_TABLE); Just like you don't open-code a cast here, ... > @@ -312,7 +311,7 @@ void __init remove_identity_mapping(void) > pte_t *pgtbl; > unsigned int index, xen_index; > unsigned long ident_start = > - LINK_TO_LOAD(turn_on_mmu) & XEN_PT_LEVEL_MAP_MASK(0); > + virt_to_maddr((unsigned long)turn_on_mmu) & XEN_PT_LEVEL_MAP_MASK(0); ... you shouldn't do so here. The virt_to_maddr() wrapper macro does so already. Jan
On Tue, 2024-10-01 at 17:44 +0200, Jan Beulich wrote: > On 30.09.2024 17:08, Oleksii Kurochko wrote: > > Except for switching LINK_TO_LOAD() to virt_to_maddr(), > > LINK_TO_LOAD() is dropped, as virt_to_maddr() covers all > > the cases where LINK_TO_LOAD() is used. > > Why "Except for ..."? I'm afraid I can'rt make sense of this. I will reword that for clarity: Use virt_to_maddr() instead of LINK_TO_LOAD as virt_to_maddr() covers all the cases where LINK_TO_LOAD() is used. Thanks. ~ Oleksii > > > @@ -225,7 +224,7 @@ void __init setup_fixmap_mappings(void) > > > > BUG_ON(pte_is_valid(*pte)); > > > > - tmp = paddr_to_pte(LINK_TO_LOAD((unsigned long)&xen_fixmap), > > PTE_TABLE); > > + tmp = paddr_to_pte(virt_to_maddr(&xen_fixmap), PTE_TABLE); > > Just like you don't open-code a cast here, ... > > > @@ -312,7 +311,7 @@ void __init remove_identity_mapping(void) > > pte_t *pgtbl; > > unsigned int index, xen_index; > > unsigned long ident_start = > > - LINK_TO_LOAD(turn_on_mmu) & XEN_PT_LEVEL_MAP_MASK(0); > > + virt_to_maddr((unsigned long)turn_on_mmu) & > > XEN_PT_LEVEL_MAP_MASK(0); > > ... you shouldn't do so here. The virt_to_maddr() wrapper macro does > so already. > > Jan
© 2016 - 2024 Red Hat, Inc.