Unlike general architectures, on LoongArch one TLB entry covers two
adjacent pages. For kernel space, both PTEs of such a pair must have
PAGE_GLOBAL set; otherwise the hardware treats the entry as a
non-global TLB entry, which causes potential problems. For example,
local_flush_tlb_kernel_range() only flushes TLB entries with the
global bit set, so it fails to flush a kernel TLB entry that is not
global.

Add the function kernel_pte_init() to initialize a PTE table when it
is created, so that its default initial PTEs carry PAGE_GLOBAL rather
than being zero.

The kernel space areas fixmap, percpu, vmalloc and KASAN now get
their default PTE entries with PAGE_GLOBAL set.
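
(Illustration only, not part of this patch: the two PTEs of an
even/odd page pair sit in adjacent slots of the PTE table, so the
buddy of a given kernel PTE can be located by flipping the lowest
index bit, in the style of the MIPS ptep_buddy() helper; the helper
below is a sketch, not code from this series:

	static inline pte_t *ptep_buddy(pte_t *ptep)
	{
		/* the even/odd page pair occupies adjacent PTE slots */
		return (pte_t *)((unsigned long)ptep ^ sizeof(pte_t));
	}

One TLB entry is formed from both PTEs of the pair, which is why both
must carry _PAGE_GLOBAL.)
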
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/pgalloc.h | 13 +++++++++++++
 arch/loongarch/include/asm/pgtable.h |  1 +
 arch/loongarch/mm/init.c             |  4 +++-
 arch/loongarch/mm/kasan_init.c       |  4 +++-
 arch/loongarch/mm/pgtable.c          | 22 ++++++++++++++++++++++
 5 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 4e2d6b7ca2ee..b2698c03dc2c 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -10,8 +10,21 @@
 
 #define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
 #include <asm-generic/pgalloc.h>
 
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	pte_t *pte;
+
+	pte = (pte_t *) __get_free_page(GFP_KERNEL);
+	if (!pte)
+		return NULL;
+
+	kernel_pte_init(pte);
+	return pte;
+}
+
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
 {
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 9965f52ef65b..22e3a8f96213 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -269,6 +269,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm
 extern void pgd_init(void *addr);
 extern void pud_init(void *addr);
 extern void pmd_init(void *addr);
+extern void kernel_pte_init(void *addr);
 
 /*
  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 8a87a482c8f4..9f26e933a8a3 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -198,9 +198,11 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
 	if (!pmd_present(pmdp_get(pmd))) {
 		pte_t *pte;
 
-		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
 		if (!pte)
 			panic("%s: Failed to allocate memory\n", __func__);
+
+		kernel_pte_init(pte);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 	}
 
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index 427d6b1aec09..34988573b0d5 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -152,6 +152,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 		phys_addr_t page_phys = early ?
 					__pa_symbol(kasan_early_shadow_page)
 					      : kasan_alloc_zeroed_page(node);
+		if (!early)
+			kernel_pte_init(__va(page_phys));
 		next = addr + PAGE_SIZE;
 		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
 	} while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
@@ -287,7 +289,7 @@ void __init kasan_init(void)
 		set_pte(&kasan_early_shadow_pte[i],
 			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));
 
-	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+	kernel_pte_init(kasan_early_shadow_page);
 	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
 	local_flush_tlb_all();
 
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index eb6a29b491a7..228ffc1db0a3 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -38,6 +38,28 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(pgd_alloc);
 
+void kernel_pte_init(void *addr)
+{
+	unsigned long *p, *end;
+	unsigned long entry;
+
+	entry = (unsigned long)_PAGE_GLOBAL;
+	p = (unsigned long *)addr;
+	end = p + PTRS_PER_PTE;
+
+	do {
+		p[0] = entry;
+		p[1] = entry;
+		p[2] = entry;
+		p[3] = entry;
+		p[4] = entry;
+		p += 8;
+		p[-3] = entry;
+		p[-2] = entry;
+		p[-1] = entry;
+	} while (p != end);
+}
+
 void pgd_init(void *addr)
 {
 	unsigned long *p, *end;
--
2.39.3
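
(The unrolled store loop in kernel_pte_init() above follows the same
idiom as the existing pgd_init()/pmd_init(). For readers unfamiliar
with it, a plain equivalent is sketched below, for illustration only,
not as a proposed replacement:

	void kernel_pte_init(void *addr)
	{
		unsigned long *p = addr;
		int i;

		/* every PTE slot starts as _PAGE_GLOBAL instead of zero */
		for (i = 0; i < PTRS_PER_PTE; i++)
			p[i] = _PAGE_GLOBAL;
	}

Each iteration of the unrolled version stores eight entries: indexes
0-4 before the pointer advances by 8, and indexes 5-7 through the
negative offsets afterwards.)
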
Hi, Bibo,
On Thu, Oct 10, 2024 at 11:50 AM Bibo Mao <maobibo@loongson.cn> wrote:
>
> Unlike general architectures, on LoongArch one TLB entry covers two
> adjacent pages. For kernel space, both PTEs of such a pair must have
> PAGE_GLOBAL set; otherwise the hardware treats the entry as a
> non-global TLB entry, which causes potential problems. For example,
> local_flush_tlb_kernel_range() only flushes TLB entries with the
> global bit set, so it fails to flush a kernel TLB entry that is not
> global.
>
> Add the function kernel_pte_init() to initialize a PTE table when it
> is created, so that its default initial PTEs carry PAGE_GLOBAL rather
> than being zero.
I think kernel_pte_init() is also needed in zero_pmd_populate() in
mm/kasan/init.c. Moreover, the second patch should be squashed into
this one because the two belong together. Though the second one
touches common code, I can merge it with the mm maintainer's Acked-by.
Huacai
Huacai,
On 2024/10/12 10:15 AM, Huacai Chen wrote:
> Hi, Bibo,
>
> On Thu, Oct 10, 2024 at 11:50 AM Bibo Mao <maobibo@loongson.cn> wrote:
>>
>> Unlike general architectures, on LoongArch one TLB entry covers two
>> adjacent pages. For kernel space, both PTEs of such a pair must have
>> PAGE_GLOBAL set; otherwise the hardware treats the entry as a
>> non-global TLB entry, which causes potential problems. For example,
>> local_flush_tlb_kernel_range() only flushes TLB entries with the
>> global bit set, so it fails to flush a kernel TLB entry that is not
>> global.
>>
>> Add the function kernel_pte_init() to initialize a PTE table when it
>> is created, so that its default initial PTEs carry PAGE_GLOBAL rather
>> than being zero.
> I think kernel_pte_init() is also needed in zero_pmd_populate() in
> mm/kasan/init.c. Moreover, the second patch should be squashed into
Yes, it is needed in zero_pmd_populate() in mm/kasan/init.c; I will
add it in the next version.
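
Roughly, the call would land in the early-allocation path there,
something like the following sketch against the current
mm/kasan/init.c flow (the exact form is up to the squashed patch, and
it assumes kernel_pte_init() gets a weak no-op default so other
architectures are unaffected):

	if (pmd_none(*pmd)) {
		pte_t *p;

		if (slab_is_available())
			p = pte_alloc_one_kernel(&init_mm);	/* already inits PTEs on LoongArch */
		else {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			kernel_pte_init(p);	/* assumed weak no-op elsewhere */
		}
		if (!p)
			return -ENOMEM;

		pmd_populate_kernel(&init_mm, pmd, p);
	}
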
> this one because the two belong together. Though the second one
> touches common code, I can merge it with the mm maintainer's Acked-by.
Sure, I will merge it with the second one into a single patch.
Regards
Bibo Mao