Currently, per_cpu_offset(x) is defined as __per_cpu_offset[x] only
under CONFIG_SMP in include/asm-generic/percpu.h, so
loongarch_common_resume(), which calls per_cpu_offset(0), does not
build for non-SMP. Instead of writing per_cpu_offset(0) to
PERCPU_BASE_KS on resume, save the value of PERCPU_BASE_KS at suspend
time and restore it at resume time. This avoids the build error for
non-SMP and works correctly for both SMP and non-SMP.
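For reference, the SMP-only definition in include/asm-generic/percpu.h
looks roughly like the following (abridged excerpt, quoted here only to
illustrate why the non-SMP build fails):

  #ifdef CONFIG_SMP
  #ifndef __per_cpu_offset
  extern unsigned long __per_cpu_offset[NR_CPUS];
  #define per_cpu_offset(x) (__per_cpu_offset[x])
  #endif
  #endif /* SMP */

With CONFIG_SMP disabled, no per_cpu_offset() is provided by this
header, so the per_cpu_offset(0) call in loongarch_common_resume()
fails to compile.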
This is preparation for a later patch. Tested with the command
"rtcwake -d rtc1 -s 20 -m mem"; dmesg shows "PM: suspend entry (deep)"
and "PM: suspend exit".
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
arch/loongarch/power/suspend.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c
index 166d9e06a64b..8571818b93dc 100644
--- a/arch/loongarch/power/suspend.c
+++ b/arch/loongarch/power/suspend.c
@@ -24,6 +24,7 @@ struct saved_registers {
u64 kpgd;
u32 pwctl0;
u32 pwctl1;
+ u64 pcpu_base;
};
static struct saved_registers saved_regs;
@@ -36,6 +37,7 @@ void loongarch_common_suspend(void)
saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1);
saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG);
saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN);
+ saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS);
loongarch_suspend_addr = loongson_sysconf.suspend_addr;
}
@@ -44,7 +46,7 @@ void loongarch_common_resume(void)
{
sync_counter();
local_flush_tlb_all();
- csr_write64(per_cpu_offset(0), PERCPU_BASE_KS);
+ csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS);
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
--
2.42.0
Hi, Tiezhu,
On Wed, Apr 17, 2024 at 6:00 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
This patch can be squashed into the 3rd patch.
Huacai