From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id D7AEB337BAF; Thu, 27 Nov 2025 15:49:33 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258573; cv=none; b=N6yyHeGmB5ykrqAiZTvRs/KIS7tnzUMwV/ceVSg4u5YoHvtzW3UEQF0zQCcTgm/oiSQNQw7MwZ4L4b16bVQF7czOZoPO+E8pfnmQqLak2XPH+cT54p7UgZuk7B4dCZvSOk3dr64jYtWNHDHOKSNuLvARGSP4pUAWxbxiwiI1fY8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258573; c=relaxed/simple; bh=F8slmda6vCjyF6xh0edwI5ZDt99WD9OHTx/+q2HuiJI=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=MSysD0AQUqzQfDW7t8AGLJVkcfOT86OBXXMuU/qmamh5u80U+EzL4dnUvAGGzjXgwB0alV7LBuTMMFPlYZT7Qd/kIlOYWTf6rj9bOClFfZAO4l6NvUVOpJfnnRbIhPvskXNdgD4cL0bsx0qC/8Q2EzBYGBkk08v4tz7WIikB/PA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 09685C4CEF8; Thu, 27 Nov 2025 15:49:30 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 01/14] LoongArch: Add atomic operations for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:19 +0800 Message-ID: <20251127154832.137925-2-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: 
text/plain; charset="utf-8" LoongArch64 has both AMO and LL/SC instructions, while LoongArch32 only has LL/SC intstructions. So we add a Kconfig option CPU_HAS_AMO here and implement atomic operations (also including local operations and percpu operations) for both 32BIT and 64BIT platforms. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/Kconfig | 4 + arch/loongarch/include/asm/atomic-amo.h | 206 +++++++++++++++++++++++ arch/loongarch/include/asm/atomic-llsc.h | 100 +++++++++++ arch/loongarch/include/asm/atomic.h | 197 ++-------------------- arch/loongarch/include/asm/cmpxchg.h | 48 ++++-- arch/loongarch/include/asm/local.h | 37 ++++ arch/loongarch/include/asm/percpu.h | 40 +++-- 7 files changed, 413 insertions(+), 219 deletions(-) create mode 100644 arch/loongarch/include/asm/atomic-amo.h create mode 100644 arch/loongarch/include/asm/atomic-llsc.h diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index a672f689cb03..730f34214519 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -568,6 +568,10 @@ config ARCH_STRICT_ALIGN to run kernel only on systems with h/w unaligned access support in order to optimise for performance. =20 +config CPU_HAS_AMO + bool + default 64BIT + config CPU_HAS_FPU bool default y diff --git a/arch/loongarch/include/asm/atomic-amo.h b/arch/loongarch/inclu= de/asm/atomic-amo.h new file mode 100644 index 000000000000..d5efa5252d56 --- /dev/null +++ b/arch/loongarch/include/asm/atomic-amo.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations (AMO). 
+ * + * Copyright (C) 2020-2025 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_ATOMIC_AMO_H +#define _ASM_ATOMIC_AMO_H + +#include +#include +#include + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op".w" " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=3D&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=3D&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op, I, asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC_OPS(add, i, add, +) +ATOMIC_OPS(sub, -i, add, +) + +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define 
arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#ifdef CONFIG_64BIT + +#define ATOMIC64_OP(op, I, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op".d " " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t = *v) \ +{ \ + long result; \ + __asm__ __volatile__( \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=3D&r" (result) \ + : "r" (I) \ + : "memory"); \ 
+ \ + return result c_op I; \ +} + +#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)= \ +{ \ + long result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=3D&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC64_OPS(op, I, asm_op, c_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC64_OPS(add, i, add, +) +ATOMIC64_OPS(sub, -i, add, +) + +#define arch_atomic64_add_return arch_atomic64_add_return +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release arch_atomic64_add_return +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return arch_atomic64_sub_return +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + +#undef ATOMIC64_OPS + +#define ATOMIC64_OPS(op, I, asm_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC64_OPS(and, i, and) +ATOMIC64_OPS(or, i, or) 
+ATOMIC64_OPS(xor, i, xor) + +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +#endif + +#endif /* _ASM_ATOMIC_AMO_H */ diff --git a/arch/loongarch/include/asm/atomic-llsc.h b/arch/loongarch/incl= ude/asm/atomic-llsc.h new file mode 100644 index 000000000000..b4f5670b85cf --- /dev/null +++ b/arch/loongarch/include/asm/atomic-llsc.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations (LLSC). 
+ * + * Copyright (C) 2024-2025 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_ATOMIC_LLSC_H +#define _ASM_ATOMIC_LLSC_H + +#include +#include +#include + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + int temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %0, %1 #atomic_" #op " \n" \ + " " #asm_op " %0, %0, %2 \n" \ + " sc.w %0, %1 \n" \ + " beq %0, $r0, 1b \n" \ + :"=3D&r" (temp) , "+ZB"(v->counter) \ + :"r" (I) \ + ); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int result, temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc.w %0, %2 \n" \ + " beq %0, $r0 ,1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + : "=3D&r" (result), "=3D&r" (temp), "+ZB"(v->counter) \ + : "r" (I)); \ + \ + return result; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int result, temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %1, %2 # atomic_fetch_" #op " \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc.w %0, %2 \n" \ + " beq %0, $r0 ,1b \n" \ + " add.w %0, %1 ,$r0 \n" \ + : "=3D&r" (result), "=3D&r" (temp), "+ZB" (v->counter) \ + : "r" (I)); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op,I ,asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I , asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(add, i , add.w ,+=3D) +ATOMIC_OPS(sub, -i , add.w ,+=3D) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + 
+ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#ifdef CONFIG_64BIT +#error "64-bit LLSC atomic operations are not supported" +#endif + +#endif /* _ASM_ATOMIC_LLSC_H */ diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/a= sm/atomic.h index c86f0ab922ec..444b9ddcd004 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -11,6 +11,16 @@ #include #include =20 +#ifdef CONFIG_CPU_HAS_AMO +#include +#else +#include +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include +#endif + #if __SIZEOF_LONG__ =3D=3D 4 #define __LL "ll.w " #define __SC "sc.w " @@ -34,100 +44,6 @@ #define arch_atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) =20 -#define ATOMIC_OP(op, I, asm_op) \ -static inline void arch_atomic_##op(int i, atomic_t *v) \ -{ \ - __asm__ __volatile__( \ - "am"#asm_op".w" " $zero, %1, %0 \n" \ - : "+ZB" (v->counter) \ - : "r" (I) \ - : "memory"); \ -} - -#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ -static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \ -{ \ - int result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".w" " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=3D&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result c_op I; \ -} - -#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ -static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \ -{ \ - int result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".w" " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=3D&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result; \ -} - -#define ATOMIC_OPS(op, I, asm_op, c_op) \ - 
ATOMIC_OP(op, I, asm_op) \ - ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ - ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ - ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC_OPS(add, i, add, +) -ATOMIC_OPS(sub, -i, add, +) - -#define arch_atomic_add_return arch_atomic_add_return -#define arch_atomic_add_return_acquire arch_atomic_add_return -#define arch_atomic_add_return_release arch_atomic_add_return -#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed -#define arch_atomic_sub_return arch_atomic_sub_return -#define arch_atomic_sub_return_acquire arch_atomic_sub_return -#define arch_atomic_sub_return_release arch_atomic_sub_return -#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed -#define arch_atomic_fetch_add arch_atomic_fetch_add -#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add -#define arch_atomic_fetch_add_release arch_atomic_fetch_add -#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed -#define arch_atomic_fetch_sub arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed - -#undef ATOMIC_OPS - -#define ATOMIC_OPS(op, I, asm_op) \ - ATOMIC_OP(op, I, asm_op) \ - ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC_OPS(and, i, and) -ATOMIC_OPS(or, i, or) -ATOMIC_OPS(xor, i, xor) - -#define arch_atomic_fetch_and arch_atomic_fetch_and -#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and -#define arch_atomic_fetch_and_release arch_atomic_fetch_and -#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed -#define arch_atomic_fetch_or arch_atomic_fetch_or -#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or -#define arch_atomic_fetch_or_release arch_atomic_fetch_or -#define arch_atomic_fetch_or_relaxed 
arch_atomic_fetch_or_relaxed -#define arch_atomic_fetch_xor arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed - -#undef ATOMIC_OPS -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int prev, rc; @@ -194,99 +110,6 @@ static inline int arch_atomic_sub_if_positive(int i, a= tomic_t *v) #define arch_atomic64_read(v) READ_ONCE((v)->counter) #define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) =20 -#define ATOMIC64_OP(op, I, asm_op) \ -static inline void arch_atomic64_##op(long i, atomic64_t *v) \ -{ \ - __asm__ __volatile__( \ - "am"#asm_op".d " " $zero, %1, %0 \n" \ - : "+ZB" (v->counter) \ - : "r" (I) \ - : "memory"); \ -} - -#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ -static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t = *v) \ -{ \ - long result; \ - __asm__ __volatile__( \ - "am"#asm_op#mb".d " " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=3D&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result c_op I; \ -} - -#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \ -static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)= \ -{ \ - long result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".d " " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=3D&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result; \ -} - -#define ATOMIC64_OPS(op, I, asm_op, c_op) \ - ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \ - ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ - ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC64_OPS(add, i, add, +) -ATOMIC64_OPS(sub, -i, add, +) - -#define arch_atomic64_add_return arch_atomic64_add_return -#define 
arch_atomic64_add_return_acquire arch_atomic64_add_return -#define arch_atomic64_add_return_release arch_atomic64_add_return -#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed -#define arch_atomic64_sub_return arch_atomic64_sub_return -#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return -#define arch_atomic64_sub_return_release arch_atomic64_sub_return -#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed -#define arch_atomic64_fetch_add arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed -#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed - -#undef ATOMIC64_OPS - -#define ATOMIC64_OPS(op, I, asm_op) \ - ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC64_OPS(and, i, and) -ATOMIC64_OPS(or, i, or) -ATOMIC64_OPS(xor, i, xor) - -#define arch_atomic64_fetch_and arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed -#define arch_atomic64_fetch_or arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed -#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_relaxed 
arch_atomic64_fetch_xor_relaxed - -#undef ATOMIC64_OPS -#undef ATOMIC64_FETCH_OP -#undef ATOMIC64_OP_RETURN -#undef ATOMIC64_OP - static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, l= ong u) { long prev, rc; diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/= asm/cmpxchg.h index 979fde61bba8..0494c2ab553e 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -9,17 +9,33 @@ #include #include =20 -#define __xchg_asm(amswap_db, m, val) \ +#define __xchg_amo_asm(amswap_db, m, val) \ ({ \ - __typeof(val) __ret; \ + __typeof(val) __ret; \ \ - __asm__ __volatile__ ( \ - " "amswap_db" %1, %z2, %0 \n" \ - : "+ZB" (*m), "=3D&r" (__ret) \ - : "Jr" (val) \ - : "memory"); \ + __asm__ __volatile__ ( \ + " "amswap_db" %1, %z2, %0 \n" \ + : "+ZB" (*m), "=3D&r" (__ret) \ + : "Jr" (val) \ + : "memory"); \ \ - __ret; \ + __ret; \ +}) + +#define __xchg_llsc_asm(ld, st, m, val) \ +({ \ + __typeof(val) __ret, __tmp; \ + \ + asm volatile ( \ + "1: ll.w %0, %3 \n" \ + " move %1, %z4 \n" \ + " sc.w %1, %2 \n" \ + " beqz %1, 1b \n" \ + : "=3D&r" (__ret), "=3D&r" (__tmp), "=3DZC" (*m) \ + : "ZC" (*m), "Jr" (val) \ + : "memory"); \ + \ + __ret; \ }) =20 static inline unsigned int __xchg_small(volatile void *ptr, unsigned int v= al, @@ -67,13 +83,23 @@ __arch_xchg(volatile void *ptr, unsigned long x, int si= ze) switch (size) { case 1: case 2: - return __xchg_small(ptr, x, size); + return __xchg_small((volatile void *)ptr, x, size); =20 case 4: - return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); +#ifdef CONFIG_CPU_HAS_AMO + return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); +#else + return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x); +#endif /* CONFIG_CPU_HAS_AMO */ =20 +#ifdef CONFIG_64BIT case 8: - return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); +#ifdef CONFIG_CPU_HAS_AMO + return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); 
+#else + return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x); +#endif /* CONFIG_CPU_HAS_AMO */ +#endif /* CONFIG_64BIT */ =20 default: BUILD_BUG(); diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/as= m/local.h index f53ea653af76..65ace8e4350a 100644 --- a/arch/loongarch/include/asm/local.h +++ b/arch/loongarch/include/asm/local.h @@ -8,6 +8,7 @@ #include #include #include +#include #include =20 typedef struct { @@ -27,6 +28,7 @@ typedef struct { /* * Same as above, but return the result value */ +#ifdef CONFIG_CPU_HAS_AMO static inline long local_add_return(long i, local_t *l) { unsigned long result; @@ -55,6 +57,41 @@ static inline long local_sub_return(long i, local_t *l) =20 return result; } +#else +static inline long local_add_return(long i, local_t * l) +{ + unsigned long result, temp; + + __asm__ __volatile__( + "1:" __LL "%1, %2 # local_add_return \n" + __stringify(LONG_ADD) " %0, %1, %3 \n" + __SC "%0, %2 \n" + " beq %0, $r0, 1b \n" + __stringify(LONG_ADD) " %0, %1, %3 \n" + : "=3D&r" (result), "=3D&r" (temp), "=3Dm" (l->a.counter) + : "r" (i), "m" (l->a.counter) + : "memory"); + + return result; +} + +static inline long local_sub_return(long i, local_t * l) +{ + unsigned long result, temp; + + __asm__ __volatile__( + "1:" __LL "%1, %2 # local_sub_return \n" + __stringify(LONG_SUB) " %0, %1, %3 \n" + __SC "%0, %2 \n" + " beq %0, $r0, 1b \n" + __stringify(LONG_SUB) " %0, %1, %3 \n" + : "=3D&r" (result), "=3D&r" (temp), "=3Dm" (l->a.counter) + : "r" (i), "m" (l->a.counter) + : "memory"); + + return result; +} +#endif =20 static inline long local_cmpxchg(local_t *l, long old, long new) { diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/a= sm/percpu.h index 87be9b14e9da..1619c1d15e6b 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -36,6 +36,8 @@ static inline void set_my_cpu_offset(unsigned long off) __my_cpu_offset; \ }) =20 +#ifdef 
CONFIG_CPU_HAS_AMO + #define PERCPU_OP(op, asm_op, c_op) \ static __always_inline unsigned long __percpu_##op(void *ptr, \ unsigned long val, int size) \ @@ -68,25 +70,9 @@ PERCPU_OP(and, and, &) PERCPU_OP(or, or, |) #undef PERCPU_OP =20 -static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned lon= g val, int size) -{ - switch (size) { - case 1: - case 2: - return __xchg_small((volatile void *)ptr, val, size); - - case 4: - return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val); - - case 8: - return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val); +#endif =20 - default: - BUILD_BUG(); - } - - return 0; -} +#ifdef CONFIG_64BIT =20 #define __pcpu_op_1(op) op ".b " #define __pcpu_op_2(op) op ".h " @@ -115,6 +101,10 @@ do { \ : "memory"); \ } while (0) =20 +#endif + +#define __percpu_xchg __arch_xchg + /* this_cpu_cmpxchg */ #define _protect_cmpxchg_local(pcp, o, n) \ ({ \ @@ -135,6 +125,8 @@ do { \ __retval; \ }) =20 +#ifdef CONFIG_CPU_HAS_AMO + #define _percpu_add(pcp, val) \ _pcp_protect(__percpu_add, pcp, val) =20 @@ -146,9 +138,6 @@ do { \ #define _percpu_or(pcp, val) \ _pcp_protect(__percpu_or, pcp, val) =20 -#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ - _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) - #define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) #define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) =20 @@ -161,6 +150,10 @@ do { \ #define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) #define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) =20 +#endif + +#ifdef CONFIG_64BIT + #define this_cpu_read_1(pcp) _percpu_read(1, pcp) #define this_cpu_read_2(pcp) _percpu_read(2, pcp) #define this_cpu_read_4(pcp) _percpu_read(4, pcp) @@ -171,6 +164,11 @@ do { \ #define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val) #define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val) =20 +#endif + +#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) + #define 
this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 487D2283FDD; Thu, 27 Nov 2025 15:50:13 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258614; cv=none; b=BXvGoSxLMQ7I6NpouaXroodR4sTJdnIstKIsSPXTjrBuEeA8iQ5FEQ1uQpwhCd96cHo/PFpr8Z/8xd2scOpQrF1BlBtT0SrnjCTjFxTBPA/bUquONr9Raq8JjZPXx/CP1t8DNZEJQWRVFlCVOK5dDcDRL5mf7TibDJ5OBO4QfMU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258614; c=relaxed/simple; bh=TUZW0Zlb6l31YjTEdTm+SgMmxGwmueWw+i6tiufLldg=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Yc8y22CWGpUFou+JWye7aD8E+knuNgBiSKEhWhr3vpqGG/1H6NRHIf4nFUAGlFDj3lap1Rt+vtX01cDZpMwjXKK1s/IWrJHmSgkDMS29Zj3pP/rEc9ZExfG1NBHaFrgjkf+AYwohr98kNpO3kpD5mAs2W6sGlyE1gYox8vxjI+s= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 02DFAC4CEF8; Thu, 27 Nov 2025 15:50:10 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 02/14] LoongArch: Add adaptive CSR accessors for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:20 +0800 Message-ID: <20251127154832.137925-3-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> 
Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" 32BIT platforms only have 32bit CSR/IOCSR registers, 64BIT platforms have both 32bit/64bit CSR/IOCSR registers. Now there are both 32bit and 64bit CSR accessors: csr_read32()/csr_write32()/csr_xchg32(); csr_read64()/csr_write64()/csr_xchg64(); Some CSR registers (address and timer registers) are 32bit length on 32BIT platform and 64bit length on 64BIT platform. To avoid #ifdefs here and there, they need adaptive accessors, so we define and use: csr_read()/csr_write()/csr_xchg(); IOCSR doesn't have a "natural length", which means a 64bit register can be treated as two 32bit registers, so we just use two 32bit accessors to emulate a 64bit accessors. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/loongarch.h | 46 +++++++++++++++--------- arch/loongarch/include/asm/percpu.h | 2 +- arch/loongarch/kernel/cpu-probe.c | 7 ++++ arch/loongarch/kernel/time.c | 16 ++++----- arch/loongarch/kernel/traps.c | 15 ++++---- arch/loongarch/lib/dump_tlb.c | 6 ++-- arch/loongarch/mm/tlb.c | 10 +++--- arch/loongarch/power/hibernate.c | 6 ++-- arch/loongarch/power/suspend.c | 24 ++++++------- drivers/firmware/efi/libstub/loongarch.c | 8 ++--- 10 files changed, 81 insertions(+), 59 deletions(-) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/includ= e/asm/loongarch.h index 3de03cb864b2..9f71a79271da 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -182,6 +182,16 @@ #define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg) #define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg) =20 +#ifdef CONFIG_32BIT +#define csr_read(reg) csr_read32(reg) +#define csr_write(val, reg) csr_write32(val, reg) +#define csr_xchg(val, mask, reg) csr_xchg32(val, 
mask, reg) +#else +#define csr_read(reg) csr_read64(reg) +#define csr_write(val, reg) csr_write64(val, reg) +#define csr_xchg(val, mask, reg) csr_xchg64(val, mask, reg) +#endif + /* IOCSR */ #define iocsr_read32(reg) __iocsrrd_w(reg) #define iocsr_read64(reg) __iocsrrd_d(reg) @@ -1223,6 +1233,7 @@ static inline unsigned int get_csr_cpuid(void) return csr_read32(LOONGARCH_CSR_CPUID); } =20 +#ifdef CONFIG_64BIT static inline void csr_any_send(unsigned int addr, unsigned int data, unsigned int data_mask, unsigned int cpu) { @@ -1234,6 +1245,7 @@ static inline void csr_any_send(unsigned int addr, un= signed int data, val |=3D ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT); iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND); } +#endif =20 static inline unsigned int read_csr_excode(void) { @@ -1257,22 +1269,22 @@ static inline void write_csr_pagesize(unsigned int = size) =20 static inline unsigned int read_csr_tlbrefill_pagesize(void) { - return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREH= I_PS_SHIFT; + return (csr_read(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_= PS_SHIFT; } =20 static inline void write_csr_tlbrefill_pagesize(unsigned int size) { - csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TL= BREHI); + csr_xchg(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBR= EHI); } =20 #define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID) #define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID) -#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI) -#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI) -#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0) -#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0) -#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1) -#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1) +#define read_csr_entryhi() csr_read(LOONGARCH_CSR_TLBEHI) +#define write_csr_entryhi(val) csr_write(val, 
LOONGARCH_CSR_TLBEHI) +#define read_csr_entrylo0() csr_read(LOONGARCH_CSR_TLBELO0) +#define write_csr_entrylo0(val) csr_write(val, LOONGARCH_CSR_TLBELO0) +#define read_csr_entrylo1() csr_read(LOONGARCH_CSR_TLBELO1) +#define write_csr_entrylo1(val) csr_write(val, LOONGARCH_CSR_TLBELO1) #define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG) #define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG) #define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT) @@ -1282,20 +1294,20 @@ static inline void write_csr_tlbrefill_pagesize(uns= igned int size) #define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN) #define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN) #define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID) -#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1) -#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1) -#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2) -#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2) -#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3) -#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3) +#define read_csr_prcfg1() csr_read(LOONGARCH_CSR_PRCFG1) +#define write_csr_prcfg1(val) csr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_csr_prcfg2() csr_read(LOONGARCH_CSR_PRCFG2) +#define write_csr_prcfg2(val) csr_write(val, LOONGARCH_CSR_PRCFG2) +#define read_csr_prcfg3() csr_read(LOONGARCH_CSR_PRCFG3) +#define write_csr_prcfg3(val) csr_write(val, LOONGARCH_CSR_PRCFG3) #define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE) #define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZ= E) #define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG) #define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG) #define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR) -#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1) -#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1) -#define 
write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2) +#define read_csr_impctl1() csr_read(LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl1(val) csr_write(val, LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl2(val) csr_write(val, LOONGARCH_CSR_IMPCTL2) =20 #define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0) #define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0) diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/a= sm/percpu.h index 1619c1d15e6b..44a8aea2b0e5 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -27,7 +27,7 @@ register unsigned long __my_cpu_offset __asm__("$r21"); static inline void set_my_cpu_offset(unsigned long off) { __my_cpu_offset =3D off; - csr_write64(off, PERCPU_BASE_KS); + csr_write(off, PERCPU_BASE_KS); } =20 #define __my_cpu_offset \ diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-= probe.c index a2060a24b39f..3726cd0885b6 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -298,8 +298,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_l= oongarch *c, unsigned int return; } =20 +#ifdef CONFIG_64BIT *vendor =3D iocsr_read64(LOONGARCH_IOCSR_VENDOR); *cpuname =3D iocsr_read64(LOONGARCH_IOCSR_CPUNAME); +#else + *vendor =3D iocsr_read32(LOONGARCH_IOCSR_VENDOR) | + (u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32; + *cpuname =3D iocsr_read32(LOONGARCH_IOCSR_CPUNAME) | + (u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32; +#endif =20 if (!__cpu_full_name[cpu]) { if (((char *)vendor)[0] =3D=3D 0) diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 6fb92cc1a4c9..1c31bf3a16ed 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -50,10 +50,10 @@ static int constant_set_state_oneshot(struct clock_even= t_device *evt) =20 raw_spin_lock(&state_lock); =20 - timer_config =3D csr_read64(LOONGARCH_CSR_TCFG); + timer_config =3D 
csr_read(LOONGARCH_CSR_TCFG); timer_config |=3D CSR_TCFG_EN; timer_config &=3D ~CSR_TCFG_PERIOD; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); =20 raw_spin_unlock(&state_lock); =20 @@ -70,7 +70,7 @@ static int constant_set_state_periodic(struct clock_event= _device *evt) period =3D const_clock_freq / HZ; timer_config =3D period & CSR_TCFG_VAL; timer_config |=3D (CSR_TCFG_PERIOD | CSR_TCFG_EN); - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); =20 raw_spin_unlock(&state_lock); =20 @@ -83,9 +83,9 @@ static int constant_set_state_shutdown(struct clock_event= _device *evt) =20 raw_spin_lock(&state_lock); =20 - timer_config =3D csr_read64(LOONGARCH_CSR_TCFG); + timer_config =3D csr_read(LOONGARCH_CSR_TCFG); timer_config &=3D ~CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); =20 raw_spin_unlock(&state_lock); =20 @@ -98,7 +98,7 @@ static int constant_timer_next_event(unsigned long delta,= struct clock_event_dev =20 delta &=3D CSR_TCFG_VAL; timer_config =3D delta | CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); =20 return 0; } @@ -137,7 +137,7 @@ void save_counter(void) void sync_counter(void) { /* Ensure counter begin at 0 */ - csr_write64(init_offset, LOONGARCH_CSR_CNTC); + csr_write(init_offset, LOONGARCH_CSR_CNTC); } =20 int constant_clockevent_init(void) @@ -235,7 +235,7 @@ void __init time_init(void) else const_clock_freq =3D calc_const_freq(); =20 - init_offset =3D -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC)); + init_offset =3D -(drdtime() - csr_read(LOONGARCH_CSR_CNTC)); =20 constant_clockevent_init(); constant_clocksource_init(); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index da5926fead4a..004b8ebf0051 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -625,7 +625,7 @@ asmlinkage void 
noinstr do_bce(struct pt_regs *regs) bool user =3D user_mode(regs); bool pie =3D regs_irqs_disabled(regs); unsigned long era =3D exception_era(regs); - u64 badv =3D 0, lower =3D 0, upper =3D ULONG_MAX; + unsigned long badv =3D 0, lower =3D 0, upper =3D ULONG_MAX; union loongarch_instruction insn; irqentry_state_t state =3D irqentry_enter(regs); =20 @@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs = *regs) =20 asmlinkage void cache_parity_error(void) { + u32 merrctl =3D csr_read32(LOONGARCH_CSR_MERRCTL); + unsigned long merrera =3D csr_read(LOONGARCH_CSR_MERRERA); + /* For the moment, report the problem and hang. */ pr_err("Cache error exception:\n"); - pr_err("csr_merrctl =3D=3D %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL)); - pr_err("csr_merrera =3D=3D %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA)); + pr_err("csr_merrctl =3D=3D %08x\n", merrctl); + pr_err("csr_merrera =3D=3D %016lx\n", merrera); panic("Can't handle the cache error!"); } =20 @@ -1130,9 +1133,9 @@ static void configure_exception_vector(void) eentry =3D (unsigned long)exception_handlers; tlbrentry =3D (unsigned long)exception_handlers + 80*VECSIZE; =20 - csr_write64(eentry, LOONGARCH_CSR_EENTRY); - csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY); - csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); + csr_write(eentry, LOONGARCH_CSR_EENTRY); + csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY); + csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); } =20 void per_cpu_trap_init(int cpu) diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c index 0b886a6e260f..116f21ea4e2c 100644 --- a/arch/loongarch/lib/dump_tlb.c +++ b/arch/loongarch/lib/dump_tlb.c @@ -20,9 +20,9 @@ void dump_tlb_regs(void) =20 pr_info("Index : 0x%0x\n", read_csr_tlbidx()); pr_info("PageSize : 0x%0x\n", read_csr_pagesize()); - pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi()); - pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0()); - pr_info("EntryLo1 : 0x%0*lx\n", field, 
read_csr_entrylo1()); + pr_info("EntryHi : 0x%0*lx\n", field, (unsigned long)read_csr_entryhi()); + pr_info("EntryLo0 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo0()= ); + pr_info("EntryLo1 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo1()= ); } =20 static void dump_tlb(int first, int last) diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 3b427b319db2..6e474469e210 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -229,11 +229,11 @@ static void setup_ptwalker(void) if (cpu_has_ptw) pwctl1 |=3D CSR_PWCTL1_PTW; =20 - csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0); - csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1); - csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); - csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); - csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID); + csr_write(pwctl0, LOONGARCH_CSR_PWCTL0); + csr_write(pwctl1, LOONGARCH_CSR_PWCTL1); + csr_write((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); + csr_write((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + csr_write((long)smp_processor_id(), LOONGARCH_CSR_TMID); } =20 static void output_pgtable_bits_defines(void) diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibern= ate.c index e7b7346592cb..817270410ef9 100644 --- a/arch/loongarch/power/hibernate.c +++ b/arch/loongarch/power/hibernate.c @@ -10,7 +10,7 @@ static u32 saved_crmd; static u32 saved_prmd; static u32 saved_euen; static u32 saved_ecfg; -static u64 saved_pcpu_base; +static unsigned long saved_pcpu_base; struct pt_regs saved_regs; =20 void save_processor_state(void) @@ -20,7 +20,7 @@ void save_processor_state(void) saved_prmd =3D csr_read32(LOONGARCH_CSR_PRMD); saved_euen =3D csr_read32(LOONGARCH_CSR_EUEN); saved_ecfg =3D csr_read32(LOONGARCH_CSR_ECFG); - saved_pcpu_base =3D csr_read64(PERCPU_BASE_KS); + saved_pcpu_base =3D csr_read(PERCPU_BASE_KS); =20 if (is_fpu_owner()) save_fp(current); @@ -33,7 +33,7 @@ void restore_processor_state(void) csr_write32(saved_prmd, 
LOONGARCH_CSR_PRMD); csr_write32(saved_euen, LOONGARCH_CSR_EUEN); csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG); - csr_write64(saved_pcpu_base, PERCPU_BASE_KS); + csr_write(saved_pcpu_base, PERCPU_BASE_KS); =20 if (is_fpu_owner()) restore_fp(current); diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c index c9e594925c47..7e3d79f8bbd4 100644 --- a/arch/loongarch/power/suspend.c +++ b/arch/loongarch/power/suspend.c @@ -20,24 +20,24 @@ u64 loongarch_suspend_addr; struct saved_registers { u32 ecfg; u32 euen; - u64 pgd; - u64 kpgd; u32 pwctl0; u32 pwctl1; - u64 pcpu_base; + unsigned long pgd; + unsigned long kpgd; + unsigned long pcpu_base; }; static struct saved_registers saved_regs; =20 void loongarch_common_suspend(void) { save_counter(); - saved_regs.pgd =3D csr_read64(LOONGARCH_CSR_PGDL); - saved_regs.kpgd =3D csr_read64(LOONGARCH_CSR_PGDH); + saved_regs.pgd =3D csr_read(LOONGARCH_CSR_PGDL); + saved_regs.kpgd =3D csr_read(LOONGARCH_CSR_PGDH); saved_regs.pwctl0 =3D csr_read32(LOONGARCH_CSR_PWCTL0); saved_regs.pwctl1 =3D csr_read32(LOONGARCH_CSR_PWCTL1); saved_regs.ecfg =3D csr_read32(LOONGARCH_CSR_ECFG); saved_regs.euen =3D csr_read32(LOONGARCH_CSR_EUEN); - saved_regs.pcpu_base =3D csr_read64(PERCPU_BASE_KS); + saved_regs.pcpu_base =3D csr_read(PERCPU_BASE_KS); =20 loongarch_suspend_addr =3D loongson_sysconf.suspend_addr; } @@ -46,17 +46,17 @@ void loongarch_common_resume(void) { sync_counter(); local_flush_tlb_all(); - csr_write64(eentry, LOONGARCH_CSR_EENTRY); - csr_write64(eentry, LOONGARCH_CSR_MERRENTRY); - csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY); + csr_write(eentry, LOONGARCH_CSR_EENTRY); + csr_write(eentry, LOONGARCH_CSR_MERRENTRY); + csr_write(tlbrentry, LOONGARCH_CSR_TLBRENTRY); =20 - csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL); - csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH); + csr_write(saved_regs.pgd, LOONGARCH_CSR_PGDL); + csr_write(saved_regs.kpgd, LOONGARCH_CSR_PGDH); csr_write32(saved_regs.pwctl0, 
LOONGARCH_CSR_PWCTL0); csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1); csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG); csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN); - csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS); + csr_write(saved_regs.pcpu_base, PERCPU_BASE_KS); } =20 int loongarch_acpi_suspend(void) diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/ef= i/libstub/loongarch.c index 3782d0a187d1..9825f5218137 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -72,10 +72,10 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_i= mage_t *image, desc_ver, priv.runtime_map); =20 /* Config Direct Mapping */ - csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); - csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); - csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); - csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); + csr_write(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); + csr_write(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + csr_write(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); + csr_write(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); =20 real_kernel_entry =3D (void *)kernel_entry_address(kernel_addr, image); =20 --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1B93D334C3F; Thu, 27 Nov 2025 15:50:54 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258655; cv=none; b=MXt1UBCUFV1UVxdzbLvhYvm7ghzYfkO3V0VRPulIZjWfMkR6V6MZz3kijQwOCxhkuIt4g95FcWsWM3CNLvUuStIU/NPJPpYnzC9m46jBwaGwbCDFC21V1mxnzJJwp/FLp9RpO44YqZhqXHs6fIeOJwEGP8gl7HJnlbXaBXAGSNo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258655; 
c=relaxed/simple; bh=WTJu2tXOv5KAyGVP16QMZE5antQXZ3/MoxjBNuP7O5g=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=HMN5JE2sOUlMJrawyKnErgeRqTOlpWPpP3zP6b798rb6Zch6vPZiEf6MYaaJ+Wcmi2F7+whsKT9wRZ7O/Tq/lJYr72j39DF/s2T3uwNW0hEvZ74cGmuXoY81CGp+y5AlzCmepJDpHRVhCpS2UmV3mQGnB9DHvmZKBAZWt4P+fVA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 8CDBDC113D0; Thu, 27 Nov 2025 15:50:52 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 03/14] LoongArch: Adjust common macro definitions for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:21 +0800 Message-ID: <20251127154832.137925-4-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Most common macros are defined in asm.h, asmmacro.h and stackframe.h. Adjust these macros for both 32BIT and 64BIT. Add SETUP_TWINS (Setup Trampoline Windows) and SETUP_MODES (Setup CRMD/ PRMD/EUEN) which will be used later. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/asm.h | 77 ++++++++++++---- arch/loongarch/include/asm/asmmacro.h | 118 ++++++++++++++++++------ arch/loongarch/include/asm/stackframe.h | 34 +++++-- 3 files changed, 174 insertions(+), 55 deletions(-) diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/= asm.h index f018d26fc995..719cab1a0ad8 100644 --- a/arch/loongarch/include/asm/asm.h +++ b/arch/loongarch/include/asm/asm.h @@ -72,11 +72,11 @@ #define INT_SUB sub.w #define INT_L ld.w #define INT_S st.w -#define INT_SLL slli.w +#define INT_SLLI slli.w #define INT_SLLV sll.w -#define INT_SRL srli.w +#define INT_SRLI srli.w #define INT_SRLV srl.w -#define INT_SRA srai.w +#define INT_SRAI srai.w #define INT_SRAV sra.w #endif =20 @@ -86,11 +86,11 @@ #define INT_SUB sub.d #define INT_L ld.d #define INT_S st.d -#define INT_SLL slli.d +#define INT_SLLI slli.d #define INT_SLLV sll.d -#define INT_SRL srli.d +#define INT_SRLI srli.d #define INT_SRLV srl.d -#define INT_SRA srai.d +#define INT_SRAI srai.d #define INT_SRAV sra.d #endif =20 @@ -100,15 +100,23 @@ #if (__SIZEOF_LONG__ =3D=3D 4) #define LONG_ADD add.w #define LONG_ADDI addi.w +#define LONG_ALSL alsl.w +#define LONG_BSTRINS bstrins.w +#define LONG_BSTRPICK bstrpick.w #define LONG_SUB sub.w #define LONG_L ld.w +#define LONG_LI li.w +#define LONG_LPTR ld.w #define LONG_S st.w -#define LONG_SLL slli.w +#define LONG_SPTR st.w +#define LONG_SLLI slli.w #define LONG_SLLV sll.w -#define LONG_SRL srli.w +#define LONG_SRLI srli.w #define LONG_SRLV srl.w -#define LONG_SRA srai.w +#define LONG_SRAI srai.w #define LONG_SRAV sra.w +#define LONG_ROTR rotr.w +#define LONG_ROTRI rotri.w =20 #ifdef __ASSEMBLER__ #define LONG .word @@ -121,15 +129,23 @@ #if (__SIZEOF_LONG__ =3D=3D 8) #define LONG_ADD add.d #define LONG_ADDI addi.d +#define LONG_ALSL alsl.d +#define LONG_BSTRINS bstrins.d +#define LONG_BSTRPICK bstrpick.d #define LONG_SUB sub.d 
#define LONG_L ld.d +#define LONG_LI li.d +#define LONG_LPTR ldptr.d #define LONG_S st.d -#define LONG_SLL slli.d +#define LONG_SPTR stptr.d +#define LONG_SLLI slli.d #define LONG_SLLV sll.d -#define LONG_SRL srli.d +#define LONG_SRLI srli.d #define LONG_SRLV srl.d -#define LONG_SRA srai.d +#define LONG_SRAI srai.d #define LONG_SRAV sra.d +#define LONG_ROTR rotr.d +#define LONG_ROTRI rotri.d =20 #ifdef __ASSEMBLER__ #define LONG .dword @@ -145,16 +161,23 @@ #if (__SIZEOF_POINTER__ =3D=3D 4) #define PTR_ADD add.w #define PTR_ADDI addi.w +#define PTR_ALSL alsl.w +#define PTR_BSTRINS bstrins.w +#define PTR_BSTRPICK bstrpick.w #define PTR_SUB sub.w #define PTR_L ld.w -#define PTR_S st.w #define PTR_LI li.w -#define PTR_SLL slli.w +#define PTR_LPTR ld.w +#define PTR_S st.w +#define PTR_SPTR st.w +#define PTR_SLLI slli.w #define PTR_SLLV sll.w -#define PTR_SRL srli.w +#define PTR_SRLI srli.w #define PTR_SRLV srl.w -#define PTR_SRA srai.w +#define PTR_SRAI srai.w #define PTR_SRAV sra.w +#define PTR_ROTR rotr.w +#define PTR_ROTRI rotri.w =20 #define PTR_SCALESHIFT 2 =20 @@ -168,16 +191,23 @@ #if (__SIZEOF_POINTER__ =3D=3D 8) #define PTR_ADD add.d #define PTR_ADDI addi.d +#define PTR_ALSL alsl.d +#define PTR_BSTRINS bstrins.d +#define PTR_BSTRPICK bstrpick.d #define PTR_SUB sub.d #define PTR_L ld.d -#define PTR_S st.d #define PTR_LI li.d -#define PTR_SLL slli.d +#define PTR_LPTR ldptr.d +#define PTR_S st.d +#define PTR_SPTR stptr.d +#define PTR_SLLI slli.d #define PTR_SLLV sll.d -#define PTR_SRL srli.d +#define PTR_SRLI srli.d #define PTR_SRLV srl.d -#define PTR_SRA srai.d +#define PTR_SRAI srai.d #define PTR_SRAV sra.d +#define PTR_ROTR rotr.d +#define PTR_ROTRI rotri.d =20 #define PTR_SCALESHIFT 3 =20 @@ -190,10 +220,17 @@ =20 /* Annotate a function as being unsuitable for kprobes. 
*/ #ifdef CONFIG_KPROBES +#ifdef CONFIG_32BIT +#define _ASM_NOKPROBE(name) \ + .pushsection "_kprobe_blacklist", "aw"; \ + .long name; \ + .popsection +#else #define _ASM_NOKPROBE(name) \ .pushsection "_kprobe_blacklist", "aw"; \ .quad name; \ .popsection +#endif #else #define _ASM_NOKPROBE(name) #endif diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include= /asm/asmmacro.h index 8d7f501b0a12..a648be5f723f 100644 --- a/arch/loongarch/include/asm/asmmacro.h +++ b/arch/loongarch/include/asm/asmmacro.h @@ -5,43 +5,55 @@ #ifndef _ASM_ASMMACRO_H #define _ASM_ASMMACRO_H =20 +#include #include #include #include #include =20 +#ifdef CONFIG_64BIT +#define TASK_STRUCT_OFFSET 0 +#else +#define TASK_STRUCT_OFFSET 2000 +#endif + .macro cpu_save_nonscratch thread - stptr.d s0, \thread, THREAD_REG23 - stptr.d s1, \thread, THREAD_REG24 - stptr.d s2, \thread, THREAD_REG25 - stptr.d s3, \thread, THREAD_REG26 - stptr.d s4, \thread, THREAD_REG27 - stptr.d s5, \thread, THREAD_REG28 - stptr.d s6, \thread, THREAD_REG29 - stptr.d s7, \thread, THREAD_REG30 - stptr.d s8, \thread, THREAD_REG31 - stptr.d sp, \thread, THREAD_REG03 - stptr.d fp, \thread, THREAD_REG22 + LONG_SPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET) + LONG_SPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET) + LONG_SPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET) + LONG_SPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET) + LONG_SPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET) + LONG_SPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET) + LONG_SPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET) + LONG_SPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET) + LONG_SPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET) + LONG_SPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET) + LONG_SPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET) + LONG_SPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET) .endm =20 .macro cpu_restore_nonscratch thread - ldptr.d s0, \thread, THREAD_REG23 - 
ldptr.d s1, \thread, THREAD_REG24 - ldptr.d s2, \thread, THREAD_REG25 - ldptr.d s3, \thread, THREAD_REG26 - ldptr.d s4, \thread, THREAD_REG27 - ldptr.d s5, \thread, THREAD_REG28 - ldptr.d s6, \thread, THREAD_REG29 - ldptr.d s7, \thread, THREAD_REG30 - ldptr.d s8, \thread, THREAD_REG31 - ldptr.d ra, \thread, THREAD_REG01 - ldptr.d sp, \thread, THREAD_REG03 - ldptr.d fp, \thread, THREAD_REG22 + LONG_LPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET) + LONG_LPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET) + LONG_LPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET) + LONG_LPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET) + LONG_LPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET) + LONG_LPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET) + LONG_LPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET) + LONG_LPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET) + LONG_LPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET) + LONG_LPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET) + LONG_LPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET) + LONG_LPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET) .endm =20 .macro fpu_save_csr thread tmp movfcsr2gr \tmp, fcsr0 +#ifdef CONFIG_32BIT + st.w \tmp, \thread, THREAD_FCSR +#else stptr.w \tmp, \thread, THREAD_FCSR +#endif #ifdef CONFIG_CPU_HAS_LBT /* TM bit is always 0 if LBT not supported */ andi \tmp, \tmp, FPU_CSR_TM @@ -56,7 +68,11 @@ .endm =20 .macro fpu_restore_csr thread tmp0 tmp1 +#ifdef CONFIG_32BIT + ld.w \tmp0, \thread, THREAD_FCSR +#else ldptr.w \tmp0, \thread, THREAD_FCSR +#endif movgr2fcsr fcsr0, \tmp0 #ifdef CONFIG_CPU_HAS_LBT /* TM bit is always 0 if LBT not supported */ @@ -88,9 +104,52 @@ #endif .endm =20 +#ifdef CONFIG_32BIT .macro fpu_save_cc thread tmp0 tmp1 movcf2gr \tmp0, $fcc0 - move \tmp1, \tmp0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.w \tmp1, \tmp0, 31, 24 + st.w 
\tmp1, \thread, THREAD_FCC + movcf2gr \tmp0, $fcc4 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc5 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc6 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc7 + bstrins.w \tmp1, \tmp0, 31, 24 + st.w \tmp1, \thread, (THREAD_FCC + 4) + .endm + + .macro fpu_restore_cc thread tmp0 tmp1 + ld.w \tmp0, \thread, THREAD_FCC + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + ld.w \tmp0, \thread, (THREAD_FCC + 4) + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc4, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc5, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc6, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc7, \tmp1 + .endm +#else + .macro fpu_save_cc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 movcf2gr \tmp0, $fcc1 bstrins.d \tmp1, \tmp0, 15, 8 movcf2gr \tmp0, $fcc2 @@ -109,7 +168,7 @@ .endm =20 .macro fpu_restore_cc thread tmp0 tmp1 - ldptr.d \tmp0, \thread, THREAD_FCC + ldptr.d \tmp0, \thread, THREAD_FCC bstrpick.d \tmp1, \tmp0, 7, 0 movgr2cf $fcc0, \tmp1 bstrpick.d \tmp1, \tmp0, 15, 8 @@ -127,6 +186,7 @@ bstrpick.d \tmp1, \tmp0, 63, 56 movgr2cf $fcc7, \tmp1 .endm +#endif =20 .macro fpu_save_double thread tmp li.w \tmp, THREAD_FPR0 @@ -606,12 +666,14 @@ 766: lu12i.w \reg, 0 ori \reg, \reg, 0 +#ifdef CONFIG_64BIT lu32i.d \reg, 0 lu52i.d \reg, \reg, 0 +#endif .pushsection ".la_abs", "aw", %progbits - .p2align 3 - .dword 766b - .dword \sym + .p2align PTRLOG + PTR 766b + PTR \sym .popsection #endif .endm diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/inclu= de/asm/stackframe.h index 5cb568a60cf8..b44930fbb675 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -38,22 +38,42 @@ cfi_restore \reg \offset \docfi .endm =20 + .macro SETUP_TWINS 
temp + pcaddi t0, 0 + PTR_LI t1, ~TO_PHYS_MASK + and t0, t0, t1 + ori t0, t0, (1 << 4 | 1) + csrwr t0, LOONGARCH_CSR_DMWIN0 + PTR_LI t0, CSR_DMW1_INIT + csrwr t0, LOONGARCH_CSR_DMWIN1 + .endm + + .macro SETUP_MODES temp + /* Enable PG */ + li.w \temp, 0xb0 # PLV=3D0, IE=3D0, PG=3D1 + csrwr \temp, LOONGARCH_CSR_CRMD + li.w \temp, 0x04 # PLV=3D0, PIE=3D1, PWE=3D0 + csrwr \temp, LOONGARCH_CSR_PRMD + li.w \temp, 0x00 # FPE=3D0, SXE=3D0, ASXE=3D0, BTE=3D0 + csrwr \temp, LOONGARCH_CSR_EUEN + .endm + .macro SETUP_DMWINS temp - li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN0 - li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN1 - li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN2 - li.d \temp, CSR_DMW3_INIT # 0x0, unused + PTR_LI \temp, CSR_DMW3_INIT # 0x0, unused csrwr \temp, LOONGARCH_CSR_DMWIN3 .endm =20 /* Jump to the runtime virtual address. 
*/ .macro JUMP_VIRT_ADDR temp1 temp2 - li.d \temp1, CACHE_BASE + PTR_LI \temp1, CACHE_BASE pcaddi \temp2, 0 - bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0 + PTR_BSTRINS \temp1, \temp2, (DMW_PABITS - 1), 0 jirl zero, \temp1, 0xc .endm =20 @@ -171,7 +191,7 @@ andi t0, t0, 0x3 /* extract pplv bit */ beqz t0, 9f =20 - li.d tp, ~_THREAD_MASK + LONG_LI tp, ~_THREAD_MASK and tp, tp, sp cfi_st u0, PT_R21, \docfi csrrd u0, PERCPU_BASE_KS --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BAC11336EC0; Thu, 27 Nov 2025 15:51:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258676; cv=none; b=TFvrTGxDl8vd12BJLkYSzIw26IqoH80HoTJIggRtvWFeyzdkDSMvnW0BkrfxpK/RmmizbxEt5dv7+RT2Kq2/uUGo+Gp1iHIrWHZoonqeoGCfy0BPafTfcCoOKbVFBNVz8/yVSQv3dt31CBNnJowS91g9AQSXvkXsl4wXcUXivGg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258676; c=relaxed/simple; bh=xGiqlMCpdsnqPrErxLc9KfqZvLM1z7GlOl2Zwze+5go=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=CpnllbVI0CfR+/6xSnJbVr9sU7tX8Lac12KbKuquc2gpwyJTiQPFnH5wzEsQ6TJTc+rH4wANIEU//kENSUuuYHLMhMmFACZh4bmAw8hSjWVNg0tIp6kayfISs/OA8SS3vna3hiJocL/Nw5J7mm8vkjqlMmVT/Qomfc5GStq2VyU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id D55B5C116C6; Thu, 27 Nov 2025 15:51:11 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 04/14] LoongArch: Adjust boot & setup for 
32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:22 +0800 Message-ID: <20251127154832.137925-5-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust boot & setup for both 32BIT and 64BIT, including: efi header definition, MAX_IO_PICS definition, kernel entry and environment setup routines, etc. Add a fallback path in fdt_cpu_clk_init() to avoid 0MHz in /proc/cpuinfo if there is no valid clock freq from firmware. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/addrspace.h | 2 +- arch/loongarch/include/asm/irq.h | 5 ++++ arch/loongarch/kernel/efi-header.S | 4 +++ arch/loongarch/kernel/efi.c | 4 ++- arch/loongarch/kernel/env.c | 5 +++- arch/loongarch/kernel/head.S | 39 +++++++++++--------------- arch/loongarch/kernel/relocate.c | 9 +++++- 7 files changed, 42 insertions(+), 26 deletions(-) diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/includ= e/asm/addrspace.h index e739dbc6329d..9766a100504a 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -42,7 +42,7 @@ extern unsigned long vm_map_base; #endif =20 #define DMW_PABITS 48 -#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1) +#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1) =20 /* * Memory above this physical address will be considered highmem. 
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/= irq.h index 12bd15578c33..cf6c82a9117b 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,7 +53,12 @@ void spurious_interrupt(void); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclud= e_cpu); =20 +#ifdef CONFIG_32BIT +#define MAX_IO_PICS 1 +#else #define MAX_IO_PICS 8 +#endif + #define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) =20 struct acpi_vector_group { diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi= -header.S index ba0bdbf86aa8..6df56241cb95 100644 --- a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -9,7 +9,11 @@ .macro __EFI_PE_HEADER .long IMAGE_NT_SIGNATURE .Lcoff_header: +#ifdef CONFIG_32BIT + .short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */ +#else .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ +#endif .short .Lsection_count /* NumberOfSections */ .long 0 /* TimeDateStamp */ .long 0 /* PointerToSymbolTable */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 860a3bc030e0..52c21c895318 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -115,7 +115,9 @@ void __init efi_init(void) =20 efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor); =20 - set_bit(EFI_64BIT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + efi_nr_tables =3D efi_systab->nr_tables; efi_config_table =3D (unsigned long)efi_systab->tables; =20 diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 23bd5ae2212c..841206fde3ab 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void) =20 clk =3D of_clk_get(np, 0); of_node_put(np); + cpu_clock_freq =3D 200 * 1000 * 1000; =20 - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + 
pr_warn("No valid CPU clock freq, assume 200MHz.\n"); return -ENODEV; + } =20 cpu_clock_freq =3D clk_get_rate(clk); clk_put(clk); diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e3865e92a917..aba548db2446 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize); =20 SYM_CODE_START(kernel_entry) # kernel entry point =20 - /* Config direct window and set PG */ - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 - - /* Enable PG */ - li.w t0, 0xb0 # PLV=3D0, IE=3D0, PG=3D1 - csrwr t0, LOONGARCH_CSR_CRMD - li.w t0, 0x04 # PLV=3D0, PIE=3D1, PWE=3D0 - csrwr t0, LOONGARCH_CSR_PRMD - li.w t0, 0x00 # FPE=3D0, SXE=3D0, ASXE=3D0, BTE=3D0 - csrwr t0, LOONGARCH_CSR_EUEN + SETUP_DMWINS t0 =20 la.pcrel t0, __bss_start # clear .bss - st.d zero, t0, 0 + LONG_S zero, t0, 0 la.pcrel t1, __bss_stop - LONGSIZE 1: - addi.d t0, t0, LONGSIZE - st.d zero, t0, 0 + PTR_ADDI t0, t0, LONGSIZE + LONG_S zero, t0, 0 bne t0, t1, 1b =20 la.pcrel t0, fw_arg0 - st.d a0, t0, 0 # firmware arguments + PTR_S a0, t0, 0 # firmware arguments la.pcrel t0, fw_arg1 - st.d a1, t0, 0 + PTR_S a1, t0, 0 la.pcrel t0, fw_arg2 - st.d a2, t0, 0 + PTR_S a2, t0, 0 =20 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* KSave3 used for percpu base, initialized as 0 */ @@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point =20 /* Jump to the new kernel: new_pc =3D current_pc + random_offset */ pcaddi t0, 0 - add.d t0, t0, a0 + PTR_ADD t0, t0, a0 jirl zero, t0, 0xc #endif /* CONFIG_RANDOMIZE_BASE */ =20 @@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry) */ SYM_CODE_START(smpboot_entry) =20 - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 + SETUP_DMWINS t0 =20 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL 
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* Enable PG */ diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/reloc= ate.c index 76abbb8d2931..82aa3f035927 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random= _offset) =20 for (p =3D begin; (void *)p < end; p++) { long v =3D p->symvalue; - uint32_t lu12iw, ori, lu32id, lu52id; + uint32_t lu12iw, ori; +#ifdef CONFIG_64BIT + uint32_t lu32id, lu52id; +#endif union loongarch_instruction *insn =3D (void *)p->pc; =20 lu12iw =3D (v >> 12) & 0xfffff; ori =3D v & 0xfff; +#ifdef CONFIG_64BIT lu32id =3D (v >> 32) & 0xfffff; lu52id =3D v >> 52; +#endif =20 insn[0].reg1i20_format.immediate =3D lu12iw; insn[1].reg2i12_format.immediate =3D ori; +#ifdef CONFIG_64BIT insn[2].reg1i20_format.immediate =3D lu32id; insn[3].reg2i12_format.immediate =3D lu52id; +#endif } } =20 --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id D683123FC49; Thu, 27 Nov 2025 15:51:39 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258699; cv=none; b=Waz/f9LSQr4zgEEUVpY6MS/J6vxB8IdU/zvQ/JEgTgX7CwyHtWr1S5+NO8g8DGrGvwgaCmHxxm62qRGoPU2xVLqu3JbCMomtA8MSke1VogvhNYzAZ0zhWxBLkbcuaQIrR1lcXTUkmQOZAa2NKnXm2qMAE9upzwNTYPmxoDhiX6M= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258699; c=relaxed/simple; bh=0cbwOXUKb8e1f8wCYs5H+m/ylf1tQIK5+c4I6TxtjYA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=ZuS0KcUHXOJAtQSvkh4RfyffGgDItwQs+GRFAB4OJh7T8dlh2qdim31erXBA3izAScE8T7+CjxMfSDMk5ZYLAmYN1M69MGyo7j8a9PgxbBJUMFE62TCam5Q5TaJmztwqPsvjaJslgmVSoP+/8G0iMkNpwmUw9vU1R0j0WpyebaY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id BA020C4CEF8; Thu, 27 Nov 2025 15:51:35 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann , Yawei Li Subject: [PATCH V4 05/14] LoongArch: Adjust memory management for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:23 +0800 Message-ID: <20251127154832.137925-6-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust memory management for both 32BIT and 64BIT, including: address space definition, DMW CSR definition, page table bits definition, boot time detection of VA/PA bits, page table init, tlb exception handling, copy_page/clear_page/dump_tlb libraries, etc. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Yawei Li Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/addrspace.h | 13 + arch/loongarch/include/asm/cpu-features.h | 3 - arch/loongarch/include/asm/loongarch.h | 24 ++ arch/loongarch/include/asm/page.h | 2 +- arch/loongarch/include/asm/pgtable-bits.h | 36 ++- arch/loongarch/include/asm/pgtable.h | 74 +++-- arch/loongarch/kernel/cpu-probe.c | 6 +- arch/loongarch/lib/dump_tlb.c | 8 + arch/loongarch/mm/init.c | 4 +- arch/loongarch/mm/page.S | 118 ++++---- arch/loongarch/mm/tlb.c | 2 + arch/loongarch/mm/tlbex.S | 322 +++++++++++++++------- 12 files changed, 418 insertions(+), 194 deletions(-) diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/includ= e/asm/addrspace.h index 9766a100504a..d6472cafb32c 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -38,11 +38,20 @@ extern unsigned long vm_map_base; #endif =20 #ifndef WRITECOMBINE_BASE +#ifdef CONFIG_32BIT +#define WRITECOMBINE_BASE CSR_DMW0_BASE +#else #define WRITECOMBINE_BASE CSR_DMW2_BASE #endif +#endif =20 +#ifdef CONFIG_32BIT +#define DMW_PABITS 29 +#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1) +#else #define DMW_PABITS 48 #define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1) +#endif =20 /* * Memory above this physical address will be considered highmem. 
@@ -112,7 +121,11 @@ extern unsigned long vm_map_base; /* * Returns the physical address of a KPRANGEx / XKPRANGE address */ +#ifdef CONFIG_32BIT +#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK) +#else #define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK) +#endif =20 /* * On LoongArch, I/O ports mappring is following: diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/inc= lude/asm/cpu-features.h index bd5f0457ad21..3745d991a99a 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -20,16 +20,13 @@ #define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64= BIT) =20 #ifdef CONFIG_32BIT -# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) # define cpu_vabits 31 # define cpu_pabits 31 #endif =20 #ifdef CONFIG_64BIT -# define cpu_has_64bits 1 # define cpu_vabits cpu_data[0].vabits # define cpu_pabits cpu_data[0].pabits -# define __NEED_ADDRBITS_PROBE #endif =20 /* diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/includ= e/asm/loongarch.h index 9f71a79271da..804341bd8d2e 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -912,6 +912,26 @@ #define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ =20 /* Direct Map window 0/1/2/3 */ + +#ifdef CONFIG_32BIT + +#define CSR_DMW0_PLV0 (1 << 0) +#define CSR_DMW0_VSEG (0x4) +#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) +#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0) + +#define CSR_DMW1_PLV0 (1 << 0) +#define CSR_DMW1_MAT (1 << 4) +#define CSR_DMW1_VSEG (0x5) +#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) +#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) + +#define CSR_DMW2_INIT 0x0 + +#define CSR_DMW3_INIT 0x0 + +#else + #define CSR_DMW0_PLV0 _CONST64_(1 << 0) #define CSR_DMW0_VSEG _CONST64_(0x8000) #define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) @@ -931,6 +951,8 @@ =20 #define CSR_DMW3_INIT 0x0 =20 
+#endif + /* Performance Counter registers */ #define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ #define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ @@ -1388,8 +1410,10 @@ __BUILD_CSR_OP(tlbidx) #define ENTRYLO_C_SHIFT 4 #define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT) #define ENTRYLO_G (_ULCAST_(1) << 6) +#ifdef CONFIG_64BIT #define ENTRYLO_NR (_ULCAST_(1) << 61) #define ENTRYLO_NX (_ULCAST_(1) << 62) +#endif =20 /* Values for PageSize register */ #define PS_4K 0x0000000c diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm= /page.h index a3aaf34fba16..256d1ff7a1e3 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -10,7 +10,7 @@ =20 #include =20 -#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG) #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/inc= lude/asm/pgtable-bits.h index 2fc3789220ac..b565573cd82e 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -6,6 +6,26 @@ #define _ASM_PGTABLE_BITS_H =20 /* Page table bits */ + +#ifdef CONFIG_32BIT +#define _PAGE_VALID_SHIFT 0 +#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ +#define _PAGE_DIRTY_SHIFT 1 +#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */ +#define _CACHE_SHIFT 4 /* 4~5, two bits */ +#define _PAGE_GLOBAL_SHIFT 6 +#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */ +#define _PAGE_PRESENT_SHIFT 7 +#define _PAGE_PFN_SHIFT 8 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ +#define _PAGE_SWP_EXCLUSIVE_SHIFT 13 +#define _PAGE_PFN_END_SHIFT 28 +#define _PAGE_WRITE_SHIFT 29 +#define _PAGE_MODIFIED_SHIFT 30 +#define _PAGE_PRESENT_INVALID_SHIFT 31 +#endif + +#ifdef CONFIG_64BIT #define _PAGE_VALID_SHIFT 0 #define 
_PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ #define _PAGE_DIRTY_SHIFT 1 @@ -18,14 +38,15 @@ #define _PAGE_MODIFIED_SHIFT 9 #define _PAGE_PROTNONE_SHIFT 10 #define _PAGE_SPECIAL_SHIFT 11 -#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ #define _PAGE_PFN_SHIFT 12 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 #define _PAGE_PFN_END_SHIFT 48 #define _PAGE_PRESENT_INVALID_SHIFT 60 #define _PAGE_NO_READ_SHIFT 61 #define _PAGE_NO_EXEC_SHIFT 62 #define _PAGE_RPLV_SHIFT 63 +#endif =20 /* Used by software */ #define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT) @@ -33,10 +54,15 @@ #define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT) #define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT) #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) +#ifdef CONFIG_32BIT +#define _PAGE_PROTNONE 0 +#define _PAGE_SPECIAL 0 +#else #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) +#endif =20 -/* We borrow bit 23 to store the exclusive marker in swap PTEs. */ +/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. 
*/ #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) =20 /* Used by TLB hardware (placed in EntryLo*) */ @@ -46,9 +72,15 @@ #define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT) #define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT) #define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT) +#ifdef CONFIG_32BIT +#define _PAGE_NO_READ 0 +#define _PAGE_NO_EXEC 0 +#define _PAGE_RPLV 0 +#else #define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT) #define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT) #define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT) +#endif #define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT) #define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT) =20 diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/= asm/pgtable.h index 03fb60432fde..9ed2ea23c580 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -11,6 +11,7 @@ =20 #include #include +#include #include #include =20 @@ -23,37 +24,45 @@ #endif =20 #if CONFIG_PGTABLE_LEVELS =3D=3D 2 -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #elif CONFIG_PGTABLE_LEVELS =3D=3D 3 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG)) #elif CONFIG_PGTABLE_LEVELS =3D=3D 4 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG)) #endif =20 
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) =20 -#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) +#ifdef CONFIG_32BIT +#define VA_BITS 32 +#else +#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG)) +#endif =20 -#define PTRS_PER_PGD (PAGE_SIZE >> 3) +#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG) #if CONFIG_PGTABLE_LEVELS > 3 -#define PTRS_PER_PUD (PAGE_SIZE >> 3) +#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG) #endif #if CONFIG_PGTABLE_LEVELS > 2 -#define PTRS_PER_PMD (PAGE_SIZE >> 3) +#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG) #endif -#define PTRS_PER_PTE (PAGE_SIZE >> 3) +#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG) =20 +#ifdef CONFIG_32BIT +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#else #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 /= PGDIR_SIZE):1) +#endif =20 #ifndef __ASSEMBLER__ =20 @@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof= (unsigned long)]; =20 #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) =20 -/* - * TLB refill handlers may also map the vmalloc area into xkvrange. - * Avoid the first couple of pages so NULL pointer dereferences will - * still reliably trap. - */ +#ifdef CONFIG_32BIT + +#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) +#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE)) + +#endif + +#ifdef CONFIG_64BIT + #define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) #define MODULES_END (MODULES_VADDR + SZ_256M) =20 @@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof= (unsigned long)]; #define KFENCE_AREA_START (VMEMMAP_END + 1) #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) =20 +#endif + #define ptep_get(ptep) READ_ONCE(*(ptep)) #define pmdp_get(pmdp) READ_ONCE(*(pmdp)) =20 @@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr); * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that * are !pte_none() && !pte_present(). 
* - * Format of swap PTEs: + * Format of 32bit swap PTEs: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <------------ offset -------------> E <- type -> <-- zeroes --> + * + * E is the exclusive marker that is not stored in swap entries. + * The zero'ed bits include _PAGE_PRESENT. + * + * Format of 64bit swap PTEs: * * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 @@ -290,11 +314,21 @@ extern void kernel_pte_init(void *addr); * E is the exclusive marker that is not stored in swap entries. * The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE. */ + +#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7) +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 8 : 16) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1) + static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) -{ pte_t pte; pte_val(pte) =3D ((type & 0x7f) << 16) | (offset << 24); retu= rn pte; } +{ + pte_t pte; + pte_val(pte) =3D ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset= << __SWP_OFFSET_SHIFT); + return pte; +} =20 -#define __swp_type(x) (((x).val >> 16) & 0x7f) -#define __swp_offset(x) ((x).val >> 24) +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((ty= pe), (offset))) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-= probe.c index 3726cd0885b6..08a227034042 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base); =20 static void cpu_probe_addrbits(struct 
cpuinfo_loongarch *c) { -#ifdef __NEED_ADDRBITS_PROBE +#ifdef CONFIG_32BIT + c->pabits =3D cpu_pabits; + c->vabits =3D cpu_vabits; + vm_map_base =3D KVRANGE; +#else c->pabits =3D (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4; c->vabits =3D (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12; vm_map_base =3D 0UL - (1UL << c->vabits); diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c index 116f21ea4e2c..e1cdad7a676e 100644 --- a/arch/loongarch/lib/dump_tlb.c +++ b/arch/loongarch/lib/dump_tlb.c @@ -73,12 +73,16 @@ static void dump_tlb(int first, int last) vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask); =20 /* NR/NX are in awkward places, so mask them off separately */ +#ifdef CONFIG_64BIT pa =3D entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX); +#endif pa =3D pa & PAGE_MASK; pr_cont("\n\t["); +#ifdef CONFIG_64BIT pr_cont("nr=3D%d nx=3D%d ", (entrylo0 & ENTRYLO_NR) ? 1 : 0, (entrylo0 & ENTRYLO_NX) ? 1 : 0); +#endif pr_cont("pa=3D0x%0*llx c=3D%d d=3D%d v=3D%d g=3D%d plv=3D%lld] [", pwidth, pa, c0, (entrylo0 & ENTRYLO_D) ? 1 : 0, @@ -86,11 +90,15 @@ static void dump_tlb(int first, int last) (entrylo0 & ENTRYLO_G) ? 1 : 0, (entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT); /* NR/NX are in awkward places, so mask them off separately */ +#ifdef CONFIG_64BIT pa =3D entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX); +#endif pa =3D pa & PAGE_MASK; +#ifdef CONFIG_64BIT pr_cont("nr=3D%d nx=3D%d ", (entrylo1 & ENTRYLO_NR) ? 1 : 0, (entrylo1 & ENTRYLO_NX) ? 1 : 0); +#endif pr_cont("pa=3D0x%0*llx c=3D%d d=3D%d v=3D%d g=3D%d plv=3D%lld]\n", pwidth, pa, c1, (entrylo1 & ENTRYLO_D) ? 
1 : 0, diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 6bfd4b8dad1b..0946662afdd6 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table); pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); =20 -#ifdef CONFIG_EXECMEM +#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) static struct execmem_info execmem_info __ro_after_init; =20 struct execmem_info __init *execmem_arch_setup(void) @@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void) =20 return &execmem_info; } -#endif /* CONFIG_EXECMEM */ +#endif /* CONFIG_EXECMEM && MODULES_VADDR */ diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S index 7ad76551d313..7286b804756d 100644 --- a/arch/loongarch/mm/page.S +++ b/arch/loongarch/mm/page.S @@ -10,75 +10,75 @@ =20 .align 5 SYM_FUNC_START(clear_page) - lu12i.w t0, 1 << (PAGE_SHIFT - 12) - add.d t0, t0, a0 + lu12i.w t0, 1 << (PAGE_SHIFT - 12) + PTR_ADD t0, t0, a0 1: - st.d zero, a0, 0 - st.d zero, a0, 8 - st.d zero, a0, 16 - st.d zero, a0, 24 - st.d zero, a0, 32 - st.d zero, a0, 40 - st.d zero, a0, 48 - st.d zero, a0, 56 - addi.d a0, a0, 128 - st.d zero, a0, -64 - st.d zero, a0, -56 - st.d zero, a0, -48 - st.d zero, a0, -40 - st.d zero, a0, -32 - st.d zero, a0, -24 - st.d zero, a0, -16 - st.d zero, a0, -8 - bne t0, a0, 1b + LONG_S zero, a0, (LONGSIZE * 0) + LONG_S zero, a0, (LONGSIZE * 1) + LONG_S zero, a0, (LONGSIZE * 2) + LONG_S zero, a0, (LONGSIZE * 3) + LONG_S zero, a0, (LONGSIZE * 4) + LONG_S zero, a0, (LONGSIZE * 5) + LONG_S zero, a0, (LONGSIZE * 6) + LONG_S zero, a0, (LONGSIZE * 7) + PTR_ADDI a0, a0, (LONGSIZE * 16) + LONG_S zero, a0, -(LONGSIZE * 8) + LONG_S zero, a0, -(LONGSIZE * 7) + LONG_S zero, a0, -(LONGSIZE * 6) + LONG_S zero, a0, -(LONGSIZE * 5) + LONG_S zero, a0, -(LONGSIZE * 4) + LONG_S zero, a0, -(LONGSIZE * 3) + LONG_S zero, a0, -(LONGSIZE * 2) + LONG_S zero, a0, -(LONGSIZE * 1) + bne t0, a0, 
1b =20 - jr ra + jr ra SYM_FUNC_END(clear_page) EXPORT_SYMBOL(clear_page) =20 .align 5 SYM_FUNC_START(copy_page) - lu12i.w t8, 1 << (PAGE_SHIFT - 12) - add.d t8, t8, a0 + lu12i.w t8, 1 << (PAGE_SHIFT - 12) + PTR_ADD t8, t8, a0 1: - ld.d t0, a1, 0 - ld.d t1, a1, 8 - ld.d t2, a1, 16 - ld.d t3, a1, 24 - ld.d t4, a1, 32 - ld.d t5, a1, 40 - ld.d t6, a1, 48 - ld.d t7, a1, 56 + LONG_L t0, a1, (LONGSIZE * 0) + LONG_L t1, a1, (LONGSIZE * 1) + LONG_L t2, a1, (LONGSIZE * 2) + LONG_L t3, a1, (LONGSIZE * 3) + LONG_L t4, a1, (LONGSIZE * 4) + LONG_L t5, a1, (LONGSIZE * 5) + LONG_L t6, a1, (LONGSIZE * 6) + LONG_L t7, a1, (LONGSIZE * 7) =20 - st.d t0, a0, 0 - st.d t1, a0, 8 - ld.d t0, a1, 64 - ld.d t1, a1, 72 - st.d t2, a0, 16 - st.d t3, a0, 24 - ld.d t2, a1, 80 - ld.d t3, a1, 88 - st.d t4, a0, 32 - st.d t5, a0, 40 - ld.d t4, a1, 96 - ld.d t5, a1, 104 - st.d t6, a0, 48 - st.d t7, a0, 56 - ld.d t6, a1, 112 - ld.d t7, a1, 120 - addi.d a0, a0, 128 - addi.d a1, a1, 128 + LONG_S t0, a0, (LONGSIZE * 0) + LONG_S t1, a0, (LONGSIZE * 1) + LONG_L t0, a1, (LONGSIZE * 8) + LONG_L t1, a1, (LONGSIZE * 9) + LONG_S t2, a0, (LONGSIZE * 2) + LONG_S t3, a0, (LONGSIZE * 3) + LONG_L t2, a1, (LONGSIZE * 10) + LONG_L t3, a1, (LONGSIZE * 11) + LONG_S t4, a0, (LONGSIZE * 4) + LONG_S t5, a0, (LONGSIZE * 5) + LONG_L t4, a1, (LONGSIZE * 12) + LONG_L t5, a1, (LONGSIZE * 13) + LONG_S t6, a0, (LONGSIZE * 6) + LONG_S t7, a0, (LONGSIZE * 7) + LONG_L t6, a1, (LONGSIZE * 14) + LONG_L t7, a1, (LONGSIZE * 15) + PTR_ADDI a0, a0, (LONGSIZE * 16) + PTR_ADDI a1, a1, (LONGSIZE * 16) =20 - st.d t0, a0, -64 - st.d t1, a0, -56 - st.d t2, a0, -48 - st.d t3, a0, -40 - st.d t4, a0, -32 - st.d t5, a0, -24 - st.d t6, a0, -16 - st.d t7, a0, -8 + LONG_S t0, a0, -(LONGSIZE * 8) + LONG_S t1, a0, -(LONGSIZE * 7) + LONG_S t2, a0, -(LONGSIZE * 6) + LONG_S t3, a0, -(LONGSIZE * 5) + LONG_S t4, a0, -(LONGSIZE * 4) + LONG_S t5, a0, -(LONGSIZE * 3) + LONG_S t6, a0, -(LONGSIZE * 2) + LONG_S t7, a0, -(LONGSIZE * 1) =20 - bne t8, a0, 1b - jr ra 
+ bne t8, a0, 1b + jr ra SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 6e474469e210..6a3c91b9cacd 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -251,8 +251,10 @@ static void output_pgtable_bits_defines(void) pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); +#ifdef CONFIG_64BIT pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); +#endif pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT); pr_debug("\n"); } diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index c08682a89c58..84a881a339a7 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -11,10 +11,18 @@ =20 #define INVTLB_ADDR_GFALSE_AND_ASID 5 =20 -#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG) + +#ifdef CONFIG_32BIT +#define PTE_LL ll.w +#define PTE_SC sc.w +#else +#define PTE_LL ll.d +#define PTE_SC sc.d +#endif =20 .macro tlb_do_page_fault, write SYM_CODE_START(tlb_do_page_fault_\write) @@ -60,52 +68,61 @@ SYM_CODE_START(handle_tlb_load) =20 vmalloc_done_load: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - 
alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 + #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 + #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 =20 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. */ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_load =20 - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 =20 #ifdef CONFIG_SMP smp_pgtable_change_load: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif andi ra, t0, _PAGE_PRESENT beqz ra, nopage_tlb_load =20 ori t0, t0, _PAGE_VALID + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_load #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif + tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -115,30 +132,28 @@ smp_pgtable_change_load: csrrd ra, EXCEPTION_KS2 ertn =20 -#ifdef CONFIG_64BIT vmalloc_load: la_abs t1, swapper_pg_dir b vmalloc_done_load -#endif =20 /* This is the entry point of a huge page. 
*/ tlb_huge_update_load: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif andi t0, ra, _PAGE_PRESENT beqz t0, nopage_tlb_load =20 #ifdef CONFIG_SMP ori t0, ra, _PAGE_VALID - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_load ori t0, ra, _PAGE_VALID #else ori t0, ra, _PAGE_VALID - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -158,27 +173,27 @@ tlb_huge_update_load: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 =20 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 =20 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 =20 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 tlbfill =20 - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 csrrd t0, EXCEPTION_KS0 @@ -216,53 +231,71 @@ SYM_CODE_START(handle_tlb_store) =20 vmalloc_done_store: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if 
CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 =20 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. */ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_store =20 - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 =20 #ifdef CONFIG_SMP smp_pgtable_change_store: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif + +#ifdef CONFIG_64BIT andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE +#else + PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE + and ra, ra, t0 + nor ra, ra, zero +#endif bnez ra, nopage_tlb_store =20 +#ifdef CONFIG_64BIT ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_store #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr 
t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -272,31 +305,42 @@ smp_pgtable_change_store: csrrd ra, EXCEPTION_KS2 ertn =20 -#ifdef CONFIG_64BIT vmalloc_store: la_abs t1, swapper_pg_dir b vmalloc_done_store -#endif =20 /* This is the entry point of a huge page. */ tlb_huge_update_store: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif + +#ifdef CONFIG_64BIT andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE +#else + PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE + and t0, t0, ra + nor t0, t0, zero +#endif + bnez t0, nopage_tlb_store =20 #ifdef CONFIG_SMP ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_store ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else +#ifdef CONFIG_64BIT ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - st.d t0, t1, 0 +#else + PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -316,28 +360,28 @@ tlb_huge_update_store: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 =20 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 =20 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 =20 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 tlbfill =20 /* 
Reset default page size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 csrrd t0, EXCEPTION_KS0 @@ -375,52 +419,69 @@ SYM_CODE_START(handle_tlb_modify) =20 vmalloc_done_modify: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 =20 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
*/ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_modify =20 - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 =20 #ifdef CONFIG_SMP smp_pgtable_change_modify: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif +#ifdef CONFIG_64BIT andi ra, t0, _PAGE_WRITE +#else + PTR_LI ra, _PAGE_WRITE + and ra, t0, ra +#endif + beqz ra, nopage_tlb_modify =20 +#ifdef CONFIG_64BIT ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_modify #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -430,30 +491,40 @@ smp_pgtable_change_modify: csrrd ra, EXCEPTION_KS2 ertn =20 -#ifdef CONFIG_64BIT vmalloc_modify: la_abs t1, swapper_pg_dir b vmalloc_done_modify -#endif =20 /* This is the entry point of a huge page. 
*/ tlb_huge_update_modify: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif + +#ifdef CONFIG_64BIT andi t0, ra, _PAGE_WRITE +#else + PTR_LI t0, _PAGE_WRITE + and t0, ra, t0 +#endif + beqz t0, nopage_tlb_modify =20 #ifdef CONFIG_SMP ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_modify ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else +#ifdef CONFIG_64BIT ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - st.d t0, t1, 0 +#else + PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -473,28 +544,28 @@ tlb_huge_update_modify: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 =20 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 =20 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 =20 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 tlbfill =20 /* Reset default page size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX =20 csrrd t0, EXCEPTION_KS0 @@ -517,6 +588,44 @@ SYM_CODE_START(handle_tlb_modify_ptw) jr t0 
SYM_CODE_END(handle_tlb_modify_ptw) =20 +#ifdef CONFIG_32BIT +SYM_CODE_START(handle_tlb_refill) + UNWIND_HINT_UNDEFINED + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + li.w ra, 0x1fffffff + + csrrd t0, LOONGARCH_CSR_PGD + csrrd t1, LOONGARCH_CSR_TLBRBADV + srli.w t1, t1, PGDIR_SHIFT + slli.w t1, t1, 0x2 + add.w t0, t0, t1 + and t0, t0, ra + + ld.w t0, t0, 0 + csrrd t1, LOONGARCH_CSR_TLBRBADV + slli.w t1, t1, (32 - PGDIR_SHIFT) + srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1) + slli.w t1, t1, (0x2 + 1) + add.w t0, t0, t1 + and t0, t0, ra + + ld.w t1, t0, 0x0 + csrwr t1, LOONGARCH_CSR_TLBRELO0 + + ld.w t1, t0, 0x4 + csrwr t1, LOONGARCH_CSR_TLBRELO1 + + tlbfill + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn +SYM_CODE_END(handle_tlb_refill) +#endif + +#ifdef CONFIG_64BIT SYM_CODE_START(handle_tlb_refill) UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_TLBRSAVE @@ -534,3 +643,4 @@ SYM_CODE_START(handle_tlb_refill) csrrd t0, LOONGARCH_CSR_TLBRSAVE ertn SYM_CODE_END(handle_tlb_refill) +#endif --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B453A2C0274; Thu, 27 Nov 2025 15:52:57 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258777; cv=none; b=Sg+Q0J8qzf4lagie4vO48GK6flvp5aDfvfx3N6PxG26/w33bIRv7vwWfXAMz87w4FL4wiIvWkQcJJX1Uhsq5+CGVf7OrBqFtdz0ogz9Z5zskVn1Xz31nMW0e+A9hZuf4Fzzm9ZSyf7/JWq99YrHtED0vKNFNJTNUJCZvEU7J4f0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258777; c=relaxed/simple; bh=H9oagXPUlnj4v8+KtBtzEi/ls2DQKmjaq14rxQeDbXA=; 
h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=pSipSj2sOrGjfbDzcNdJG92E1sFNvQtTewhu5eriTff1+7t0gO3wAjMsha3oGfuTieKF6PxhtJxYlimqaIs9IgwOE94c2xQy7Kedne+4nP2WQh6zElDbCD9rAbZkQ3ElPVN+7bRNZCLLbv2xR0e0CvEjpq42dVh/jaWwSRvHV6s= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 5F240C113D0; Thu, 27 Nov 2025 15:52:55 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 06/14] LoongArch: Adjust process management for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:24 +0800 Message-ID: <20251127154832.137925-7-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust process management for both 32BIT and 64BIT, including: CPU context switching, FPU loading/restoring, process dumping and process tracing routines. Q: Why modify switch.S? A: LoongArch32 has no ldptr.d/stptr.d instructions, and asm offsets of thread_struct members are too large to be filled in the 12b immediate field of ld.w/st.w. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/elf.h | 1 + arch/loongarch/include/asm/inst.h | 12 ++- arch/loongarch/include/uapi/asm/ptrace.h | 10 ++ arch/loongarch/kernel/fpu.S | 111 +++++++++++++++++++++++ arch/loongarch/kernel/process.c | 6 +- arch/loongarch/kernel/ptrace.c | 5 + arch/loongarch/kernel/switch.S | 28 ++++-- 7 files changed, 157 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/= elf.h index f16bd42456e4..1b6489427e30 100644 --- a/arch/loongarch/include/asm/elf.h +++ b/arch/loongarch/include/asm/elf.h @@ -156,6 +156,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; =20 +void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs); void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs); =20 #ifdef CONFIG_32BIT diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm= /inst.h index 55e64a12a124..f9f207082d0e 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -438,8 +438,10 @@ static inline bool is_branch_ins(union loongarch_instr= uction *ip) =20 static inline bool is_ra_save_ins(union loongarch_instruction *ip) { - /* st.d $ra, $sp, offset */ - return ip->reg2i12_format.opcode =3D=3D std_op && + const u32 opcode =3D IS_ENABLED(CONFIG_32BIT) ? 
stw_op : std_op; + + /* st.w / st.d $ra, $sp, offset */ + return ip->reg2i12_format.opcode =3D=3D opcode && ip->reg2i12_format.rj =3D=3D LOONGARCH_GPR_SP && ip->reg2i12_format.rd =3D=3D LOONGARCH_GPR_RA && !is_imm12_negative(ip->reg2i12_format.immediate); @@ -447,8 +449,10 @@ static inline bool is_ra_save_ins(union loongarch_inst= ruction *ip) =20 static inline bool is_stack_alloc_ins(union loongarch_instruction *ip) { - /* addi.d $sp, $sp, -imm */ - return ip->reg2i12_format.opcode =3D=3D addid_op && + const u32 opcode =3D IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op; + + /* addi.w / addi.d $sp, $sp, -imm */ + return ip->reg2i12_format.opcode =3D=3D opcode && ip->reg2i12_format.rj =3D=3D LOONGARCH_GPR_SP && ip->reg2i12_format.rd =3D=3D LOONGARCH_GPR_SP && is_imm12_negative(ip->reg2i12_format.immediate); diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/incl= ude/uapi/asm/ptrace.h index 215e0f9e8aa3..b35c794323bc 100644 --- a/arch/loongarch/include/uapi/asm/ptrace.h +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -61,8 +61,13 @@ struct user_lbt_state { struct user_watch_state { __u64 dbg_info; struct { +#if __BITS_PER_LONG =3D=3D 32 + __u32 addr; + __u32 mask; +#else __u64 addr; __u64 mask; +#endif __u32 ctrl; __u32 pad; } dbg_regs[8]; @@ -71,8 +76,13 @@ struct user_watch_state { struct user_watch_state_v2 { __u64 dbg_info; struct { +#if __BITS_PER_LONG =3D=3D 32 + __u32 addr; + __u32 mask; +#else __u64 addr; __u64 mask; +#endif __u32 ctrl; __u32 pad; } dbg_regs[14]; diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 28caf416ae36..f225dcc5b530 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -96,6 +96,49 @@ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH) .endm =20 +#ifdef CONFIG_32BIT + .macro sc_save_fcc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr 
\tmp0, $fcc3 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, THREAD_FCC + movcf2gr \tmp0, $fcc4 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc5 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc6 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc7 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, (THREAD_FCC + 4) + .endm + + .macro sc_restore_fcc thread tmp0 tmp1 + EX ld.w \tmp0, \thread, THREAD_FCC + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + EX ld.w \tmp0, \thread, (THREAD_FCC + 4) + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc4, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc5, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc6, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc7, \tmp1 + .endm +#else .macro sc_save_fcc base, tmp0, tmp1 movcf2gr \tmp0, $fcc0 move \tmp1, \tmp0 @@ -135,6 +178,7 @@ bstrpick.d \tmp1, \tmp0, 63, 56 movgr2cf $fcc7, \tmp1 .endm +#endif =20 .macro sc_save_fcsr base, tmp0 movfcsr2gr \tmp0, fcsr0 @@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu) =20 li.w t1, -1 # SNaN =20 +#ifdef CONFIG_32BIT + movgr2fr.w $f0, t1 + movgr2frh.w $f0, t1 + movgr2fr.w $f1, t1 + movgr2frh.w $f1, t1 + movgr2fr.w $f2, t1 + movgr2frh.w $f2, t1 + movgr2fr.w $f3, t1 + movgr2frh.w $f3, t1 + movgr2fr.w $f4, t1 + movgr2frh.w $f4, t1 + movgr2fr.w $f5, t1 + movgr2frh.w $f5, t1 + movgr2fr.w $f6, t1 + movgr2frh.w $f6, t1 + movgr2fr.w $f7, t1 + movgr2frh.w $f7, t1 + movgr2fr.w $f8, t1 + movgr2frh.w $f8, t1 + movgr2fr.w $f9, t1 + movgr2frh.w $f9, t1 + movgr2fr.w $f10, t1 + movgr2frh.w $f10, t1 + movgr2fr.w $f11, t1 + movgr2frh.w $f11, t1 + movgr2fr.w $f12, t1 + movgr2frh.w $f12, t1 + movgr2fr.w $f13, t1 + movgr2frh.w $f13, t1 + movgr2fr.w $f14, t1 + movgr2frh.w $f14, t1 + movgr2fr.w $f15, t1 + movgr2frh.w $f15, t1 + movgr2fr.w $f16, t1 + movgr2frh.w 
$f16, t1 + movgr2fr.w $f17, t1 + movgr2frh.w $f17, t1 + movgr2fr.w $f18, t1 + movgr2frh.w $f18, t1 + movgr2fr.w $f19, t1 + movgr2frh.w $f19, t1 + movgr2fr.w $f20, t1 + movgr2frh.w $f20, t1 + movgr2fr.w $f21, t1 + movgr2frh.w $f21, t1 + movgr2fr.w $f22, t1 + movgr2frh.w $f22, t1 + movgr2fr.w $f23, t1 + movgr2frh.w $f23, t1 + movgr2fr.w $f24, t1 + movgr2frh.w $f24, t1 + movgr2fr.w $f25, t1 + movgr2frh.w $f25, t1 + movgr2fr.w $f26, t1 + movgr2frh.w $f26, t1 + movgr2fr.w $f27, t1 + movgr2frh.w $f27, t1 + movgr2fr.w $f28, t1 + movgr2frh.w $f28, t1 + movgr2fr.w $f29, t1 + movgr2frh.w $f29, t1 + movgr2fr.w $f30, t1 + movgr2frh.w $f30, t1 + movgr2fr.w $f31, t1 + movgr2frh.w $f31, t1 +#else movgr2fr.d $f0, t1 movgr2fr.d $f1, t1 movgr2fr.d $f2, t1 @@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu) movgr2fr.d $f29, t1 movgr2fr.d $f30, t1 movgr2fr.d $f31, t1 +#endif =20 jr ra SYM_FUNC_END(_init_fpu) diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/proces= s.c index d1e04f9e0f79..4ac1c3086152 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -382,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *m= ask, int exclude_cpu) nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace); } =20 -#ifdef CONFIG_64BIT +#ifdef CONFIG_32BIT +void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs) +#else void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) +#endif { unsigned int i; =20 @@ -400,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_= regs *regs) uregs[LOONGARCH_EF_CSR_ECFG] =3D regs->csr_ecfg; uregs[LOONGARCH_EF_CSR_ESTAT] =3D regs->csr_estat; } -#endif /* CONFIG_64BIT */ diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 8edd0954e55a..bea376e87d4e 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type, struct perf_event_attr attr; =20 /* 
Kernel-space address cannot be monitored by user-space */ +#ifdef CONFIG_32BIT + if ((unsigned long)addr >=3D KPRANGE1) + return -EINVAL; +#else if ((unsigned long)addr >=3D XKPRANGE) return -EINVAL; +#endif =20 bp =3D ptrace_hbp_get_initialised_bp(note_type, tsk, idx); if (IS_ERR(bp)) diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 3007e909e0d8..f377d8f5c51a 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -16,18 +16,23 @@ */ .align 5 SYM_FUNC_START(__switch_to) - csrrd t1, LOONGARCH_CSR_PRMD - stptr.d t1, a0, THREAD_CSRPRMD +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, TASK_STRUCT_OFFSET + PTR_ADDI a1, a1, TASK_STRUCT_OFFSET +#endif + csrrd t1, LOONGARCH_CSR_PRMD + LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) =20 cpu_save_nonscratch a0 - stptr.d ra, a0, THREAD_REG01 - stptr.d a3, a0, THREAD_SCHED_RA - stptr.d a4, a0, THREAD_SCHED_CFA + LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET) + LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET) + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) - la t7, __stack_chk_guard - ldptr.d t8, a1, TASK_STACK_CANARY - stptr.d t8, t7, 0 + la t7, __stack_chk_guard + LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET) + LONG_SPTR t8, t7, 0 #endif + move tp, a2 cpu_restore_nonscratch a1 =20 @@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to) PTR_ADD t0, t0, tp set_saved_sp t0, t1, t2 =20 - ldptr.d t1, a1, THREAD_CSRPRMD - csrwr t1, LOONGARCH_CSR_PRMD + LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) + csrwr t1, LOONGARCH_CSR_PRMD =20 +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET +#endif jr ra SYM_FUNC_END(__switch_to) --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 
16682331220; Thu, 27 Nov 2025 15:53:45 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258827; cv=none; b=m85gxsexNNqaFdavJF86Ndb92eRK1uN1nO3mXV6iDF2XW2/r89HuZc+hEtkMvWWRPhVU9rcyJwyuPwrNNd+4l5bULLxhT6SqGGFywepQ4IpqlN+2pGYUyBxgGfTvJolq6H4U5txSkTmJBiBMicO+xIizlGmscAMmYDe957X8XVc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258827; c=relaxed/simple; bh=2DyTattMOJ+L/fwbFXGMUTdkSAAP2m/xEF9Ja56dRI8=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=jsfuLuhBJSZg/3gZbvxYJVQZvZ0vEGB/FQ9+biVFhSwwcYpVtbNmTaG1UF7L3m0F7I2d/r/givu4VZxXhISyskWcjnFYevVHVduHJqzzlbKXl70u8kCIXx+CcZ0cowDBTfEcuFj3k5D1s8kpgSN1/fqA5Z6VM7DhnbJDYCfpZwM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 7BE8DC4CEF8; Thu, 27 Nov 2025 15:53:43 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 07/14] LoongArch: Adjust time routines for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:25 +0800 Message-ID: <20251127154832.137925-8-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust time routines for both 32BIT and 64BIT, including: rdtime_h() / rdtime_l() definitions for 32BIT and rdtime_d() definition for 64BIT, get_cycles() and get_cycles64() definitions for 32BIT/64BIT, show time frequency info ("CPU MHz" and "BogoMIPS") in 
/proc/cpuinfo, etc. Use do_div() for division which works on both 32BIT and 64BIT platforms. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/loongarch.h | 32 ++++++++++++++++++++++++- arch/loongarch/include/asm/timex.h | 33 +++++++++++++++++++++++++- arch/loongarch/kernel/proc.c | 10 ++++---- arch/loongarch/kernel/syscall.c | 2 +- arch/loongarch/kernel/time.c | 15 ++++++------ arch/loongarch/kvm/vcpu.c | 5 ++-- 6 files changed, 80 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/includ= e/asm/loongarch.h index 804341bd8d2e..19e3f2c183fe 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -1238,7 +1238,35 @@ =20 #ifndef __ASSEMBLER__ =20 -static __always_inline u64 drdtime(void) +#ifdef CONFIG_32BIT + +static __always_inline u32 rdtime_h(void) +{ + u32 val =3D 0; + + __asm__ __volatile__( + "rdtimeh.w %0, $zero\n\t" + : "=3Dr"(val) + : + ); + return val; +} + +static __always_inline u32 rdtime_l(void) +{ + u32 val =3D 0; + + __asm__ __volatile__( + "rdtimel.w %0, $zero\n\t" + : "=3Dr"(val) + : + ); + return val; +} + +#else + +static __always_inline u64 rdtime_d(void) { u64 val =3D 0; =20 @@ -1250,6 +1278,8 @@ static __always_inline u64 drdtime(void) return val; } =20 +#endif + static inline unsigned int get_csr_cpuid(void) { return csr_read32(LOONGARCH_CSR_CPUID); diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/as= m/timex.h index fb41e9e7a222..9ea52fad9690 100644 --- a/arch/loongarch/include/asm/timex.h +++ b/arch/loongarch/include/asm/timex.h @@ -18,7 +18,38 @@ typedef unsigned long cycles_t; =20 static inline cycles_t get_cycles(void) { - return drdtime(); +#ifdef CONFIG_32BIT + return rdtime_l(); +#else + return rdtime_d(); +#endif +} + +#ifdef CONFIG_32BIT + +#define get_cycles_hi get_cycles_hi + +static inline cycles_t get_cycles_hi(void) +{ + return rdtime_h(); +} + +#endif 
+ +static inline u64 get_cycles64(void) +{ +#ifdef CONFIG_32BIT + u32 hi, lo; + + do { + hi =3D rdtime_h(); + lo =3D rdtime_l(); + } while (hi !=3D rdtime_h()); + + return ((u64)hi << 32) | lo; +#else + return rdtime_d(); +#endif } =20 #endif /* __KERNEL__ */ diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 63d2b7e7e844..a8800d20e11b 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned int prid =3D cpu_data[n].processor_id; unsigned int version =3D cpu_data[n].processor_id & 0xff; unsigned int fp_version =3D cpu_data[n].fpu_vers; + u64 freq =3D cpu_clock_freq, bogomips =3D lpj_fine * cpu_clock_freq; =20 #ifdef CONFIG_SMP if (!cpu_online(n)) return 0; #endif + do_div(freq, 10000); + do_div(bogomips, const_clock_freq * (5000/HZ)); =20 /* * For the first processor also print the system type @@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid); seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version); - seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n", - cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100); - seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n", - (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ), - ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100= ); + seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 10= 0); + seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomi= ps % 100); seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n", cpu_pabits + 1, cpu_vabits + 1); diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscal= l.c index 168bd97540f8..ab94eb5ce039 100644 --- a/arch/loongarch/kernel/syscall.c +++ 
b/arch/loongarch/kernel/syscall.c @@ -75,7 +75,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_re= gs *regs) * * The resulting 6 bits of entropy is seen in SP[9:4]. */ - choose_random_kstack_offset(drdtime()); + choose_random_kstack_offset(get_cycles()); =20 syscall_exit_to_user_mode(regs); } diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 1c31bf3a16ed..5892f6da07a5 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -18,6 +18,7 @@ #include #include #include +#include =20 u64 cpu_clock_freq; EXPORT_SYMBOL(cpu_clock_freq); @@ -62,12 +63,12 @@ static int constant_set_state_oneshot(struct clock_even= t_device *evt) =20 static int constant_set_state_periodic(struct clock_event_device *evt) { - unsigned long period; unsigned long timer_config; + u64 period =3D const_clock_freq; =20 raw_spin_lock(&state_lock); =20 - period =3D const_clock_freq / HZ; + do_div(period, HZ); timer_config =3D period & CSR_TCFG_VAL; timer_config |=3D (CSR_TCFG_PERIOD | CSR_TCFG_EN); csr_write(timer_config, LOONGARCH_CSR_TCFG); @@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu) =20 static unsigned long get_loops_per_jiffy(void) { - unsigned long lpj =3D (unsigned long)const_clock_freq; + u64 lpj =3D const_clock_freq; =20 do_div(lpj, HZ); =20 @@ -131,7 +132,7 @@ static long init_offset; =20 void save_counter(void) { - init_offset =3D drdtime(); + init_offset =3D get_cycles(); } =20 void sync_counter(void) @@ -197,12 +198,12 @@ int constant_clockevent_init(void) =20 static u64 read_const_counter(struct clocksource *clk) { - return drdtime(); + return get_cycles64(); } =20 static noinstr u64 sched_clock_read(void) { - return drdtime(); + return get_cycles64(); } =20 static struct clocksource clocksource_const =3D { @@ -235,7 +236,7 @@ void __init time_init(void) else const_clock_freq =3D calc_const_freq(); =20 - init_offset =3D -(drdtime() - csr_read(LOONGARCH_CSR_CNTC)); + init_offset =3D -(get_cycles() - 
csr_read(LOONGARCH_CSR_CNTC)); =20 constant_clockevent_init(); constant_clocksource_init(); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 1245a6b35896..803224d297eb 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -9,6 +9,7 @@ #include #include #include +#include =20 #define CREATE_TRACE_POINTS #include "trace.h" @@ -811,7 +812,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, case KVM_REG_LOONGARCH_KVM: switch (reg->id) { case KVM_REG_LOONGARCH_COUNTER: - *v =3D drdtime() + vcpu->kvm->arch.time_offset; + *v =3D get_cycles() + vcpu->kvm->arch.time_offset; break; case KVM_REG_LOONGARCH_DEBUG_INST: *v =3D INSN_HVCL | KVM_HCALL_SWDBG; @@ -906,7 +907,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, * only set for the first time for smp system */ if (vcpu->vcpu_id =3D=3D 0) - vcpu->kvm->arch.time_offset =3D (signed long)(v - drdtime()); + vcpu->kvm->arch.time_offset =3D (signed long)(v - get_cycles()); break; case KVM_REG_LOONGARCH_VCPU_RESET: vcpu->arch.st.guest_addr =3D 0; --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CA2EA3375C4; Thu, 27 Nov 2025 15:54:49 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258889; cv=none; b=n38kw715eJOBpUXwIzpzvC02uJyYIUP6G+Tqh061NVymM46WhcDNiFlCzHAL7gUx61hKArnE1eimDJrbGGaQhoQaIAndhFKp9ampa/qA98MMIz6DlVxEn+6RAYKRp3eg7SN+JOlXixqwr/WAXJ88sDTnr7vbPzZ0dVavRy2SCuU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258889; c=relaxed/simple; bh=aQx7HiOi3X1vk14M3py2qMHO/WxzFvSZYrjY6JZIBO4=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=Wumeh2iEggBulesHuuZXBzoj9YJhwfs7hols+pNwFWV3EZ2Xl/gYmCOb38BSJMsTSAjJitWmMFLs5ZWUsWkopjkpJWisoLXrdg7KkkjcvLIsxIN+b9YwnsfIL3ZSYhsgVzrl3ZFTO3xfESic5GSxsI6zoePY0d9IuI1TCDkZJC4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 85CF8C4CEF8; Thu, 27 Nov 2025 15:54:46 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 08/14] LoongArch: Adjust module loader for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:26 +0800 Message-ID: <20251127154832.137925-9-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust module loader for both 32BIT and 64BIT, including: change the s64 type to long, change the u64 type to unsigned long, change the plt entry definition and handling, etc. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/module.h | 11 ++++ arch/loongarch/include/asm/percpu.h | 2 +- arch/loongarch/kernel/module.c | 80 +++++++++++++++++------------ 3 files changed, 59 insertions(+), 34 deletions(-) diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/a= sm/module.h index f33f3fd32ecc..d56a968273de 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -38,8 +38,10 @@ struct got_entry { =20 struct plt_entry { u32 inst_lu12iw; +#ifdef CONFIG_64BIT u32 inst_lu32id; u32 inst_lu52id; +#endif u32 inst_jirl; }; =20 @@ -57,6 +59,14 @@ static inline struct got_entry emit_got_entry(Elf_Addr v= al) =20 static inline struct plt_entry emit_plt_entry(unsigned long val) { +#ifdef CONFIG_32BIT + u32 lu12iw, jirl; + + lu12iw =3D larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); + jirl =3D larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI)); + + return (struct plt_entry) { lu12iw, jirl }; +#else u32 lu12iw, lu32id, lu52id, jirl; =20 lu12iw =3D larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); @@ -65,6 +75,7 @@ static inline struct plt_entry emit_plt_entry(unsigned lo= ng val) jirl =3D larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI)); =20 return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl }; +#endif } =20 static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val) diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/a= sm/percpu.h index 44a8aea2b0e5..583f2466262f 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -13,7 +13,7 @@ * the loading address of main kernel image, but far from where the module= s are * loaded. Tell the compiler this fact when using explicit relocs. 
*/ -#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) +#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) && defined(C= ONFIG_64BIT) # if __has_attribute(model) # define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) # else diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 36d6d9eeb7c7..27f6d6b2e3ff 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -22,72 +22,76 @@ #include #include =20 -static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_= stack_top) +static int rela_stack_push(long stack_value, long *rela_stack, size_t *rel= a_stack_top) { if (*rela_stack_top >=3D RELA_STACK_DEPTH) return -ENOEXEC; =20 rela_stack[(*rela_stack_top)++] =3D stack_value; - pr_debug("%s stack_value =3D 0x%llx\n", __func__, stack_value); + pr_debug("%s stack_value =3D 0x%lx\n", __func__, stack_value); =20 return 0; } =20 -static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_= stack_top) +static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rel= a_stack_top) { if (*rela_stack_top =3D=3D 0) return -ENOEXEC; =20 *stack_value =3D rela_stack[--(*rela_stack_top)]; - pr_debug("%s stack_value =3D 0x%llx\n", __func__, *stack_value); + pr_debug("%s stack_value =3D 0x%lx\n", __func__, *stack_value); =20 return 0; } =20 static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr = v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return 0; } =20 static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr = v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { pr_err("%s: Unsupport relocation type %u, please add its support.\n", me-= >name, type); return -EINVAL; } =20 static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, 
size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *location =3D v; return 0; } =20 +#ifdef CONFIG_32BIT +#define apply_r_larch_64 apply_r_larch_error +#else static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *(Elf_Addr *)location =3D v; return 0; } +#endif =20 static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location,= Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { - return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top); + return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stac= k_top); } =20 static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *locati= on, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return rela_stack_push(v, rela_stack, rela_stack_top); } =20 static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, E= lf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err =3D 0; - s64 opr1; + long opr1; =20 err =3D rela_stack_pop(&opr1, rela_stack, rela_stack_top); if (err) @@ -104,7 +108,7 @@ static int apply_r_larch_sop_push_dup(struct module *mo= d, u32 *location, Elf_Add =20 static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset =3D (void *)v - (void *)location; =20 @@ -118,10 +122,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct mo= dule *mod, } =20 static int apply_r_larch_sop(struct module *mod, 
u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err =3D 0; - s64 opr1, opr2, opr3; + long opr1, opr2, opr3; =20 if (type =3D=3D R_LARCH_SOP_IF_ELSE) { err =3D rela_stack_pop(&opr3, rela_stack, rela_stack_top); @@ -164,10 +168,10 @@ static int apply_r_larch_sop(struct module *mod, u32 = *location, Elf_Addr v, } =20 static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, = Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err =3D 0; - s64 opr1; + long opr1; union loongarch_instruction *insn =3D (union loongarch_instruction *)loca= tion; =20 err =3D rela_stack_pop(&opr1, rela_stack, rela_stack_top); @@ -244,31 +248,33 @@ static int apply_r_larch_sop_imm_field(struct module = *mod, u32 *location, Elf_Ad } =20 overflow: - pr_err("module %s: opr1 =3D 0x%llx overflow! dangerous %s (%u) relocation= \n", + pr_err("module %s: opr1 =3D 0x%lx overflow! dangerous %s (%u) relocation\= n", mod->name, opr1, __func__, type); return -ENOEXEC; =20 unaligned: - pr_err("module %s: opr1 =3D 0x%llx unaligned! dangerous %s (%u) relocatio= n\n", + pr_err("module %s: opr1 =3D 0x%lx unaligned! 
dangerous %s (%u) relocation= \n", mod->name, opr1, __func__, type); return -ENOEXEC; } =20 static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Ad= dr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { switch (type) { case R_LARCH_ADD32: *(s32 *)location +=3D v; return 0; - case R_LARCH_ADD64: - *(s64 *)location +=3D v; - return 0; case R_LARCH_SUB32: *(s32 *)location -=3D v; return 0; +#ifdef CONFIG_64BIT + case R_LARCH_ADD64: + *(s64 *)location +=3D v; + return 0; case R_LARCH_SUB64: *(s64 *)location -=3D v; +#endif return 0; default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); @@ -278,7 +284,7 @@ static int apply_r_larch_add_sub(struct module *mod, u3= 2 *location, Elf_Addr v, =20 static int apply_r_larch_b26(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset =3D (void *)v - (void *)location; union loongarch_instruction *insn =3D (union loongarch_instruction *)loca= tion; @@ -311,14 +317,16 @@ static int apply_r_larch_b26(struct module *mod, } =20 static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr= v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { union loongarch_instruction *insn =3D (union loongarch_instruction *)loca= tion; /* Use s32 for a sign-extension deliberately. 
*/ s32 offset_hi20 =3D (void *)((v + 0x800) & ~0xfff) - (void *)((Elf_Addr)location & ~0xfff); +#ifdef CONFIG_64BIT Elf_Addr anchor =3D (((Elf_Addr)location) & ~0xfff) + offset_hi20; ptrdiff_t offset_rem =3D (void *)v - (void *)anchor; +#endif =20 switch (type) { case R_LARCH_PCALA_LO12: @@ -328,6 +336,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 = *location, Elf_Addr v, v =3D offset_hi20 >> 12; insn->reg1i20_format.immediate =3D v & 0xfffff; break; +#ifdef CONFIG_64BIT case R_LARCH_PCALA64_LO20: v =3D offset_rem >> 32; insn->reg1i20_format.immediate =3D v & 0xfffff; @@ -336,6 +345,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 = *location, Elf_Addr v, v =3D offset_rem >> 52; insn->reg2i12_format.immediate =3D v & 0xfff; break; +#endif default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); return -EINVAL; @@ -346,7 +356,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 = *location, Elf_Addr v, =20 static int apply_r_larch_got_pc(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { Elf_Addr got =3D module_emit_got_entry(mod, sechdrs, v); =20 @@ -369,7 +379,7 @@ static int apply_r_larch_got_pc(struct module *mod, } =20 static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_A= ddr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset =3D (void *)v - (void *)location; =20 @@ -377,14 +387,18 @@ static int apply_r_larch_32_pcrel(struct module *mod,= u32 *location, Elf_Addr v, return 0; } =20 +#ifdef CONFIG_32BIT +#define apply_r_larch_64_pcrel apply_r_larch_error +#else static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_A= ddr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int 
type) { ptrdiff_t offset =3D (void *)v - (void *)location; =20 *(u64 *)location =3D offset; return 0; } +#endif =20 /* * reloc_handlers_rela() - Apply a particular relocation to a module @@ -397,7 +411,7 @@ static int apply_r_larch_64_pcrel(struct module *mod, u= 32 *location, Elf_Addr v, * Return: 0 upon success, else -ERRNO */ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_A= ddr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type); + long *rela_stack, size_t *rela_stack_top, unsigned int type); =20 /* The handlers for known reloc types */ static reloc_rela_handler reloc_rela_handlers[] =3D { @@ -425,7 +439,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *s= trtab, { int i, err; unsigned int type; - s64 rela_stack[RELA_STACK_DEPTH]; + long rela_stack[RELA_STACK_DEPTH]; size_t rela_stack_top =3D 0; reloc_rela_handler handler; void *location; @@ -462,9 +476,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *s= trtab, return -EINVAL; } =20 - pr_debug("type %d st_value %llx r_addend %llx loc %llx\n", + pr_debug("type %d st_value %lx r_addend %lx loc %lx\n", (int)ELF_R_TYPE(rel[i].r_info), - sym->st_value, rel[i].r_addend, (u64)location); + (unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (un= signed long)location); =20 v =3D sym->st_value + rel[i].r_addend; switch (type) { --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 39E513376A0; Thu, 27 Nov 2025 15:55:39 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258940; cv=none; 
b=QOYM7tVgZdSj6hwc8E2u+MO67WEWq734bLpbRiWMlGZA55kVQhgGrWfd5Cpsr+7o5LgDCIbs3+rMqhXbJ1A0vkq3hR9xZWern9zOxTsfz3wceNA5g+7KeW9N45Z3VZLat8kBsCgu5wh2CKIlemVjCLEUvs/Jfgu6AvEtOPVnuGw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258940; c=relaxed/simple; bh=DwQ2wgcvLxZ3EH9QpbXhwz+CxDuf427+QtHUhK+h5uM=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=d5AsF7UVrt7NYWyqh4ObmfXeG/D8GvYqhJs1HhbQHq8VdB8NNJT0KrV0hl/nJBPL6EFy7A0TmjlGa8FyzXsPgD9/yjps9tl+BI0vpn7shQ056slNJgi+N2h7SSk64B6QrtssgLSsGridMfG+HvNfDORtX8781Lw5KfM8xOs/SgU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 71710C113D0; Thu, 27 Nov 2025 15:55:36 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 09/14] LoongArch: Adjust system call for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:27 +0800 Message-ID: <20251127154832.137925-10-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust system call for both 32BIT and 64BIT, including: add the uapi unistd_{32,64}.h and syscall_table_{32,64}.h inclusion, add sys_mmap2() definition, change the system call entry routines, etc. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/Kbuild | 1 + arch/loongarch/include/uapi/asm/Kbuild | 1 + arch/loongarch/include/uapi/asm/unistd.h | 6 ++++++ arch/loongarch/kernel/Makefile.syscalls | 1 + arch/loongarch/kernel/entry.S | 22 +++++++++++----------- arch/loongarch/kernel/syscall.c | 13 +++++++++++++ 6 files changed, 33 insertions(+), 11 deletions(-) diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm= /Kbuild index b04d2cef935f..9034b583a88a 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y +=3D syscall_table_32.h syscall-y +=3D syscall_table_64.h generated-y +=3D orc_hash.h =20 diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/includ= e/uapi/asm/Kbuild index 517761419999..89ac01faa5ae 100644 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ b/arch/loongarch/include/uapi/asm/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y +=3D unistd_32.h syscall-y +=3D unistd_64.h diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/incl= ude/uapi/asm/unistd.h index 1f01980f9c94..e19c7f2f9f87 100644 --- a/arch/loongarch/include/uapi/asm/unistd.h +++ b/arch/loongarch/include/uapi/asm/unistd.h @@ -1,3 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ =20 +#include + +#if __BITS_PER_LONG =3D=3D 32 +#include +#else #include +#endif diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kerne= l/Makefile.syscalls index ab7d9baa2915..cd46c2b69c7f 100644 --- a/arch/loongarch/kernel/Makefile.syscalls +++ b/arch/loongarch/kernel/Makefile.syscalls @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 =20 # No special ABIs on loongarch so far +syscall_abis_32 +=3D syscall_abis_64 +=3D diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 47e1db9a1ce4..b53d333a7c42 100644 --- 
a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -23,24 +23,24 @@ SYM_CODE_START(handle_syscall) UNWIND_HINT_UNDEFINED csrrd t0, PERCPU_BASE_KS la.pcrel t1, kernelsp - add.d t1, t1, t0 + PTR_ADD t1, t1, t0 move t2, sp - ld.d sp, t1, 0 + PTR_L sp, t1, 0 =20 - addi.d sp, sp, -PT_SIZE + PTR_ADDI sp, sp, -PT_SIZE cfi_st t2, PT_R3 cfi_rel_offset sp, PT_R3 - st.d zero, sp, PT_R0 + LONG_S zero, sp, PT_R0 csrrd t2, LOONGARCH_CSR_PRMD - st.d t2, sp, PT_PRMD + LONG_S t2, sp, PT_PRMD csrrd t2, LOONGARCH_CSR_CRMD - st.d t2, sp, PT_CRMD + LONG_S t2, sp, PT_CRMD csrrd t2, LOONGARCH_CSR_EUEN - st.d t2, sp, PT_EUEN + LONG_S t2, sp, PT_EUEN csrrd t2, LOONGARCH_CSR_ECFG - st.d t2, sp, PT_ECFG + LONG_S t2, sp, PT_ECFG csrrd t2, LOONGARCH_CSR_ESTAT - st.d t2, sp, PT_ESTAT + LONG_S t2, sp, PT_ESTAT cfi_st ra, PT_R1 cfi_st a0, PT_R4 cfi_st a1, PT_R5 @@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall) cfi_st a6, PT_R10 cfi_st a7, PT_R11 csrrd ra, LOONGARCH_CSR_ERA - st.d ra, sp, PT_ERA + LONG_S ra, sp, PT_ERA cfi_rel_offset ra, PT_ERA =20 cfi_st tp, PT_R2 @@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall) #endif =20 move u0, t0 - li.d tp, ~_THREAD_MASK + LONG_LI tp, ~_THREAD_MASK and tp, tp, sp =20 move a0, sp diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscal= l.c index ab94eb5ce039..1249d82c1cd0 100644 --- a/arch/loongarch/kernel/syscall.c +++ b/arch/loongarch/kernel/syscall.c @@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long= , len, unsigned long, return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); } =20 +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned l= ong, + prot, unsigned long, flags, unsigned long, fd, unsigned long, offset) +{ + if (offset & (~PAGE_MASK >> 12)) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT = - 12)); +} + void *sys_call_table[__NR_syscalls] =3D { [0 ... 
__NR_syscalls - 1] =3D sys_ni_syscall, +#ifdef CONFIG_32BIT +#include +#else #include +#endif }; =20 typedef long (*sys_call_fn)(unsigned long, unsigned long, --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BE73F231858; Thu, 27 Nov 2025 15:56:06 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258966; cv=none; b=boYVmoC9pHvNz8vn1t7kVstijRmKJGJeSEiYVe9hy/gXuLnpVwjbU+G0PpL/oT5cWm4Jqdonp7Y40jjzPwkp7wpWxcGsOQ41Q1rIBFyzAcgcgx3/EIOhf2Fr0Jz3MqdFF1iXdw47TcZq3bkAqMEfgEuXNpL2McMkFcWT3hmiiqA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764258966; c=relaxed/simple; bh=o9qF1skZzzh/Vu0bNKiK4qbazRnAbN1q6TucGfD/FsA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=I9BJ64hnzPsjYCl6QKnkVbatuRDgLGcGILYoq1RQwWJrtM0NfkrYVQA4W8MhLPCqdVG4yqmkJzC/t4TRbV6EjWUuHTK0TG9hH8gt8H59LzLQA9pZa5hTHDJhC1Xhc6mKytWxsOiiwX1QWGMZDuWq5UXI6kxafsdBdyiY2+dDkx8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1C7CAC4CEF8; Thu, 27 Nov 2025 15:56:03 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 10/14] LoongArch: Adjust user accessors for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:28 +0800 Message-ID: <20251127154832.137925-11-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: 
bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust user accessors for both 32BIT and 64BIT, including: get_user(), put_user(), copy_user(), clear_user(), etc. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/uaccess.h | 63 ++++++++++++++++++++++++++-- arch/loongarch/lib/clear_user.S | 22 ++++++---- arch/loongarch/lib/copy_user.S | 28 ++++++++----- 3 files changed, 91 insertions(+), 22 deletions(-) diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/= asm/uaccess.h index 0d22991ae430..4e259d490e45 100644 --- a/arch/loongarch/include/asm/uaccess.h +++ b/arch/loongarch/include/asm/uaccess.h @@ -19,10 +19,16 @@ #include #include =20 +#define __LSW 0 +#define __MSW 1 + extern u64 __ua_limit; =20 -#define __UA_ADDR ".dword" +#ifdef CONFIG_64BIT #define __UA_LIMIT __ua_limit +#else +#define __UA_LIMIT 0x80000000UL +#endif =20 /* * get_user: - Get a simple variable from user space. @@ -126,6 +132,7 @@ extern u64 __ua_limit; * * Returns zero on success, or -EFAULT on error. 
*/ + #define __put_user(x, ptr) \ ({ \ int __pu_err =3D 0; \ @@ -146,7 +153,7 @@ do { \ case 1: __get_data_asm(val, "ld.b", ptr); break; \ case 2: __get_data_asm(val, "ld.h", ptr); break; \ case 4: __get_data_asm(val, "ld.w", ptr); break; \ - case 8: __get_data_asm(val, "ld.d", ptr); break; \ + case 8: __get_data_asm_8(val, ptr); break; \ default: BUILD_BUG(); break; \ } \ } while (0) @@ -167,13 +174,39 @@ do { \ (val) =3D (__typeof__(*(ptr))) __gu_tmp; \ } =20 +#ifdef CONFIG_64BIT +#define __get_data_asm_8(val, ptr) \ + __get_data_asm(val, "ld.d", ptr) +#else /* !CONFIG_64BIT */ +#define __get_data_asm_8(val, ptr) \ +{ \ + u32 __lo, __hi; \ + u32 __user *__ptr =3D (u32 __user *)(ptr); \ + \ + __asm__ __volatile__ ( \ + "1:\n" \ + " ld.w %1, %3 \n" \ + "2:\n" \ + " ld.w %2, %4 \n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \ + : "+r" (__gu_err), "=3D&r" (__lo), "=3Dr" (__hi) \ + : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \ + if (__gu_err) \ + __hi =3D 0; \ + (val) =3D (__typeof__(val))((__typeof__((val)-(val))) \ + ((((u64)__hi << 32) | __lo))); \ +} +#endif /* CONFIG_64BIT */ + #define __put_user_common(ptr, size) \ do { \ switch (size) { \ case 1: __put_data_asm("st.b", ptr); break; \ case 2: __put_data_asm("st.h", ptr); break; \ case 4: __put_data_asm("st.w", ptr); break; \ - case 8: __put_data_asm("st.d", ptr); break; \ + case 8: __put_data_asm_8(ptr); break; \ default: BUILD_BUG(); break; \ } \ } while (0) @@ -190,6 +223,30 @@ do { \ : "Jr" (__pu_val)); \ } =20 +#ifdef CONFIG_64BIT +#define __put_data_asm_8(ptr) \ + __put_data_asm("st.d", ptr) +#else /* !CONFIG_64BIT */ +#define __put_data_asm_8(ptr) \ +{ \ + u32 __user *__ptr =3D (u32 __user *)(ptr); \ + u64 __x =3D (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \ + \ + __asm__ __volatile__ ( \ + "1:\n" \ + " st.w %z3, %1 \n" \ + "2:\n" \ + " st.w %z4, %2 \n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 
3b, %0) \ + : "+r" (__pu_err), \ + "=3Dm" (__ptr[__LSW]), \ + "=3Dm" (__ptr[__MSW]) \ + : "rJ" (__x), "rJ" (__x >> 32)); \ +} +#endif /* CONFIG_64BIT */ + #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ int __gu_err =3D 0; \ diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_use= r.S index 7a0db643b286..58c667dde882 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -13,11 +13,15 @@ #include =20 SYM_FUNC_START(__clear_user) +#ifdef CONFIG_32BIT + b __clear_user_generic +#else /* * Some CPUs support hardware unaligned access */ ALTERNATIVE "b __clear_user_generic", \ "b __clear_user_fast", CPU_FEATURE_UAL +#endif SYM_FUNC_END(__clear_user) =20 EXPORT_SYMBOL(__clear_user) @@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user) * a1: size */ SYM_FUNC_START(__clear_user_generic) - beqz a1, 2f + beqz a1, 2f =20 -1: st.b zero, a0, 0 - addi.d a0, a0, 1 - addi.d a1, a1, -1 - bgtz a1, 1b +1: st.b zero, a0, 0 + PTR_ADDI a0, a0, 1 + PTR_ADDI a1, a1, -1 + bgtz a1, 1b =20 -2: move a0, a1 - jr ra +2: move a0, a1 + jr ra =20 - _asm_extable 1b, 2b + _asm_extable 1b, 2b SYM_FUNC_END(__clear_user_generic) =20 +#ifdef CONFIG_64BIT /* * unsigned long __clear_user_fast(void *addr, unsigned long size) * @@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast) SYM_FUNC_END(__clear_user_fast) =20 STACK_FRAME_NON_STANDARD __clear_user_fast +#endif diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index 095ce9181c6c..c7264b779f6e 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S @@ -13,11 +13,15 @@ #include =20 SYM_FUNC_START(__copy_user) +#ifdef CONFIG_32BIT + b __copy_user_generic +#else /* * Some CPUs support hardware unaligned access */ ALTERNATIVE "b __copy_user_generic", \ "b __copy_user_fast", CPU_FEATURE_UAL +#endif SYM_FUNC_END(__copy_user) =20 EXPORT_SYMBOL(__copy_user) @@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user) * a2: n */ SYM_FUNC_START(__copy_user_generic) 
- beqz a2, 3f + beqz a2, 3f =20 -1: ld.b t0, a1, 0 -2: st.b t0, a0, 0 - addi.d a0, a0, 1 - addi.d a1, a1, 1 - addi.d a2, a2, -1 - bgtz a2, 1b +1: ld.b t0, a1, 0 +2: st.b t0, a0, 0 + PTR_ADDI a0, a0, 1 + PTR_ADDI a1, a1, 1 + PTR_ADDI a2, a2, -1 + bgtz a2, 1b =20 -3: move a0, a2 - jr ra +3: move a0, a2 + jr ra =20 - _asm_extable 1b, 3b - _asm_extable 2b, 3b + _asm_extable 1b, 3b + _asm_extable 2b, 3b SYM_FUNC_END(__copy_user_generic) =20 +#ifdef CONFIG_64BIT /* * unsigned long __copy_user_fast(void *to, const void *from, unsigned lon= g n) * @@ -281,3 +286,4 @@ SYM_FUNC_START(__copy_user_fast) SYM_FUNC_END(__copy_user_fast) =20 STACK_FRAME_NON_STANDARD __copy_user_fast +#endif --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 85AC02C11CD; Thu, 27 Nov 2025 15:56:54 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259014; cv=none; b=EfkDwlA6BRAK1Ir4TH7k8aBkD62F9sq5J0qnGU5AYSj89TAVwNywYXgQPXEpDdfX3r7Sg8FKZ5BkvoT2BHYqpAhq1PMT9G+uXLFPGchXSUgf3ii5PczDtYH/RiDJu5u8S/piZS7iJdr1yIL46GmMB9W43gE4H6SuInvYpaf8NQA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259014; c=relaxed/simple; bh=ZWdljuUwL9qh/uCfFS2wv5ubKmxATrRkFda//e3Gx8M=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=B4Syx4T8ceR401fMzHag6m+qFctt7KNeJs6LB+agzy4G18J5TRE76s9dafeeAU66p69YuuZuoZHipXLNR2JQJyA9wQsUoV0D9y2Z/ONazAbBa13DPGnsarDtPxOAX5T7cIiRt2Cg5NZ2b0y2DP1fe0huaMxhVLKy6wR+AdglowA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 6E041C4CEF8; Thu, 27 Nov 2025 15:56:51 +0000 
(UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 11/14] LoongArch: Adjust misc routines for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:29 +0800 Message-ID: <20251127154832.137925-12-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust misc routines for both 32BIT and 64BIT, including: checksum, jump label, unaligned access emulator, sleep/wakeup routines, etc. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/checksum.h | 4 ++ arch/loongarch/include/asm/jump_label.h | 12 ++++- arch/loongarch/include/asm/string.h | 2 + arch/loongarch/kernel/unaligned.c | 30 ++++++++--- arch/loongarch/lib/unaligned.S | 72 ++++++++++++------------- arch/loongarch/power/suspend_asm.S | 72 ++++++++++++------------- 6 files changed, 112 insertions(+), 80 deletions(-) diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include= /asm/checksum.h index cabbf6af44c4..cc2754e0aa25 100644 --- a/arch/loongarch/include/asm/checksum.h +++ b/arch/loongarch/include/asm/checksum.h @@ -9,6 +9,8 @@ #include #include =20 +#ifdef CONFIG_64BIT + #define _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, @@ -61,6 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsig= ned int ihl) extern unsigned int do_csum(const unsigned char *buff, int len); #define do_csum do_csum =20 +#endif + #include =20 #endif /* __ASM_CHECKSUM_H */ diff --git a/arch/loongarch/include/asm/jump_label.h 
b/arch/loongarch/inclu= de/asm/jump_label.h index 4000c7603d8e..dcaecf69ea5a 100644 --- a/arch/loongarch/include/asm/jump_label.h +++ b/arch/loongarch/include/asm/jump_label.h @@ -10,15 +10,23 @@ #ifndef __ASSEMBLER__ =20 #include +#include +#include =20 #define JUMP_LABEL_NOP_SIZE 4 =20 +#ifdef CONFIG_32BIT +#define JUMP_LABEL_TYPE ".long " +#else +#define JUMP_LABEL_TYPE ".quad " +#endif + /* This macro is also expanded on the Rust side. */ #define JUMP_TABLE_ENTRY(key, label) \ ".pushsection __jump_table, \"aw\" \n\t" \ - ".align 3 \n\t" \ + ".align " __stringify(PTRLOG) " \n\t" \ ".long 1b - ., " label " - . \n\t" \ - ".quad " key " - . \n\t" \ + JUMP_LABEL_TYPE key " - . \n\t" \ ".popsection \n\t" =20 #define ARCH_STATIC_BRANCH_ASM(key, label) \ diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/a= sm/string.h index 5bb5a90d2681..bfa3fd879c7f 100644 --- a/arch/loongarch/include/asm/string.h +++ b/arch/loongarch/include/asm/string.h @@ -5,6 +5,7 @@ #ifndef _ASM_STRING_H #define _ASM_STRING_H =20 +#ifdef CONFIG_64BIT #define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, size_t __count); extern void *__memset(void *__s, int __c, size_t __count); @@ -16,6 +17,7 @@ extern void *__memcpy(void *__to, __const__ void *__from,= size_t __n); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *__dest, __const__ void *__src, size_t __n); extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); +#endif =20 #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) =20 diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unal= igned.c index 487be604b96a..cc929c9fe7e9 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -27,12 +27,21 @@ static u32 unaligned_instructions_user; static u32 unaligned_instructions_kernel; #endif =20 -static inline unsigned long read_fpr(unsigned int idx) +static inline u64 read_fpr(unsigned int idx) { +#ifdef CONFIG_64BIT #define 
READ_FPR(idx, __value) \ __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=3Dr"(__value)); - - unsigned long __value; +#else +#define READ_FPR(idx, __value) \ +{ \ + u32 __value_lo, __value_hi; \ + __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=3Dr"(__value_lo))= ; \ + __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=3Dr"(__value_hi))= ; \ + __value =3D (__value_lo | ((u64)__value_hi << 32)); \ +} +#endif + u64 __value; =20 switch (idx) { case 0: @@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx) return __value; } =20 -static inline void write_fpr(unsigned int idx, unsigned long value) +static inline void write_fpr(unsigned int idx, u64 value) { +#ifdef CONFIG_64BIT #define WRITE_FPR(idx, value) \ __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value)); - +#else +#define WRITE_FPR(idx, value) \ +{ \ + u32 value_lo =3D value; \ + u32 value_hi =3D value >> 32; \ + __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \ + __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \ +} +#endif switch (idx) { case 0: WRITE_FPR(0, value); @@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void= __user *addr, unsigned i bool sign, write; bool user =3D user_mode(regs); unsigned int res, size =3D 0; - unsigned long value =3D 0; + u64 value =3D 0; union loongarch_instruction insn; =20 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S index 185f82d85810..470c0bfa3463 100644 --- a/arch/loongarch/lib/unaligned.S +++ b/arch/loongarch/lib/unaligned.S @@ -24,35 +24,35 @@ * a3: sign */ SYM_FUNC_START(unaligned_read) - beqz a2, 5f + beqz a2, 5f =20 - li.w t2, 0 - addi.d t0, a2, -1 - slli.d t1, t0, 3 - add.d a0, a0, t0 + li.w t2, 0 + LONG_ADDI t0, a2, -1 + PTR_SLLI t1, t0, LONGLOG + PTR_ADD a0, a0, t0 =20 - beqz a3, 2f -1: ld.b t3, a0, 0 - b 3f + beqz a3, 2f +1: ld.b t3, a0, 0 + b 3f =20 -2: ld.bu 
t3, a0, 0 -3: sll.d t3, t3, t1 - or t2, t2, t3 - addi.d t1, t1, -8 - addi.d a0, a0, -1 - addi.d a2, a2, -1 - bgtz a2, 2b -4: st.d t2, a1, 0 +2: ld.bu t3, a0, 0 +3: LONG_SLLV t3, t3, t1 + or t2, t2, t3 + LONG_ADDI t1, t1, -8 + PTR_ADDI a0, a0, -1 + PTR_ADDI a2, a2, -1 + bgtz a2, 2b +4: LONG_S t2, a1, 0 =20 - move a0, a2 - jr ra + move a0, a2 + jr ra =20 -5: li.w a0, -EFAULT - jr ra +5: li.w a0, -EFAULT + jr ra =20 - _asm_extable 1b, .L_fixup_handle_unaligned - _asm_extable 2b, .L_fixup_handle_unaligned - _asm_extable 4b, .L_fixup_handle_unaligned + _asm_extable 1b, .L_fixup_handle_unaligned + _asm_extable 2b, .L_fixup_handle_unaligned + _asm_extable 4b, .L_fixup_handle_unaligned SYM_FUNC_END(unaligned_read) =20 /* @@ -63,21 +63,21 @@ SYM_FUNC_END(unaligned_read) * a2: n */ SYM_FUNC_START(unaligned_write) - beqz a2, 3f + beqz a2, 3f =20 - li.w t0, 0 -1: srl.d t1, a1, t0 -2: st.b t1, a0, 0 - addi.d t0, t0, 8 - addi.d a2, a2, -1 - addi.d a0, a0, 1 - bgtz a2, 1b + li.w t0, 0 +1: LONG_SRLV t1, a1, t0 +2: st.b t1, a0, 0 + LONG_ADDI t0, t0, 8 + PTR_ADDI a2, a2, -1 + PTR_ADDI a0, a0, 1 + bgtz a2, 1b =20 - move a0, a2 - jr ra + move a0, a2 + jr ra =20 -3: li.w a0, -EFAULT - jr ra +3: li.w a0, -EFAULT + jr ra =20 - _asm_extable 2b, .L_fixup_handle_unaligned + _asm_extable 2b, .L_fixup_handle_unaligned SYM_FUNC_END(unaligned_write) diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/susp= end_asm.S index df0865df26fa..c8119ad5fb2c 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -14,41 +14,41 @@ =20 /* preparatory stuff */ .macro SETUP_SLEEP - addi.d sp, sp, -PT_SIZE - st.d $r1, sp, PT_R1 - st.d $r2, sp, PT_R2 - st.d $r3, sp, PT_R3 - st.d $r4, sp, PT_R4 - st.d $r21, sp, PT_R21 - st.d $r22, sp, PT_R22 - st.d $r23, sp, PT_R23 - st.d $r24, sp, PT_R24 - st.d $r25, sp, PT_R25 - st.d $r26, sp, PT_R26 - st.d $r27, sp, PT_R27 - st.d $r28, sp, PT_R28 - st.d $r29, sp, PT_R29 - st.d $r30, sp, PT_R30 - st.d $r31, sp, PT_R31 + 
PTR_ADDI sp, sp, -PT_SIZE + REG_S $r1, sp, PT_R1 + REG_S $r2, sp, PT_R2 + REG_S $r3, sp, PT_R3 + REG_S $r4, sp, PT_R4 + REG_S $r21, sp, PT_R21 + REG_S $r22, sp, PT_R22 + REG_S $r23, sp, PT_R23 + REG_S $r24, sp, PT_R24 + REG_S $r25, sp, PT_R25 + REG_S $r26, sp, PT_R26 + REG_S $r27, sp, PT_R27 + REG_S $r28, sp, PT_R28 + REG_S $r29, sp, PT_R29 + REG_S $r30, sp, PT_R30 + REG_S $r31, sp, PT_R31 .endm =20 .macro SETUP_WAKEUP - ld.d $r1, sp, PT_R1 - ld.d $r2, sp, PT_R2 - ld.d $r3, sp, PT_R3 - ld.d $r4, sp, PT_R4 - ld.d $r21, sp, PT_R21 - ld.d $r22, sp, PT_R22 - ld.d $r23, sp, PT_R23 - ld.d $r24, sp, PT_R24 - ld.d $r25, sp, PT_R25 - ld.d $r26, sp, PT_R26 - ld.d $r27, sp, PT_R27 - ld.d $r28, sp, PT_R28 - ld.d $r29, sp, PT_R29 - ld.d $r30, sp, PT_R30 - ld.d $r31, sp, PT_R31 - addi.d sp, sp, PT_SIZE + REG_L $r1, sp, PT_R1 + REG_L $r2, sp, PT_R2 + REG_L $r3, sp, PT_R3 + REG_L $r4, sp, PT_R4 + REG_L $r21, sp, PT_R21 + REG_L $r22, sp, PT_R22 + REG_L $r23, sp, PT_R23 + REG_L $r24, sp, PT_R24 + REG_L $r25, sp, PT_R25 + REG_L $r26, sp, PT_R26 + REG_L $r27, sp, PT_R27 + REG_L $r28, sp, PT_R28 + REG_L $r29, sp, PT_R29 + REG_L $r30, sp, PT_R30 + REG_L $r31, sp, PT_R31 + PTR_ADDI sp, sp, PT_SIZE .endm =20 .text @@ -59,15 +59,15 @@ SYM_FUNC_START(loongarch_suspend_enter) SETUP_SLEEP =20 la.pcrel t0, acpi_saved_sp - st.d sp, t0, 0 + REG_S sp, t0, 0 =20 bl __flush_cache_all =20 /* Pass RA and SP to BIOS */ - addi.d a1, sp, 0 + PTR_ADDI a1, sp, 0 la.pcrel a0, loongarch_wakeup_start la.pcrel t0, loongarch_suspend_addr - ld.d t0, t0, 0 + REG_L t0, t0, 0 jirl ra, t0, 0 /* Call BIOS's STR sleep routine */ =20 /* @@ -83,7 +83,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) csrwr t0, LOONGARCH_CSR_CRMD =20 la.pcrel t0, acpi_saved_sp - ld.d sp, t0, 0 + REG_L sp, t0, 0 =20 SETUP_WAKEUP jr ra --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 
(256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 9FD9E3375AE; Thu, 27 Nov 2025 15:57:23 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259043; cv=none; b=dZv2iUbnxlotOROT6U1nxe0TT1dWhv1HgaumIXVNWBsoiDRCbwGRXqqtu+L42lwV2OWZVB5nujWW9Ny/3PNBfizNX+Y3k81jB2JgLggg7eEFMTa0QQfOZTJ2fLtT+96maI8Q/7GN6u5TjJvCvfNYtl0Lh1iFKFc58R4o5dy771E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259043; c=relaxed/simple; bh=7o9qPwjb/FCkf7nuG8/kVz3m9lUqo+mDFIEK2BWZtFk=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=MVH1Ofply9+8dQTkeU4r7u1qaUWSjxMfsAuskSJ9f0r+GUCGZ/pUugs/5/NE5Khe1xxlbScdHd0S6zNt1buymIZx6X7mF6yK62kEGYBEvva9yF2HviFT3/zH3FSG9Z3ergljRa0jHrvOMifCYO7Oe5HLa48qudVgOfhOWOhocis= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9CF54C113D0; Thu, 27 Nov 2025 15:57:20 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 12/14] LoongArch: Adjust VDSO/VSYSCALL for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:30 +0800 Message-ID: <20251127154832.137925-13-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust VDSO/VSYSCALL because read_cpu_id() for 32BIT/64BIT are different, and LoongArch32 doesn't support GENERIC_GETTIMEOFDAY now (will be supported in future). 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/vdso/gettimeofday.h | 4 ++++ arch/loongarch/kernel/time.c | 2 ++ arch/loongarch/vdso/Makefile | 7 ++++++- arch/loongarch/vdso/vdso.lds.S | 4 ++-- arch/loongarch/vdso/vgetcpu.c | 8 ++++++++ 5 files changed, 22 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarc= h/include/asm/vdso/gettimeofday.h index dcafabca9bb6..bae76767c693 100644 --- a/arch/loongarch/include/asm/vdso/gettimeofday.h +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -12,6 +12,8 @@ #include #include =20 +#ifdef CONFIG_GENERIC_GETTIMEOFDAY + #define VDSO_HAS_CLOCK_GETRES 1 =20 static __always_inline long gettimeofday_fallback( @@ -89,6 +91,8 @@ static inline bool loongarch_vdso_hres_capable(void) } #define __arch_vdso_hres_capable loongarch_vdso_hres_capable =20 +#endif /* CONFIG_GENERIC_GETTIMEOFDAY */ + #endif /* !__ASSEMBLER__ */ =20 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 5892f6da07a5..dbaaabcaf6f0 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -212,7 +212,9 @@ static struct clocksource clocksource_const =3D { .read =3D read_const_counter, .mask =3D CLOCKSOURCE_MASK(64), .flags =3D CLOCK_SOURCE_IS_CONTINUOUS, +#ifdef CONFIG_GENERIC_GETTIMEOFDAY .vdso_clock_mode =3D VDSO_CLOCKMODE_CPU, +#endif }; =20 int __init constant_clocksource_init(void) diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index d8316f993482..a8ac0e811e39 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -4,8 +4,9 @@ # Include the generic Makefile to check the built vdso. 
include $(srctree)/lib/vdso/Makefile.include =20 -obj-vdso-y :=3D elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \ +obj-vdso-y :=3D elf.o vgetcpu.o vgetrandom.o \ vgetrandom-chacha.o sigreturn.o +obj-vdso-$(CONFIG_GENERIC_GETTIMEOFDAY) +=3D vgettimeofday.o =20 # Common compiler flags between ABIs. ccflags-vdso :=3D \ @@ -16,6 +17,10 @@ ccflags-vdso :=3D \ $(CLANG_FLAGS) \ -D__VDSO__ =20 +ifdef CONFIG_32BIT +ccflags-vdso +=3D -DBUILD_VDSO32 +endif + cflags-vdso :=3D $(ccflags-vdso) \ -isystem $(shell $(CC) -print-file-name=3Dinclude) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S index 8ff986499947..ac537e02beb1 100644 --- a/arch/loongarch/vdso/vdso.lds.S +++ b/arch/loongarch/vdso/vdso.lds.S @@ -7,8 +7,6 @@ #include #include =20 -OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch") - OUTPUT_ARCH(loongarch) =20 SECTIONS @@ -63,9 +61,11 @@ VERSION LINUX_5.10 { global: __vdso_getcpu; +#ifdef CONFIG_GENERIC_GETTIMEOFDAY __vdso_clock_getres; __vdso_clock_gettime; __vdso_gettimeofday; +#endif __vdso_getrandom; __vdso_rt_sigreturn; local: *; diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c index 5301cd9d0f83..73af49242ecd 100644 --- a/arch/loongarch/vdso/vgetcpu.c +++ b/arch/loongarch/vdso/vgetcpu.c @@ -10,11 +10,19 @@ static __always_inline int read_cpu_id(void) { int cpu_id; =20 +#ifdef CONFIG_64BIT __asm__ __volatile__( " rdtime.d $zero, %0\n" : "=3Dr" (cpu_id) : : "memory"); +#else + __asm__ __volatile__( + " rdtimel.w $zero, %0\n" + : "=3Dr" (cpu_id) + : + : "memory"); +#endif =20 return cpu_id; } --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E9972337681; Thu, 27 Nov 2025 15:57:45 +0000 
(UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259066; cv=none; b=LTHPTx9DiH8jJHMOzo2TKVxUXaH6AD3A7xGrVqPGQANz6Trttn3S1DXZZS6S9IUyglPFU7j+GlmcJ9DMS8V6jfIXmHGw6qcsb5krY3g7IFR33fofbxvjPWfxcvz4b9nQ0/rrSIM3RfhaXJL4uOzW97rAyH0SLsqSSUleDLBFDLY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259066; c=relaxed/simple; bh=acE6e9H/coyZSOCB28zYbzquvvzPBhjA7YxZzxN3mrA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Nc+ERii4/b4brSvgIZqpFMmBsxv984+Y+tmhyZWqpSHrqrb+w3aLstY2xiRe8nG/HVrs2cwEP9lrU5pFCSa/RCtmOVR0U+7h8ZdN/UMfEjFiXK5cTT/8AGUfvQ76yt2DKFIzFFOwffJccfCrP99Aojle6cFHkEsFES9GQr6AP2A= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id 46208C116C6; Thu, 27 Nov 2025 15:57:42 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 13/14] LoongArch: Adjust default config files for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:31 +0800 Message-ID: <20251127154832.137925-14-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Add loongson32_defconfig (for 32BIT) and rename loongson3_defconfig to loongson64_defconfig (for 64BIT). Also adjust graphics drivers, such as FB_EFI is replaced with EFIDRM. 
Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/Makefile | 7 +- arch/loongarch/configs/loongson32_defconfig | 1104 +++++++++++++++++ ...ongson3_defconfig =3D> loongson64_defconfig} | 6 +- 3 files changed, 1113 insertions(+), 4 deletions(-) create mode 100644 arch/loongarch/configs/loongson32_defconfig rename arch/loongarch/configs/{loongson3_defconfig =3D> loongson64_defconf= ig} (99%) diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 96ca1a688984..8d45b860fe56 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -5,7 +5,12 @@ =20 boot :=3D arch/loongarch/boot =20 -KBUILD_DEFCONFIG :=3D loongson3_defconfig +ifeq ($(shell uname -m),loongarch32) +KBUILD_DEFCONFIG :=3D loongson32_defconfig +else +KBUILD_DEFCONFIG :=3D loongson64_defconfig +endif + KBUILD_DTBS :=3D dtbs =20 image-name-y :=3D vmlinux diff --git a/arch/loongarch/configs/loongson32_defconfig b/arch/loongarch/c= onfigs/loongson32_defconfig new file mode 100644 index 000000000000..af6ae7e5ccc4 --- /dev/null +++ b/arch/loongarch/configs/loongson32_defconfig @@ -0,0 +1,1104 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_ZSTD=3Dy +CONFIG_SYSVIPC=3Dy +CONFIG_POSIX_MQUEUE=3Dy +CONFIG_NO_HZ=3Dy +CONFIG_HIGH_RES_TIMERS=3Dy +CONFIG_BPF_SYSCALL=3Dy +CONFIG_PREEMPT=3Dy +CONFIG_PREEMPT_DYNAMIC=3Dy +CONFIG_BSD_PROCESS_ACCT=3Dy +CONFIG_BSD_PROCESS_ACCT_V3=3Dy +CONFIG_TASKSTATS=3Dy +CONFIG_TASK_DELAY_ACCT=3Dy +CONFIG_TASK_XACCT=3Dy +CONFIG_TASK_IO_ACCOUNTING=3Dy +CONFIG_PSI=3Dy +CONFIG_IKCONFIG=3Dy +CONFIG_IKCONFIG_PROC=3Dy +CONFIG_IKHEADERS=3Dy +CONFIG_LOG_BUF_SHIFT=3D18 +CONFIG_MEMCG=3Dy +CONFIG_BLK_CGROUP=3Dy +CONFIG_CFS_BANDWIDTH=3Dy +CONFIG_CGROUP_PIDS=3Dy +CONFIG_CGROUP_RDMA=3Dy +CONFIG_CGROUP_DMEM=3Dy +CONFIG_CGROUP_FREEZER=3Dy +CONFIG_CPUSETS=3Dy +CONFIG_CGROUP_DEVICE=3Dy +CONFIG_CGROUP_CPUACCT=3Dy +CONFIG_CGROUP_PERF=3Dy +CONFIG_CGROUP_BPF=3Dy +CONFIG_CGROUP_MISC=3Dy +CONFIG_NAMESPACES=3Dy +CONFIG_USER_NS=3Dy 
+CONFIG_CHECKPOINT_RESTORE=3Dy +CONFIG_SCHED_AUTOGROUP=3Dy +CONFIG_RELAY=3Dy +CONFIG_BLK_DEV_INITRD=3Dy +CONFIG_EXPERT=3Dy +CONFIG_KALLSYMS_ALL=3Dy +CONFIG_PERF_EVENTS=3Dy +CONFIG_KEXEC=3Dy +CONFIG_LOONGARCH=3Dy +CONFIG_32BIT=3Dy +CONFIG_32BIT_STANDARD=3Dy +CONFIG_MACH_LOONGSON32=3Dy +CONFIG_PAGE_SIZE_16KB=3Dy +CONFIG_HZ_250=3Dy +CONFIG_DMI=3Dy +CONFIG_EFI=3Dy +CONFIG_SUSPEND=3Dy +CONFIG_HIBERNATION=3Dy +CONFIG_ACPI=3Dy +CONFIG_ACPI_SPCR_TABLE=3Dy +CONFIG_ACPI_TAD=3Dy +CONFIG_ACPI_DOCK=3Dy +CONFIG_ACPI_IPMI=3Dm +CONFIG_ACPI_HOTPLUG_CPU=3Dy +CONFIG_ACPI_PCI_SLOT=3Dy +CONFIG_ACPI_HOTPLUG_MEMORY=3Dy +CONFIG_ACPI_BGRT=3Dy +CONFIG_CPU_FREQ=3Dy +CONFIG_CPU_FREQ_GOV_POWERSAVE=3Dy +CONFIG_CPU_FREQ_GOV_USERSPACE=3Dy +CONFIG_CPU_FREQ_GOV_ONDEMAND=3Dy +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=3Dy +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=3Dy +CONFIG_VIRTUALIZATION=3Dy +CONFIG_JUMP_LABEL=3Dy +CONFIG_MODULES=3Dy +CONFIG_MODULE_FORCE_LOAD=3Dy +CONFIG_MODULE_UNLOAD=3Dy +CONFIG_MODULE_FORCE_UNLOAD=3Dy +CONFIG_MODVERSIONS=3Dy +CONFIG_MODULE_COMPRESS=3Dy +CONFIG_MODULE_COMPRESS_ZSTD=3Dy +CONFIG_MODULE_DECOMPRESS=3Dy +CONFIG_BLK_DEV_ZONED=3Dy +CONFIG_BLK_DEV_THROTTLING=3Dy +CONFIG_BLK_WBT=3Dy +CONFIG_BLK_CGROUP_IOLATENCY=3Dy +CONFIG_BLK_CGROUP_FC_APPID=3Dy +CONFIG_BLK_CGROUP_IOCOST=3Dy +CONFIG_BLK_CGROUP_IOPRIO=3Dy +CONFIG_BLK_INLINE_ENCRYPTION=3Dy +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=3Dy +CONFIG_PARTITION_ADVANCED=3Dy +CONFIG_BSD_DISKLABEL=3Dy +CONFIG_UNIXWARE_DISKLABEL=3Dy +CONFIG_CMDLINE_PARTITION=3Dy +CONFIG_IOSCHED_BFQ=3Dy +CONFIG_BFQ_GROUP_IOSCHED=3Dy +CONFIG_BINFMT_MISC=3Dm +CONFIG_ZSWAP=3Dy +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=3Dy +CONFIG_ZSMALLOC=3Dy +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=3Dy +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set +CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=3Dy +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set +CONFIG_MEMORY_HOTREMOVE=3Dy +CONFIG_KSM=3Dy +CONFIG_CMA=3Dy 
+CONFIG_CMA_SYSFS=3Dy +CONFIG_USERFAULTFD=3Dy +CONFIG_NET=3Dy +CONFIG_PACKET=3Dy +CONFIG_UNIX=3Dy +CONFIG_TLS=3Dm +CONFIG_TLS_DEVICE=3Dy +CONFIG_XFRM_USER=3Dy +CONFIG_NET_KEY=3Dy +CONFIG_XDP_SOCKETS=3Dy +CONFIG_INET=3Dy +CONFIG_IP_MULTICAST=3Dy +CONFIG_IP_ADVANCED_ROUTER=3Dy +CONFIG_IP_MULTIPLE_TABLES=3Dy +CONFIG_IP_ROUTE_MULTIPATH=3Dy +CONFIG_IP_ROUTE_VERBOSE=3Dy +CONFIG_IP_PNP=3Dy +CONFIG_IP_PNP_DHCP=3Dy +CONFIG_IP_PNP_BOOTP=3Dy +CONFIG_IP_PNP_RARP=3Dy +CONFIG_NET_IPIP=3Dm +CONFIG_NET_IPGRE_DEMUX=3Dm +CONFIG_NET_IPGRE=3Dm +CONFIG_NET_IPGRE_BROADCAST=3Dy +CONFIG_IP_MROUTE=3Dy +CONFIG_IP_MROUTE_MULTIPLE_TABLES=3Dy +CONFIG_IP_PIMSM_V1=3Dy +CONFIG_IP_PIMSM_V2=3Dy +CONFIG_INET_AH=3Dm +CONFIG_INET_ESP=3Dm +CONFIG_INET_ESP_OFFLOAD=3Dm +CONFIG_INET_ESPINTCP=3Dy +CONFIG_INET_IPCOMP=3Dm +CONFIG_INET_UDP_DIAG=3Dy +CONFIG_TCP_CONG_ADVANCED=3Dy +CONFIG_TCP_CONG_BIC=3Dy +CONFIG_TCP_CONG_HSTCP=3Dm +CONFIG_TCP_CONG_HYBLA=3Dm +CONFIG_TCP_CONG_VEGAS=3Dm +CONFIG_TCP_CONG_NV=3Dm +CONFIG_TCP_CONG_SCALABLE=3Dm +CONFIG_TCP_CONG_VENO=3Dm +CONFIG_TCP_CONG_DCTCP=3Dm +CONFIG_TCP_CONG_CDG=3Dm +CONFIG_TCP_CONG_BBR=3Dy +CONFIG_IPV6_ROUTER_PREF=3Dy +CONFIG_IPV6_ROUTE_INFO=3Dy +CONFIG_INET6_AH=3Dm +CONFIG_INET6_ESP=3Dm +CONFIG_INET6_ESP_OFFLOAD=3Dm +CONFIG_INET6_ESPINTCP=3Dy +CONFIG_INET6_IPCOMP=3Dm +CONFIG_IPV6_MULTIPLE_TABLES=3Dy +CONFIG_IPV6_MROUTE=3Dy +CONFIG_MPTCP=3Dy +CONFIG_NETWORK_PHY_TIMESTAMPING=3Dy +CONFIG_NETFILTER=3Dy +CONFIG_BRIDGE_NETFILTER=3Dm +CONFIG_NETFILTER_NETLINK_LOG=3Dm +CONFIG_NF_CONNTRACK=3Dm +CONFIG_NF_CONNTRACK_AMANDA=3Dm +CONFIG_NF_CONNTRACK_FTP=3Dm +CONFIG_NF_CONNTRACK_NETBIOS_NS=3Dm +CONFIG_NF_CONNTRACK_SNMP=3Dm +CONFIG_NF_CONNTRACK_PPTP=3Dm +CONFIG_NF_CONNTRACK_TFTP=3Dm +CONFIG_NF_CT_NETLINK=3Dm +CONFIG_NF_TABLES=3Dm +CONFIG_NF_TABLES_INET=3Dy +CONFIG_NFT_CT=3Dm +CONFIG_NFT_CONNLIMIT=3Dm +CONFIG_NFT_LOG=3Dm +CONFIG_NFT_LIMIT=3Dm +CONFIG_NFT_MASQ=3Dm +CONFIG_NFT_REDIR=3Dm +CONFIG_NFT_NAT=3Dm +CONFIG_NFT_TUNNEL=3Dm +CONFIG_NFT_QUEUE=3Dm +CONFIG_NFT_QUOTA=3Dm 
+CONFIG_NFT_REJECT=3Dm +CONFIG_NFT_COMPAT=3Dm +CONFIG_NFT_HASH=3Dm +CONFIG_NFT_FIB_INET=3Dm +CONFIG_NFT_SOCKET=3Dm +CONFIG_NFT_OSF=3Dm +CONFIG_NFT_TPROXY=3Dm +CONFIG_NETFILTER_XT_SET=3Dm +CONFIG_NETFILTER_XT_TARGET_AUDIT=3Dm +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=3Dm +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=3Dm +CONFIG_NETFILTER_XT_TARGET_CONNMARK=3Dm +CONFIG_NETFILTER_XT_TARGET_CT=3Dm +CONFIG_NETFILTER_XT_TARGET_DSCP=3Dm +CONFIG_NETFILTER_XT_TARGET_HMARK=3Dm +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=3Dm +CONFIG_NETFILTER_XT_TARGET_LED=3Dm +CONFIG_NETFILTER_XT_TARGET_LOG=3Dm +CONFIG_NETFILTER_XT_TARGET_MARK=3Dm +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=3Dm +CONFIG_NETFILTER_XT_TARGET_TRACE=3Dm +CONFIG_NETFILTER_XT_TARGET_SECMARK=3Dm +CONFIG_NETFILTER_XT_TARGET_TCPMSS=3Dm +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=3Dm +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=3Dm +CONFIG_NETFILTER_XT_MATCH_BPF=3Dm +CONFIG_NETFILTER_XT_MATCH_CGROUP=3Dm +CONFIG_NETFILTER_XT_MATCH_CLUSTER=3Dm +CONFIG_NETFILTER_XT_MATCH_COMMENT=3Dm +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=3Dm +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=3Dm +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=3Dm +CONFIG_NETFILTER_XT_MATCH_CONNMARK=3Dm +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=3Dm +CONFIG_NETFILTER_XT_MATCH_CPU=3Dm +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=3Dm +CONFIG_NETFILTER_XT_MATCH_DSCP=3Dm +CONFIG_NETFILTER_XT_MATCH_ESP=3Dm +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=3Dm +CONFIG_NETFILTER_XT_MATCH_HELPER=3Dm +CONFIG_NETFILTER_XT_MATCH_IPCOMP=3Dm +CONFIG_NETFILTER_XT_MATCH_IPRANGE=3Dm +CONFIG_NETFILTER_XT_MATCH_IPVS=3Dm +CONFIG_NETFILTER_XT_MATCH_LENGTH=3Dm +CONFIG_NETFILTER_XT_MATCH_LIMIT=3Dm +CONFIG_NETFILTER_XT_MATCH_MAC=3Dm +CONFIG_NETFILTER_XT_MATCH_MARK=3Dm +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=3Dm +CONFIG_NETFILTER_XT_MATCH_NFACCT=3Dm +CONFIG_NETFILTER_XT_MATCH_OSF=3Dm +CONFIG_NETFILTER_XT_MATCH_OWNER=3Dm +CONFIG_NETFILTER_XT_MATCH_POLICY=3Dm +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=3Dm +CONFIG_NETFILTER_XT_MATCH_QUOTA=3Dm +CONFIG_NETFILTER_XT_MATCH_RATEEST=3Dm 
+CONFIG_NETFILTER_XT_MATCH_REALM=3Dm +CONFIG_NETFILTER_XT_MATCH_SOCKET=3Dm +CONFIG_NETFILTER_XT_MATCH_STATE=3Dm +CONFIG_NETFILTER_XT_MATCH_STATISTIC=3Dm +CONFIG_NETFILTER_XT_MATCH_STRING=3Dm +CONFIG_NETFILTER_XT_MATCH_TCPMSS=3Dm +CONFIG_NETFILTER_XT_MATCH_TIME=3Dm +CONFIG_NETFILTER_XT_MATCH_U32=3Dm +CONFIG_IP_SET=3Dm +CONFIG_IP_VS=3Dm +CONFIG_IP_VS_IPV6=3Dy +CONFIG_IP_VS_PROTO_TCP=3Dy +CONFIG_IP_VS_PROTO_UDP=3Dy +CONFIG_IP_VS_PROTO_ESP=3Dy +CONFIG_IP_VS_PROTO_AH=3Dy +CONFIG_IP_VS_PROTO_SCTP=3Dy +CONFIG_IP_VS_RR=3Dm +CONFIG_IP_VS_WRR=3Dm +CONFIG_IP_VS_NFCT=3Dy +CONFIG_NF_TABLES_IPV4=3Dy +CONFIG_NFT_DUP_IPV4=3Dm +CONFIG_NFT_FIB_IPV4=3Dm +CONFIG_NF_TABLES_ARP=3Dy +CONFIG_IP_NF_IPTABLES=3Dm +CONFIG_IP_NF_MATCH_AH=3Dm +CONFIG_IP_NF_MATCH_ECN=3Dm +CONFIG_IP_NF_MATCH_RPFILTER=3Dm +CONFIG_IP_NF_MATCH_TTL=3Dm +CONFIG_IP_NF_FILTER=3Dm +CONFIG_IP_NF_TARGET_REJECT=3Dm +CONFIG_IP_NF_TARGET_SYNPROXY=3Dm +CONFIG_IP_NF_NAT=3Dm +CONFIG_IP_NF_TARGET_MASQUERADE=3Dm +CONFIG_IP_NF_TARGET_NETMAP=3Dm +CONFIG_IP_NF_TARGET_REDIRECT=3Dm +CONFIG_IP_NF_MANGLE=3Dm +CONFIG_IP_NF_TARGET_ECN=3Dm +CONFIG_IP_NF_TARGET_TTL=3Dm +CONFIG_IP_NF_RAW=3Dm +CONFIG_IP_NF_SECURITY=3Dm +CONFIG_IP_NF_ARPTABLES=3Dm +CONFIG_IP_NF_ARPFILTER=3Dm +CONFIG_IP_NF_ARP_MANGLE=3Dm +CONFIG_NF_TABLES_IPV6=3Dy +CONFIG_NFT_FIB_IPV6=3Dm +CONFIG_IP6_NF_IPTABLES=3Dy +CONFIG_IP6_NF_MATCH_AH=3Dm +CONFIG_IP6_NF_MATCH_EUI64=3Dm +CONFIG_IP6_NF_MATCH_FRAG=3Dm +CONFIG_IP6_NF_MATCH_OPTS=3Dm +CONFIG_IP6_NF_MATCH_IPV6HEADER=3Dm +CONFIG_IP6_NF_MATCH_MH=3Dm +CONFIG_IP6_NF_MATCH_RPFILTER=3Dm +CONFIG_IP6_NF_MATCH_RT=3Dm +CONFIG_IP6_NF_MATCH_SRH=3Dm +CONFIG_IP6_NF_FILTER=3Dy +CONFIG_IP6_NF_TARGET_REJECT=3Dm +CONFIG_IP6_NF_TARGET_SYNPROXY=3Dm +CONFIG_IP6_NF_MANGLE=3Dm +CONFIG_IP6_NF_RAW=3Dm +CONFIG_IP6_NF_SECURITY=3Dm +CONFIG_IP6_NF_NAT=3Dm +CONFIG_IP6_NF_TARGET_MASQUERADE=3Dm +CONFIG_IP6_NF_TARGET_NPT=3Dm +CONFIG_NF_TABLES_BRIDGE=3Dm +CONFIG_NF_CONNTRACK_BRIDGE=3Dm +CONFIG_BRIDGE_NF_EBTABLES=3Dm +CONFIG_BRIDGE_EBT_BROUTE=3Dm 
+CONFIG_BRIDGE_EBT_T_FILTER=3Dm +CONFIG_BRIDGE_EBT_T_NAT=3Dm +CONFIG_BRIDGE_EBT_ARP=3Dm +CONFIG_BRIDGE_EBT_IP=3Dm +CONFIG_BRIDGE_EBT_IP6=3Dm +CONFIG_IP_SCTP=3Dm +CONFIG_RDS=3Dy +CONFIG_L2TP=3Dm +CONFIG_L2TP_V3=3Dy +CONFIG_L2TP_IP=3Dm +CONFIG_L2TP_ETH=3Dm +CONFIG_BRIDGE=3Dm +CONFIG_VLAN_8021Q=3Dm +CONFIG_VLAN_8021Q_GVRP=3Dy +CONFIG_VLAN_8021Q_MVRP=3Dy +CONFIG_LLC2=3Dm +CONFIG_NET_SCHED=3Dy +CONFIG_NET_SCH_HTB=3Dm +CONFIG_NET_SCH_PRIO=3Dm +CONFIG_NET_SCH_MULTIQ=3Dm +CONFIG_NET_SCH_RED=3Dm +CONFIG_NET_SCH_SFB=3Dm +CONFIG_NET_SCH_SFQ=3Dm +CONFIG_NET_SCH_TBF=3Dm +CONFIG_NET_SCH_CBS=3Dm +CONFIG_NET_SCH_GRED=3Dm +CONFIG_NET_SCH_NETEM=3Dm +CONFIG_NET_SCH_MQPRIO=3Dm +CONFIG_NET_SCH_SKBPRIO=3Dm +CONFIG_NET_SCH_QFQ=3Dm +CONFIG_NET_SCH_CODEL=3Dm +CONFIG_NET_SCH_FQ_CODEL=3Dm +CONFIG_NET_SCH_CAKE=3Dm +CONFIG_NET_SCH_FQ=3Dm +CONFIG_NET_SCH_PIE=3Dm +CONFIG_NET_SCH_FQ_PIE=3Dm +CONFIG_NET_SCH_INGRESS=3Dm +CONFIG_NET_SCH_DEFAULT=3Dy +CONFIG_NET_CLS_BASIC=3Dm +CONFIG_NET_CLS_FW=3Dm +CONFIG_NET_CLS_U32=3Dm +CONFIG_NET_CLS_FLOW=3Dm +CONFIG_NET_CLS_CGROUP=3Dm +CONFIG_NET_CLS_BPF=3Dm +CONFIG_NET_CLS_FLOWER=3Dm +CONFIG_NET_CLS_MATCHALL=3Dm +CONFIG_NET_CLS_ACT=3Dy +CONFIG_NET_ACT_POLICE=3Dm +CONFIG_NET_ACT_GACT=3Dm +CONFIG_NET_ACT_MIRRED=3Dm +CONFIG_NET_ACT_NAT=3Dm +CONFIG_NET_ACT_BPF=3Dm +CONFIG_OPENVSWITCH=3Dm +CONFIG_VSOCKETS=3Dm +CONFIG_VIRTIO_VSOCKETS=3Dm +CONFIG_NETLINK_DIAG=3Dy +CONFIG_CGROUP_NET_PRIO=3Dy +CONFIG_BPF_STREAM_PARSER=3Dy +CONFIG_BT=3Dm +CONFIG_BT_RFCOMM=3Dm +CONFIG_BT_RFCOMM_TTY=3Dy +CONFIG_BT_BNEP=3Dm +CONFIG_BT_BNEP_MC_FILTER=3Dy +CONFIG_BT_BNEP_PROTO_FILTER=3Dy +CONFIG_BT_HIDP=3Dm +CONFIG_BT_HS=3Dy +CONFIG_BT_HCIBTUSB=3Dm +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=3Dy +CONFIG_BT_HCIBTUSB_MTK=3Dy +CONFIG_BT_HCIUART=3Dm +CONFIG_BT_HCIUART_BCSP=3Dy +CONFIG_BT_HCIUART_ATH3K=3Dy +CONFIG_BT_HCIUART_INTEL=3Dy +CONFIG_BT_HCIUART_AG6XX=3Dy +CONFIG_BT_HCIBCM203X=3Dm +CONFIG_BT_HCIBPA10X=3Dm +CONFIG_BT_HCIBFUSB=3Dm +CONFIG_BT_HCIDTL1=3Dm +CONFIG_BT_HCIBT3C=3Dm +CONFIG_BT_HCIBLUECARD=3Dm 
+CONFIG_BT_HCIVHCI=3Dm +CONFIG_BT_MRVL=3Dm +CONFIG_BT_ATH3K=3Dm +CONFIG_BT_VIRTIO=3Dm +CONFIG_CFG80211=3Dm +CONFIG_CFG80211_WEXT=3Dy +CONFIG_MAC80211=3Dm +CONFIG_RFKILL=3Dm +CONFIG_RFKILL_INPUT=3Dy +CONFIG_NET_9P=3Dy +CONFIG_NET_9P_VIRTIO=3Dy +CONFIG_CEPH_LIB=3Dm +CONFIG_PCIEPORTBUS=3Dy +CONFIG_HOTPLUG_PCI_PCIE=3Dy +CONFIG_PCIEAER=3Dy +# CONFIG_PCIEASPM is not set +CONFIG_PCI_IOV=3Dy +CONFIG_HOTPLUG_PCI=3Dy +CONFIG_HOTPLUG_PCI_SHPC=3Dy +CONFIG_PCI_HOST_GENERIC=3Dy +CONFIG_PCCARD=3Dm +CONFIG_YENTA=3Dm +CONFIG_RAPIDIO=3Dy +CONFIG_RAPIDIO_TSI721=3Dy +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=3Dy +CONFIG_RAPIDIO_ENUM_BASIC=3Dm +CONFIG_RAPIDIO_CHMAN=3Dm +CONFIG_RAPIDIO_MPORT_CDEV=3Dm +CONFIG_UEVENT_HELPER=3Dy +CONFIG_DEVTMPFS=3Dy +CONFIG_DEVTMPFS_MOUNT=3Dy +CONFIG_FW_LOADER_COMPRESS=3Dy +CONFIG_FW_LOADER_COMPRESS_ZSTD=3Dy +CONFIG_EFI_ZBOOT=3Dy +CONFIG_EFI_BOOTLOADER_CONTROL=3Dm +CONFIG_EFI_CAPSULE_LOADER=3Dm +CONFIG_EFI_TEST=3Dm +CONFIG_MTD=3Dm +CONFIG_MTD_BLOCK=3Dm +CONFIG_MTD_CFI=3Dm +CONFIG_MTD_JEDECPROBE=3Dm +CONFIG_MTD_CFI_INTELEXT=3Dm +CONFIG_MTD_CFI_AMDSTD=3Dm +CONFIG_MTD_CFI_STAA=3Dm +CONFIG_MTD_RAM=3Dm +CONFIG_MTD_ROM=3Dm +CONFIG_MTD_RAW_NAND=3Dm +CONFIG_MTD_NAND_PLATFORM=3Dm +CONFIG_MTD_NAND_LOONGSON=3Dm +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=3Dy +CONFIG_MTD_NAND_ECC_SW_BCH=3Dy +CONFIG_MTD_UBI=3Dm +CONFIG_MTD_UBI_BLOCK=3Dy +CONFIG_PARPORT=3Dy +CONFIG_PARPORT_PC=3Dy +CONFIG_PARPORT_SERIAL=3Dy +CONFIG_PARPORT_PC_FIFO=3Dy +CONFIG_ZRAM=3Dm +CONFIG_ZRAM_BACKEND_LZ4=3Dy +CONFIG_ZRAM_BACKEND_LZ4HC=3Dy +CONFIG_ZRAM_BACKEND_ZSTD=3Dy +CONFIG_ZRAM_BACKEND_DEFLATE=3Dy +CONFIG_ZRAM_BACKEND_842=3Dy +CONFIG_ZRAM_BACKEND_LZO=3Dy +CONFIG_ZRAM_DEF_COMP_ZSTD=3Dy +CONFIG_ZRAM_WRITEBACK=3Dy +CONFIG_ZRAM_MEMORY_TRACKING=3Dy +CONFIG_ZRAM_MULTI_COMP=3Dy +CONFIG_BLK_DEV_LOOP=3Dy +CONFIG_BLK_DEV_DRBD=3Dm +CONFIG_BLK_DEV_NBD=3Dm +CONFIG_BLK_DEV_RAM=3Dy +CONFIG_BLK_DEV_RAM_SIZE=3D8192 +CONFIG_VIRTIO_BLK=3Dy +CONFIG_BLK_DEV_RBD=3Dm +CONFIG_BLK_DEV_NVME=3Dy +CONFIG_NVME_MULTIPATH=3Dy 
+CONFIG_NVME_RDMA=3Dm +CONFIG_NVME_FC=3Dm +CONFIG_NVME_TCP=3Dm +CONFIG_NVME_TARGET=3Dm +CONFIG_NVME_TARGET_PASSTHRU=3Dy +CONFIG_NVME_TARGET_LOOP=3Dm +CONFIG_NVME_TARGET_RDMA=3Dm +CONFIG_NVME_TARGET_FC=3Dm +CONFIG_NVME_TARGET_TCP=3Dm +CONFIG_EEPROM_AT24=3Dm +CONFIG_PVPANIC=3Dy +CONFIG_PVPANIC_MMIO=3Dm +CONFIG_PVPANIC_PCI=3Dm +CONFIG_BLK_DEV_SD=3Dy +CONFIG_BLK_DEV_SR=3Dy +CONFIG_CHR_DEV_SG=3Dy +CONFIG_CHR_DEV_SCH=3Dm +CONFIG_SCSI_CONSTANTS=3Dy +CONFIG_SCSI_LOGGING=3Dy +CONFIG_SCSI_SPI_ATTRS=3Dm +CONFIG_SCSI_FC_ATTRS=3Dm +CONFIG_SCSI_SAS_ATA=3Dy +CONFIG_ISCSI_TCP=3Dm +CONFIG_SCSI_MVSAS=3Dy +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=3Dy +CONFIG_SCSI_MVUMI=3Dy +CONFIG_MEGARAID_NEWGEN=3Dy +CONFIG_MEGARAID_MM=3Dy +CONFIG_MEGARAID_MAILBOX=3Dy +CONFIG_MEGARAID_LEGACY=3Dy +CONFIG_MEGARAID_SAS=3Dy +CONFIG_SCSI_MPT2SAS=3Dy +CONFIG_LIBFC=3Dm +CONFIG_LIBFCOE=3Dm +CONFIG_FCOE=3Dm +CONFIG_SCSI_QLOGIC_1280=3Dm +CONFIG_SCSI_QLA_FC=3Dm +CONFIG_TCM_QLA2XXX=3Dm +CONFIG_SCSI_QLA_ISCSI=3Dm +CONFIG_SCSI_LPFC=3Dm +CONFIG_SCSI_VIRTIO=3Dm +CONFIG_ATA=3Dy +CONFIG_SATA_AHCI=3Dy +CONFIG_SATA_AHCI_PLATFORM=3Dy +CONFIG_AHCI_DWC=3Dy +CONFIG_PATA_ATIIXP=3Dy +CONFIG_PATA_PCMCIA=3Dm +CONFIG_MD=3Dy +CONFIG_BLK_DEV_MD=3Dm +CONFIG_MD_LLBITMAP=3Dy +CONFIG_MD_RAID0=3Dm +CONFIG_MD_RAID1=3Dm +CONFIG_MD_RAID10=3Dm +CONFIG_MD_RAID456=3Dm +CONFIG_BCACHE=3Dm +CONFIG_BLK_DEV_DM=3Dy +CONFIG_DM_CRYPT=3Dm +CONFIG_DM_SNAPSHOT=3Dm +CONFIG_DM_THIN_PROVISIONING=3Dm +CONFIG_DM_CACHE=3Dm +CONFIG_DM_WRITECACHE=3Dm +CONFIG_DM_MIRROR=3Dm +CONFIG_DM_RAID=3Dm +CONFIG_DM_ZERO=3Dm +CONFIG_DM_MULTIPATH=3Dm +CONFIG_DM_MULTIPATH_QL=3Dm +CONFIG_DM_MULTIPATH_ST=3Dm +CONFIG_DM_MULTIPATH_HST=3Dm +CONFIG_DM_MULTIPATH_IOA=3Dm +CONFIG_DM_INIT=3Dy +CONFIG_DM_UEVENT=3Dy +CONFIG_DM_VERITY=3Dm +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=3Dy +CONFIG_DM_VERITY_FEC=3Dy +CONFIG_DM_INTEGRITY=3Dm +CONFIG_DM_ZONED=3Dm +CONFIG_DM_VDO=3Dm +CONFIG_TARGET_CORE=3Dm +CONFIG_TCM_IBLOCK=3Dm +CONFIG_TCM_FILEIO=3Dm +CONFIG_TCM_PSCSI=3Dm 
+CONFIG_TCM_USER2=3Dm +CONFIG_LOOPBACK_TARGET=3Dm +CONFIG_ISCSI_TARGET=3Dm +CONFIG_NETDEVICES=3Dy +CONFIG_BONDING=3Dm +CONFIG_DUMMY=3Dy +CONFIG_WIREGUARD=3Dm +CONFIG_IFB=3Dm +CONFIG_NET_TEAM=3Dm +CONFIG_NET_TEAM_MODE_BROADCAST=3Dm +CONFIG_NET_TEAM_MODE_ROUNDROBIN=3Dm +CONFIG_NET_TEAM_MODE_RANDOM=3Dm +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=3Dm +CONFIG_NET_TEAM_MODE_LOADBALANCE=3Dm +CONFIG_MACVLAN=3Dm +CONFIG_MACVTAP=3Dm +CONFIG_IPVLAN=3Dm +CONFIG_VXLAN=3Dy +CONFIG_RIONET=3Dm +CONFIG_TUN=3Dm +CONFIG_VETH=3Dm +CONFIG_VIRTIO_NET=3Dm +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=3Dy +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=3Dm +CONFIG_CHELSIO_T1_1G=3Dy +CONFIG_CHELSIO_T3=3Dm +CONFIG_CHELSIO_T4=3Dm +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=3Dy +CONFIG_E1000E=3Dy +CONFIG_IGB=3Dy +CONFIG_IXGBE=3Dy +CONFIG_I40E=3Dy +CONFIG_ICE=3Dy +CONFIG_FM10K=3Dy +CONFIG_IGC=3Dy +CONFIG_IDPF=3Dy +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=3Dm +CONFIG_8139TOO=3Dm +CONFIG_R8169=3Dy +# 
CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=3Dy +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=3Dy +CONFIG_TXGBE=3Dy +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_MOTORCOMM_PHY=3Dy +CONFIG_PPP=3Dm +CONFIG_PPP_BSDCOMP=3Dm +CONFIG_PPP_DEFLATE=3Dm +CONFIG_PPP_FILTER=3Dy +CONFIG_PPP_MPPE=3Dm +CONFIG_PPP_MULTILINK=3Dy +CONFIG_PPPOE=3Dm +CONFIG_PPTP=3Dm +CONFIG_PPPOL2TP=3Dm +CONFIG_PPP_ASYNC=3Dm +CONFIG_PPP_SYNC_TTY=3Dm +CONFIG_USB_RTL8150=3Dm +CONFIG_USB_RTL8152=3Dm +CONFIG_USB_USBNET=3Dm +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=3Dm +CONFIG_USB_NET_HUAWEI_CDC_NCM=3Dm +CONFIG_USB_NET_CDC_MBIM=3Dm +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_RNDIS_HOST=3Dm +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_ATH9K=3Dm +CONFIG_ATH9K_HTC=3Dm +CONFIG_IWLWIFI=3Dm +CONFIG_IWLDVM=3Dm +CONFIG_IWLMVM=3Dm +CONFIG_MT7601U=3Dm +CONFIG_RT2X00=3Dm +CONFIG_RT2800USB=3Dm +CONFIG_RTL8180=3Dm +CONFIG_RTL8187=3Dm +CONFIG_RTL8192CE=3Dm +CONFIG_RTL8192SE=3Dm +CONFIG_RTL8192DE=3Dm +CONFIG_RTL8723AE=3Dm +CONFIG_RTL8723BE=3Dm +CONFIG_RTL8188EE=3Dm +CONFIG_RTL8192EE=3Dm +CONFIG_RTL8821AE=3Dm +CONFIG_RTL8192CU=3Dm +CONFIG_RTL8192DU=3Dm +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=3Dm +CONFIG_RTW88=3Dm +CONFIG_RTW88_8822BE=3Dm +CONFIG_RTW88_8822BU=3Dm +CONFIG_RTW88_8822CE=3Dm +CONFIG_RTW88_8822CU=3Dm +CONFIG_RTW88_8723DE=3Dm +CONFIG_RTW88_8723DU=3Dm +CONFIG_RTW88_8821CE=3Dm +CONFIG_RTW88_8821CU=3Dm +CONFIG_RTW88_8821AU=3Dm 
+CONFIG_RTW88_8812AU=3Dm +CONFIG_RTW88_8814AE=3Dm +CONFIG_RTW88_8814AU=3Dm +CONFIG_RTW89=3Dm +CONFIG_RTW89_8851BE=3Dm +CONFIG_RTW89_8852AE=3Dm +CONFIG_RTW89_8852BE=3Dm +CONFIG_RTW89_8852BTE=3Dm +CONFIG_RTW89_8852CE=3Dm +CONFIG_RTW89_8922AE=3Dm +CONFIG_ZD1211RW=3Dm +CONFIG_USB4_NET=3Dm +CONFIG_INPUT_MOUSEDEV=3Dy +CONFIG_INPUT_MOUSEDEV_PSAUX=3Dy +CONFIG_INPUT_EVDEV=3Dy +CONFIG_KEYBOARD_GPIO=3Dm +CONFIG_KEYBOARD_GPIO_POLLED=3Dm +CONFIG_KEYBOARD_MATRIX=3Dm +CONFIG_KEYBOARD_XTKBD=3Dm +CONFIG_MOUSE_PS2_ELANTECH=3Dy +CONFIG_MOUSE_PS2_SENTELIC=3Dy +CONFIG_MOUSE_SERIAL=3Dm +CONFIG_INPUT_MISC=3Dy +CONFIG_INPUT_UINPUT=3Dm +CONFIG_SERIO_SERPORT=3Dm +CONFIG_SERIO_RAW=3Dm +CONFIG_LEGACY_PTY_COUNT=3D16 +CONFIG_SERIAL_8250=3Dy +CONFIG_SERIAL_8250_CONSOLE=3Dy +CONFIG_SERIAL_8250_NR_UARTS=3D16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3D16 +CONFIG_SERIAL_8250_EXTENDED=3Dy +CONFIG_SERIAL_8250_MANY_PORTS=3Dy +CONFIG_SERIAL_8250_SHARE_IRQ=3Dy +CONFIG_SERIAL_8250_RSA=3Dy +CONFIG_SERIAL_OF_PLATFORM=3Dy +CONFIG_SERIAL_NONSTANDARD=3Dy +CONFIG_PRINTER=3Dm +CONFIG_VIRTIO_CONSOLE=3Dy +CONFIG_HW_RANDOM=3Dy +CONFIG_HW_RANDOM_VIRTIO=3Dm +CONFIG_I2C_CHARDEV=3Dy +CONFIG_I2C_PIIX4=3Dy +CONFIG_I2C_DESIGNWARE_CORE=3Dy +CONFIG_I2C_DESIGNWARE_SLAVE=3Dy +CONFIG_I2C_DESIGNWARE_PCI=3Dy +CONFIG_I2C_GPIO=3Dy +CONFIG_SPI=3Dy +CONFIG_SPI_LOONGSON_PCI=3Dm +CONFIG_SPI_LOONGSON_PLATFORM=3Dm +CONFIG_PINCTRL=3Dy +CONFIG_GPIO_SYSFS=3Dy +CONFIG_GPIO_LOONGSON1=3Dy +CONFIG_GPIO_PCA953X=3Dm +CONFIG_GPIO_PCA953X_IRQ=3Dy +CONFIG_GPIO_PCA9570=3Dm +CONFIG_GPIO_PCF857X=3Dm +CONFIG_POWER_RESET=3Dy +CONFIG_POWER_RESET_RESTART=3Dy +CONFIG_POWER_RESET_SYSCON=3Dy +CONFIG_POWER_RESET_SYSCON_POWEROFF=3Dy +CONFIG_SYSCON_REBOOT_MODE=3Dy +CONFIG_SENSORS_LM75=3Dm +CONFIG_SENSORS_LM93=3Dm +CONFIG_SENSORS_W83795=3Dm +CONFIG_SENSORS_W83627HF=3Dm +CONFIG_WATCHDOG=3Dy +CONFIG_LOONGSON1_WDT=3Dm +CONFIG_RC_CORE=3Dm +CONFIG_LIRC=3Dy +CONFIG_RC_DECODERS=3Dy +CONFIG_IR_IMON_DECODER=3Dm +CONFIG_IR_JVC_DECODER=3Dm +CONFIG_IR_MCE_KBD_DECODER=3Dm 
+CONFIG_IR_NEC_DECODER=3Dm +CONFIG_IR_RC5_DECODER=3Dm +CONFIG_IR_RC6_DECODER=3Dm +CONFIG_IR_SANYO_DECODER=3Dm +CONFIG_IR_SHARP_DECODER=3Dm +CONFIG_IR_SONY_DECODER=3Dm +CONFIG_IR_XMP_DECODER=3Dm +CONFIG_MEDIA_SUPPORT=3Dm +CONFIG_MEDIA_USB_SUPPORT=3Dy +CONFIG_USB_VIDEO_CLASS=3Dm +CONFIG_MEDIA_PCI_SUPPORT=3Dy +CONFIG_VIDEO_BT848=3Dm +CONFIG_DVB_BT8XX=3Dm +CONFIG_DRM=3Dy +CONFIG_DRM_LOAD_EDID_FIRMWARE=3Dy +CONFIG_DRM_EFIDRM=3Dy +CONFIG_DRM_SIMPLEDRM=3Dy +CONFIG_DRM_RADEON=3Dm +CONFIG_DRM_RADEON_USERPTR=3Dy +CONFIG_DRM_QXL=3Dm +CONFIG_DRM_VIRTIO_GPU=3Dm +CONFIG_DRM_LOONGSON=3Dy +CONFIG_FB=3Dy +CONFIG_FB_RADEON=3Dy +CONFIG_FIRMWARE_EDID=3Dy +CONFIG_LCD_CLASS_DEVICE=3Dy +CONFIG_LCD_PLATFORM=3Dm +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=3Dy +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=3Dy +CONFIG_LOGO=3Dy +CONFIG_SOUND=3Dy +CONFIG_SND=3Dy +CONFIG_SND_SEQUENCER=3Dm +CONFIG_SND_SEQ_DUMMY=3Dm +CONFIG_SND_BT87X=3Dm +CONFIG_SND_BT87X_OVERCLOCK=3Dy +CONFIG_SND_HDA_INTEL=3Dy +CONFIG_SND_HDA_HWDEP=3Dy +CONFIG_SND_HDA_INPUT_BEEP=3Dy +CONFIG_SND_HDA_PATCH_LOADER=3Dy +CONFIG_SND_HDA_CODEC_REALTEK=3Dy +CONFIG_SND_HDA_CODEC_REALTEK_LIB=3Dy +CONFIG_SND_HDA_CODEC_ALC260=3Dy +CONFIG_SND_HDA_CODEC_ALC262=3Dy +CONFIG_SND_HDA_CODEC_ALC268=3Dy +CONFIG_SND_HDA_CODEC_ALC269=3Dy +CONFIG_SND_HDA_CODEC_ALC662=3Dy +CONFIG_SND_HDA_CODEC_ALC680=3Dy +CONFIG_SND_HDA_CODEC_ALC861=3Dy +CONFIG_SND_HDA_CODEC_ALC861VD=3Dy +CONFIG_SND_HDA_CODEC_ALC880=3Dy +CONFIG_SND_HDA_CODEC_ALC882=3Dy +CONFIG_SND_HDA_CODEC_SIGMATEL=3Dy +CONFIG_SND_HDA_CODEC_HDMI=3Dy +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=3Dy +CONFIG_SND_HDA_CODEC_HDMI_INTEL=3Dy +CONFIG_SND_HDA_CODEC_HDMI_ATI=3Dy +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=3Dy +CONFIG_SND_HDA_CODEC_CONEXANT=3Dy +CONFIG_SND_USB_AUDIO=3Dm +CONFIG_SND_USB_AUDIO_MIDI_V2=3Dy +CONFIG_SND_SOC=3Dm +CONFIG_SND_SOC_LOONGSON_CARD=3Dm +CONFIG_SND_LOONGSON1_AC97=3Dm +CONFIG_SND_SOC_ES7134=3Dm +CONFIG_SND_SOC_ES7241=3Dm +CONFIG_SND_SOC_ES8311=3Dm +CONFIG_SND_SOC_ES8316=3Dm 
+CONFIG_SND_SOC_ES8323=3Dm +CONFIG_SND_SOC_ES8326=3Dm +CONFIG_SND_SOC_ES8328_I2C=3Dm +CONFIG_SND_SOC_ES8328_SPI=3Dm +CONFIG_SND_SOC_UDA1334=3Dm +CONFIG_SND_SOC_UDA1342=3Dm +CONFIG_SND_VIRTIO=3Dm +CONFIG_HIDRAW=3Dy +CONFIG_UHID=3Dm +CONFIG_HID_A4TECH=3Dm +CONFIG_HID_CHERRY=3Dm +CONFIG_HID_ELAN=3Dm +CONFIG_HID_LOGITECH=3Dm +CONFIG_HID_LOGITECH_DJ=3Dm +CONFIG_LOGITECH_FF=3Dy +CONFIG_LOGIRUMBLEPAD2_FF=3Dy +CONFIG_LOGIG940_FF=3Dy +CONFIG_HID_MICROSOFT=3Dm +CONFIG_HID_MULTITOUCH=3Dm +CONFIG_HID_SUNPLUS=3Dm +CONFIG_HID_WACOM=3Dm +CONFIG_USB_HIDDEV=3Dy +CONFIG_I2C_HID_ACPI=3Dm +CONFIG_I2C_HID_OF=3Dm +CONFIG_I2C_HID_OF_ELAN=3Dm +CONFIG_USB=3Dy +CONFIG_USB_OTG=3Dy +CONFIG_USB_MON=3Dy +CONFIG_USB_XHCI_HCD=3Dy +CONFIG_USB_EHCI_HCD=3Dy +CONFIG_USB_EHCI_ROOT_HUB_TT=3Dy +CONFIG_USB_EHCI_HCD_PLATFORM=3Dy +CONFIG_USB_OHCI_HCD=3Dy +CONFIG_USB_OHCI_HCD_PLATFORM=3Dy +CONFIG_USB_UHCI_HCD=3Dm +CONFIG_USB_ACM=3Dm +CONFIG_USB_PRINTER=3Dm +CONFIG_USB_STORAGE=3Dm +CONFIG_USB_STORAGE_REALTEK=3Dm +CONFIG_USB_UAS=3Dm +CONFIG_USB_DWC2=3Dy +CONFIG_USB_DWC2_HOST=3Dy +CONFIG_USB_SERIAL=3Dm +CONFIG_USB_SERIAL_CH341=3Dm +CONFIG_USB_SERIAL_CP210X=3Dm +CONFIG_USB_SERIAL_FTDI_SIO=3Dm +CONFIG_USB_SERIAL_PL2303=3Dm +CONFIG_USB_SERIAL_OPTION=3Dm +CONFIG_USB_GADGET=3Dy +CONFIG_TYPEC=3Dm +CONFIG_TYPEC_TCPM=3Dm +CONFIG_TYPEC_TCPCI=3Dm +CONFIG_TYPEC_UCSI=3Dm +CONFIG_UCSI_ACPI=3Dm +CONFIG_MMC=3Dy +CONFIG_INFINIBAND=3Dm +CONFIG_EDAC=3Dy +# CONFIG_EDAC_LEGACY_SYSFS is not set +CONFIG_EDAC_LOONGSON=3Dy +CONFIG_RTC_CLASS=3Dy +CONFIG_RTC_DRV_EFI=3Dy +CONFIG_RTC_DRV_LOONGSON=3Dy +CONFIG_DMADEVICES=3Dy +CONFIG_LOONGSON1_APB_DMA=3Dy +CONFIG_UDMABUF=3Dy +CONFIG_DMABUF_HEAPS=3Dy +CONFIG_DMABUF_HEAPS_SYSTEM=3Dy +CONFIG_DMABUF_HEAPS_CMA=3Dy +CONFIG_UIO=3Dm +CONFIG_UIO_PDRV_GENIRQ=3Dm +CONFIG_UIO_DMEM_GENIRQ=3Dm +CONFIG_UIO_PCI_GENERIC=3Dm +CONFIG_VFIO=3Dm +CONFIG_VFIO_PCI=3Dm +CONFIG_VIRTIO_PCI=3Dy +CONFIG_VIRTIO_BALLOON=3Dm +CONFIG_VIRTIO_INPUT=3Dm +CONFIG_VIRTIO_MMIO=3Dm +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=3Dy 
+CONFIG_VHOST_NET=3Dm +CONFIG_VHOST_SCSI=3Dm +CONFIG_VHOST_VSOCK=3Dm +CONFIG_COMEDI=3Dm +CONFIG_COMEDI_PCI_DRIVERS=3Dm +CONFIG_COMEDI_8255_PCI=3Dm +CONFIG_COMEDI_ADL_PCI6208=3Dm +CONFIG_COMEDI_ADL_PCI7X3X=3Dm +CONFIG_COMEDI_ADL_PCI8164=3Dm +CONFIG_COMEDI_ADL_PCI9111=3Dm +CONFIG_COMEDI_ADL_PCI9118=3Dm +CONFIG_COMEDI_ADV_PCI1710=3Dm +CONFIG_COMEDI_ADV_PCI1720=3Dm +CONFIG_COMEDI_ADV_PCI1723=3Dm +CONFIG_COMEDI_ADV_PCI1724=3Dm +CONFIG_COMEDI_ADV_PCI1760=3Dm +CONFIG_COMEDI_ADV_PCI_DIO=3Dm +CONFIG_COMEDI_NI_LABPC_PCI=3Dm +CONFIG_COMEDI_NI_PCIDIO=3Dm +CONFIG_COMEDI_NI_PCIMIO=3Dm +CONFIG_STAGING=3Dy +CONFIG_CLKSRC_LOONGSON1_PWM=3Dy +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_PM_DEVFREQ=3Dy +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=3Dy +CONFIG_DEVFREQ_GOV_PERFORMANCE=3Dy +CONFIG_DEVFREQ_GOV_POWERSAVE=3Dy +CONFIG_DEVFREQ_GOV_USERSPACE=3Dy +CONFIG_NTB=3Dm +CONFIG_NTB_MSI=3Dy +CONFIG_NTB_IDT=3Dm +CONFIG_NTB_EPF=3Dm +CONFIG_NTB_SWITCHTEC=3Dm +CONFIG_NTB_PERF=3Dm +CONFIG_NTB_TRANSPORT=3Dm +CONFIG_PWM=3Dy +CONFIG_GENERIC_PHY=3Dy +CONFIG_USB4=3Dy +CONFIG_EXT2_FS=3Dy +CONFIG_EXT2_FS_XATTR=3Dy +CONFIG_EXT2_FS_POSIX_ACL=3Dy +CONFIG_EXT2_FS_SECURITY=3Dy +CONFIG_EXT4_FS=3Dy +CONFIG_EXT4_FS_POSIX_ACL=3Dy +CONFIG_EXT4_FS_SECURITY=3Dy +CONFIG_JFS_FS=3Dm +CONFIG_JFS_POSIX_ACL=3Dy +CONFIG_JFS_SECURITY=3Dy +CONFIG_XFS_FS=3Dy +CONFIG_XFS_SUPPORT_V4=3Dy +CONFIG_XFS_SUPPORT_ASCII_CI=3Dy +CONFIG_XFS_QUOTA=3Dy +CONFIG_XFS_POSIX_ACL=3Dy +CONFIG_GFS2_FS=3Dm +CONFIG_GFS2_FS_LOCKING_DLM=3Dy +CONFIG_OCFS2_FS=3Dm +CONFIG_BTRFS_FS=3Dy +CONFIG_BTRFS_FS_POSIX_ACL=3Dy +CONFIG_F2FS_FS=3Dm +CONFIG_F2FS_FS_SECURITY=3Dy +CONFIG_F2FS_CHECK_FS=3Dy +CONFIG_F2FS_FS_COMPRESSION=3Dy +CONFIG_FS_ENCRYPTION=3Dy +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=3Dy +CONFIG_FS_VERITY=3Dy +CONFIG_FANOTIFY=3Dy +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=3Dy +CONFIG_QUOTA=3Dy +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V1=3Dm +CONFIG_QFMT_V2=3Dm +CONFIG_AUTOFS_FS=3Dy +CONFIG_FUSE_FS=3Dm +CONFIG_CUSE=3Dm +CONFIG_VIRTIO_FS=3Dm 
+CONFIG_OVERLAY_FS=3Dy +CONFIG_OVERLAY_FS_INDEX=3Dy +CONFIG_OVERLAY_FS_XINO_AUTO=3Dy +CONFIG_OVERLAY_FS_METACOPY=3Dy +CONFIG_FSCACHE=3Dy +CONFIG_CACHEFILES=3Dm +CONFIG_ISO9660_FS=3Dy +CONFIG_JOLIET=3Dy +CONFIG_ZISOFS=3Dy +CONFIG_UDF_FS=3Dy +CONFIG_MSDOS_FS=3Dm +CONFIG_VFAT_FS=3Dm +CONFIG_FAT_DEFAULT_CODEPAGE=3D936 +CONFIG_FAT_DEFAULT_IOCHARSET=3D"gb2312" +CONFIG_EXFAT_FS=3Dm +CONFIG_NTFS3_FS=3Dm +CONFIG_NTFS3_LZX_XPRESS=3Dy +CONFIG_PROC_KCORE=3Dy +CONFIG_TMPFS=3Dy +CONFIG_TMPFS_POSIX_ACL=3Dy +CONFIG_CONFIGFS_FS=3Dy +CONFIG_ORANGEFS_FS=3Dm +CONFIG_ECRYPT_FS=3Dm +CONFIG_ECRYPT_FS_MESSAGING=3Dy +CONFIG_HFS_FS=3Dm +CONFIG_HFSPLUS_FS=3Dm +CONFIG_UBIFS_FS=3Dm +CONFIG_UBIFS_FS_ADVANCED_COMPR=3Dy +CONFIG_CRAMFS=3Dm +CONFIG_SQUASHFS=3Dy +CONFIG_SQUASHFS_FILE_DIRECT=3Dy +CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=3Dy +CONFIG_SQUASHFS_XATTR=3Dy +CONFIG_SQUASHFS_LZ4=3Dy +CONFIG_SQUASHFS_LZO=3Dy +CONFIG_SQUASHFS_XZ=3Dy +CONFIG_SQUASHFS_ZSTD=3Dy +CONFIG_MINIX_FS=3Dm +CONFIG_ROMFS_FS=3Dm +CONFIG_PSTORE=3Dm +CONFIG_PSTORE_COMPRESS=3Dy +CONFIG_UFS_FS=3Dm +CONFIG_EROFS_FS=3Dm +CONFIG_EROFS_FS_ZIP_LZMA=3Dy +CONFIG_EROFS_FS_ZIP_DEFLATE=3Dy +CONFIG_EROFS_FS_ZIP_ZSTD=3Dy +CONFIG_EROFS_FS_ONDEMAND=3Dy +CONFIG_EROFS_FS_PCPU_KTHREAD=3Dy +CONFIG_NFS_FS=3Dy +CONFIG_NFS_V3_ACL=3Dy +CONFIG_NFS_V4=3Dy +CONFIG_NFS_V4_1=3Dy +CONFIG_NFS_V4_2=3Dy +CONFIG_ROOT_NFS=3Dy +CONFIG_NFSD=3Dy +CONFIG_NFSD_V3_ACL=3Dy +CONFIG_NFSD_V4=3Dy +CONFIG_NFSD_BLOCKLAYOUT=3Dy +CONFIG_CEPH_FS=3Dm +CONFIG_CEPH_FSCACHE=3Dy +CONFIG_CEPH_FS_POSIX_ACL=3Dy +CONFIG_CEPH_FS_SECURITY_LABEL=3Dy +CONFIG_CIFS=3Dm +# CONFIG_CIFS_DEBUG is not set +CONFIG_9P_FS=3Dy +CONFIG_NLS_DEFAULT=3D"utf8" +CONFIG_NLS_CODEPAGE_437=3Dy +CONFIG_NLS_CODEPAGE_936=3Dy +CONFIG_NLS_CODEPAGE_950=3Dy +CONFIG_NLS_ASCII=3Dy +CONFIG_NLS_ISO8859_1=3Dy +CONFIG_NLS_UTF8=3Dy +CONFIG_DLM=3Dm +CONFIG_KEY_DH_OPERATIONS=3Dy +CONFIG_SECURITY=3Dy +CONFIG_SECURITY_SELINUX=3Dy +CONFIG_SECURITY_SELINUX_BOOTPARAM=3Dy +CONFIG_SECURITY_APPARMOR=3Dy +CONFIG_SECURITY_YAMA=3Dy 
+CONFIG_DEFAULT_SECURITY_DAC=3Dy +CONFIG_CRYPTO_USER=3Dm +CONFIG_CRYPTO_SELFTESTS=3Dy +CONFIG_CRYPTO_PCRYPT=3Dm +CONFIG_CRYPTO_CRYPTD=3Dm +CONFIG_CRYPTO_ANUBIS=3Dm +CONFIG_CRYPTO_BLOWFISH=3Dm +CONFIG_CRYPTO_CAST5=3Dm +CONFIG_CRYPTO_CAST6=3Dm +CONFIG_CRYPTO_KHAZAD=3Dm +CONFIG_CRYPTO_SEED=3Dm +CONFIG_CRYPTO_SERPENT=3Dm +CONFIG_CRYPTO_SM4_GENERIC=3Dm +CONFIG_CRYPTO_TEA=3Dm +CONFIG_CRYPTO_TWOFISH=3Dm +CONFIG_CRYPTO_CHACHA20POLY1305=3Dm +CONFIG_CRYPTO_SM3_GENERIC=3Dm +CONFIG_CRYPTO_WP512=3Dm +CONFIG_CRYPTO_DEFLATE=3Dm +CONFIG_CRYPTO_LZO=3Dm +CONFIG_CRYPTO_842=3Dm +CONFIG_CRYPTO_LZ4=3Dm +CONFIG_CRYPTO_LZ4HC=3Dm +CONFIG_CRYPTO_USER_API_HASH=3Dm +CONFIG_CRYPTO_USER_API_SKCIPHER=3Dm +CONFIG_CRYPTO_USER_API_RNG=3Dm +CONFIG_CRYPTO_USER_API_AEAD=3Dm +CONFIG_CRYPTO_DEV_VIRTIO=3Dm +CONFIG_CRYPTO_DEV_LOONGSON_RNG=3Dm +CONFIG_DMA_CMA=3Dy +CONFIG_CMA_SIZE_MBYTES=3D0 +CONFIG_PRINTK_TIME=3Dy +CONFIG_STRIP_ASM_SYMS=3Dy +CONFIG_MAGIC_SYSRQ=3Dy +CONFIG_DEBUG_FS=3Dy +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=3Dy +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/co= nfigs/loongson64_defconfig similarity index 99% rename from arch/loongarch/configs/loongson3_defconfig rename to arch/loongarch/configs/loongson64_defconfig index 3e838c229cd5..e9e14a744434 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson64_defconfig @@ -435,7 +435,6 @@ CONFIG_DEVTMPFS=3Dy CONFIG_DEVTMPFS_MOUNT=3Dy CONFIG_FW_LOADER_COMPRESS=3Dy CONFIG_FW_LOADER_COMPRESS_ZSTD=3Dy -CONFIG_SYSFB_SIMPLEFB=3Dy CONFIG_EFI_ZBOOT=3Dy CONFIG_EFI_BOOTLOADER_CONTROL=3Dm CONFIG_EFI_CAPSULE_LOADER=3Dm @@ -530,6 +529,7 @@ CONFIG_PATA_ATIIXP=3Dy CONFIG_PATA_PCMCIA=3Dm CONFIG_MD=3Dy CONFIG_BLK_DEV_MD=3Dm +CONFIG_MD_LLBITMAP=3Dy CONFIG_MD_RAID0=3Dm CONFIG_MD_RAID1=3Dm CONFIG_MD_RAID10=3Dm @@ -801,6 +801,8 @@ CONFIG_VIDEO_BT848=3Dm CONFIG_DVB_BT8XX=3Dm CONFIG_DRM=3Dy CONFIG_DRM_LOAD_EDID_FIRMWARE=3Dy 
+CONFIG_DRM_EFIDRM=3Dy +CONFIG_DRM_SIMPLEDRM=3Dy CONFIG_DRM_RADEON=3Dm CONFIG_DRM_RADEON_USERPTR=3Dy CONFIG_DRM_AMDGPU=3Dm @@ -811,9 +813,7 @@ CONFIG_DRM_AST=3Dy CONFIG_DRM_QXL=3Dm CONFIG_DRM_VIRTIO_GPU=3Dm CONFIG_DRM_LOONGSON=3Dy -CONFIG_DRM_SIMPLEDRM=3Dy CONFIG_FB=3Dy -CONFIG_FB_EFI=3Dy CONFIG_FB_RADEON=3Dy CONFIG_FIRMWARE_EDID=3Dy CONFIG_LCD_CLASS_DEVICE=3Dy --=20 2.47.3 From nobody Mon Dec 1 21:31:56 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A995A3376A9; Thu, 27 Nov 2025 15:58:12 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259093; cv=none; b=uCO8jdeV+tggfaiYdF+B4KUpCMghJnOOiX+WpQ7o0hPJ00lctOIB4xFMnqs2Ivlj/NrbgH4Wc4jlrP/OuLf2PmjxSYmUUqWX52G56iMo+EIzaE4M5nQCNadAWHVBo9Jef/Jr9JtfLge6uBayCW+FwsypX7Eu3D1c/+3rzujkcOU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1764259093; c=relaxed/simple; bh=qOARUrU9FZKnzCbK+38fhLHk3A5LGQrx2IMAQbNmiKA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Uvpsj0zGb8OBiG1lJmlLoSZdc9Q3+1CO1y0cNvkP3d18jWYoNSEmiT2anFtpNQflKD69ye4L5ubqOjkdFm+nBGnQ1ihMOmaCRh43dQlijXLT1nzpyfAudpfIEYlhl682IUZeovFrZkPcXa9yA3GXFQWTgs1A+50IzB8ug9RxqKU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 Received: by smtp.kernel.org (Postfix) with ESMTPSA id D6CA2C4CEF8; Thu, 27 Nov 2025 15:58:09 +0000 (UTC) From: Huacai Chen To: Huacai Chen Cc: loongarch@lists.linux.dev, Xuefeng Li , Guo Ren , Xuerui Wang , Jiaxun Yang , linux-kernel@vger.kernel.org, Huacai Chen , Arnd Bergmann Subject: [PATCH V4 14/14] LoongArch: Adjust build infrastructure for 32BIT/64BIT Date: Thu, 27 Nov 2025 23:48:32 +0800 Message-ID: 
<20251127154832.137925-15-chenhuacai@loongson.cn> X-Mailer: git-send-email 2.47.3 In-Reply-To: <20251127154832.137925-1-chenhuacai@loongson.cn> References: <20251127154832.137925-1-chenhuacai@loongson.cn> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Adjust build infrastructure (Kconfig, Makefile and ld scripts) to let us enable both 32BIT/64BIT kernel build. Reviewed-by: Arnd Bergmann Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/Kconfig | 113 ++++++++++++++++++-------- arch/loongarch/Makefile | 23 +++++- arch/loongarch/boot/Makefile | 6 ++ arch/loongarch/kernel/vmlinux.lds.S | 7 +- arch/loongarch/kvm/Kconfig | 2 +- arch/loongarch/lib/Makefile | 5 +- drivers/firmware/efi/libstub/Makefile | 1 + drivers/pci/controller/Kconfig | 2 +- lib/crc/Kconfig | 2 +- 9 files changed, 120 insertions(+), 41 deletions(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 730f34214519..4bacde9f46d1 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -21,11 +21,11 @@ config LOONGARCH select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV - select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU + select ARCH_HAS_KERNEL_FPU_SUPPORT if 64BIT && CPU_HAS_FPU select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PREEMPT_LAZY - select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_PTE_SPECIAL if 64BIT select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST @@ -60,16 +60,15 @@ config LOONGARCH select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO - select ARCH_SPARSEMEM_ENABLE select ARCH_STACKWALK select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW - select ARCH_SUPPORTS_HUGETLBFS + select 
ARCH_SUPPORTS_HUGETLBFS if 64BIT select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_LTO_CLANG_THIN select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS - select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_NUMA_BALANCING if NUMA select ARCH_SUPPORTS_PER_VMA_LOCK select ARCH_SUPPORTS_RT select ARCH_SUPPORTS_SCHED_SMT if SMP @@ -79,10 +78,10 @@ config LOONGARCH select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS - select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_DEFAULT_BPF_JIT if HAVE_EBPF_JIT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select ARCH_WANT_LD_ORPHAN_WARN - select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if 64BIT select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE select BUILDTIME_TABLE_SORT @@ -90,13 +89,14 @@ config LOONGARCH select CPU_PM select EDAC_SUPPORT select EFI + select GENERIC_ATOMIC64 if 32BIT select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_DEVICES select GENERIC_CPU_VULNERABILITIES select GENERIC_ENTRY - select GENERIC_GETTIMEOFDAY + select GENERIC_GETTIMEOFDAY if 64BIT select GENERIC_IOREMAP if !ARCH_IOREMAP select GENERIC_IRQ_MATRIX_ALLOCATOR select GENERIC_IRQ_MULTI_HANDLER @@ -111,15 +111,15 @@ config LOONGARCH select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD - select GENERIC_TIME_VSYSCALL + select GENERIC_TIME_VSYSCALL if GENERIC_GETTIMEOFDAY select GPIOLIB select HAS_IOPORT select HAVE_ARCH_AUDITSYSCALL - select HAVE_ARCH_BITREVERSE + select HAVE_ARCH_BITREVERSE if 64BIT select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE - select HAVE_ARCH_KASAN - select HAVE_ARCH_KFENCE + select HAVE_ARCH_KASAN if 64BIT + select HAVE_ARCH_KFENCE if 64BIT select HAVE_ARCH_KGDB if PERF_EVENTS select HAVE_ARCH_KSTACK_ERASE select HAVE_ARCH_MMAP_RND_BITS if MMU @@ -127,8 +127,8 @@ config LOONGARCH 
select HAVE_ARCH_SECCOMP select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select HAVE_ARCH_USERFAULTFD_MINOR if 64BIT && USERFAULTFD select HAVE_ASM_MODVERSIONS select HAVE_CONTEXT_TRACKING_USER select HAVE_C_RECORDMCOUNT @@ -140,7 +140,7 @@ config LOONGARCH select HAVE_FTRACE_REGS_HAVING_PT_REGS select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS select HAVE_DYNAMIC_FTRACE_WITH_REGS - select HAVE_EBPF_JIT + select HAVE_EBPF_JIT if 64BIT select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN select HAVE_EXIT_THREAD select HAVE_GENERIC_TIF_BITS @@ -163,9 +163,9 @@ config LOONGARCH select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && 6= 4BIT select HAVE_PCI - select HAVE_PERF_EVENTS + select HAVE_PERF_EVENTS if 64BIT select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_POSIX_CPU_TIMERS_TASK_WORK @@ -205,18 +205,50 @@ config LOONGARCH select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_EXCEPTION_TRACE - select SWIOTLB + select SWIOTLB if 64BIT select TRACE_IRQFLAGS_SUPPORT select USE_PERCPU_NUMA_NODE_ID select USER_STACKTRACE_SUPPORT select VDSO_GETRANDOM - select ZONE_DMA32 + select ZONE_DMA32 if 64BIT + +menu "Kernel type and options" + +choice + prompt "Kernel type" =20 config 32BIT - bool + bool "32-bit kernel" + help + Select this option if you want to build a 32-bit kernel. =20 config 64BIT - def_bool y + bool "64-bit kernel" + help + Select this option if you want to build a 64-bit kernel. + +endchoice + +if 32BIT + +choice + prompt "32-bit kernel sub-type" + +config 32BIT_REDUCED + bool "32-bit kernel for LA32R" + help + Select this option if you want to build a 32-bit kernel for + LoongArch32 Reduced (LA32R). 
+ +config 32BIT_STANDARD + bool "32-bit kernel for LA32S" + help + Select this option if you want to build a 32-bit kernel for + LoongArch32 Standard (LA32S). + +endchoice + +endif =20 config GENERIC_BUG def_bool y @@ -306,8 +338,6 @@ config RUSTC_HAS_ANNOTATE_TABLEJUMP depends on RUST def_bool $(rustc-option,-Cllvm-args=3D--loongarch-annotate-tablejump) =20 -menu "Kernel type and options" - source "kernel/Kconfig.hz" =20 choice @@ -319,8 +349,17 @@ choice of page size and page table levels. The size of virtual memory address space are determined by the page table layout. =20 +config 4KB_2LEVEL + bool "4KB with 2 levels" + select HAVE_PAGE_SIZE_4KB + select PGTABLE_2LEVEL + help + This option selects 16KB page size with 2 level page tables, which + support a maximum of 32 bits of application virtual memory. + config 4KB_3LEVEL bool "4KB with 3 levels" + depends on 64BIT select HAVE_PAGE_SIZE_4KB select PGTABLE_3LEVEL help @@ -329,6 +368,7 @@ config 4KB_3LEVEL =20 config 4KB_4LEVEL bool "4KB with 4 levels" + depends on 64BIT select HAVE_PAGE_SIZE_4KB select PGTABLE_4LEVEL help @@ -345,6 +385,7 @@ config 16KB_2LEVEL =20 config 16KB_3LEVEL bool "16KB with 3 levels" + depends on 64BIT select HAVE_PAGE_SIZE_16KB select PGTABLE_3LEVEL help @@ -361,6 +402,7 @@ config 64KB_2LEVEL =20 config 64KB_3LEVEL bool "64KB with 3 levels" + depends on 64BIT select HAVE_PAGE_SIZE_64KB select PGTABLE_3LEVEL help @@ -458,6 +500,7 @@ config EFI_STUB =20 config SMP bool "Multi-Processing support" + depends on 64BIT help This enables support for systems with more than one CPU. If you have a system with only one CPU, say N. If you have a system with more @@ -496,6 +539,7 @@ config NR_CPUS config NUMA bool "NUMA Support" select SMP + depends on 64BIT help Say Y to compile the kernel with NUMA (Non-Uniform Memory Access) support. 
This option improves performance on systems with more @@ -578,7 +622,7 @@ config CPU_HAS_FPU =20 config CPU_HAS_LSX bool "Support for the Loongson SIMD Extension" - depends on AS_HAS_LSX_EXTENSION + depends on AS_HAS_LSX_EXTENSION && 64BIT help Loongson SIMD Extension (LSX) introduces 128 bit wide vector registers and a set of SIMD instructions to operate on them. When this option @@ -593,7 +637,7 @@ config CPU_HAS_LSX config CPU_HAS_LASX bool "Support for the Loongson Advanced SIMD Extension" depends on CPU_HAS_LSX - depends on AS_HAS_LASX_EXTENSION + depends on AS_HAS_LASX_EXTENSION && 64BIT help Loongson Advanced SIMD Extension (LASX) introduces 256 bit wide vector registers and a set of SIMD instructions to operate on them. When this @@ -607,7 +651,7 @@ config CPU_HAS_LASX =20 config CPU_HAS_LBT bool "Support for the Loongson Binary Translation Extension" - depends on AS_HAS_LBT_EXTENSION + depends on AS_HAS_LBT_EXTENSION && 64BIT help Loongson Binary Translation (LBT) introduces 4 scratch registers (SCR0 to SCR3), x86/ARM eflags (eflags) and x87 fpu stack pointer (ftop). 
@@ -635,13 +679,13 @@ config ARCH_SELECTS_KEXEC_FILE select HAVE_IMA_KEXEC if IMA =20 config ARCH_SUPPORTS_CRASH_DUMP - def_bool y + def_bool 64BIT =20 config ARCH_DEFAULT_CRASH_DUMP - def_bool y + def_bool 64BIT =20 config ARCH_SELECTS_CRASH_DUMP - def_bool y + def_bool 64BIT depends on CRASH_DUMP select RELOCATABLE =20 @@ -650,6 +694,7 @@ config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION =20 config RELOCATABLE bool "Relocatable kernel" + depends on 64BIT select ARCH_HAS_RELR help This builds the kernel as a Position Independent Executable (PIE), @@ -686,7 +731,7 @@ source "kernel/livepatch/Kconfig" =20 config PARAVIRT bool "Enable paravirtualization code" - depends on AS_HAS_LVZ_EXTENSION + depends on AS_HAS_LVZ_EXTENSION && 64BIT help This changes the kernel so it can modify itself when it is run under a hypervisor, potentially improving performance significantly @@ -714,7 +759,7 @@ config ARCH_FLATMEM_ENABLE depends on !NUMA =20 config ARCH_SPARSEMEM_ENABLE - def_bool y + def_bool 64BIT select SPARSEMEM_VMEMMAP_ENABLE help Say Y to support efficient handling of sparse physical memory, @@ -731,10 +776,12 @@ config MMU default y =20 config ARCH_MMAP_RND_BITS_MIN - default 12 + default 10 if 32BIT + default 12 if 64BIT =20 config ARCH_MMAP_RND_BITS_MAX - default 18 + default 15 if 32BIT + default 18 if 64BIT =20 config ARCH_SUPPORTS_UPROBES def_bool y diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 8d45b860fe56..47516aeea9d2 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -25,6 +25,7 @@ endif # # Select the object file format to substitute into the linker script. 
# +32bit-tool-archpref =3D loongarch32 64bit-tool-archpref =3D loongarch64 32bit-bfd =3D elf32-loongarch 64bit-bfd =3D elf64-loongarch @@ -51,7 +52,10 @@ KBUILD_CPPFLAGS +=3D -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE :=3D -fpatchable-function-entry=3D2 endif =20 -ifdef CONFIG_64BIT +ifdef CONFIG_32BIT +tool-archpref =3D $(32bit-tool-archpref) +UTS_MACHINE :=3D loongarch32 +else tool-archpref =3D $(64bit-tool-archpref) UTS_MACHINE :=3D loongarch64 endif @@ -62,9 +66,19 @@ ifneq ($(SUBARCH),$(ARCH)) endif endif =20 +ifdef CONFIG_32BIT +ifdef CONFIG_32BIT_STANDARD +ld-emul =3D $(32bit-emul) +cflags-y +=3D -march=3Dla32v1.0 -mabi=3Dilp32s -mcmodel=3Dnormal +else # CONFIG_32BIT_REDUCED +ld-emul =3D $(32bit-emul) +cflags-y +=3D -march=3Dla32rv1.0 -mabi=3Dilp32s -mcmodel=3Dnormal +endif +endif + ifdef CONFIG_64BIT ld-emul =3D $(64bit-emul) -cflags-y +=3D -mabi=3Dlp64s -mcmodel=3Dnormal +cflags-y +=3D -march=3Dloongarch64 -mabi=3Dlp64s -mcmodel=3Dnormal endif =20 cflags-y +=3D -pipe $(CC_FLAGS_NO_FPU) @@ -140,7 +154,12 @@ ifndef CONFIG_KASAN cflags-y +=3D -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset endif =20 +ifdef CONFIG_32BIT +load-y =3D 0xa0200000 +else load-y =3D 0x9000000000200000 +endif + bootvars-y =3D VMLINUX_LOAD_ADDRESS=3D$(load-y) =20 drivers-$(CONFIG_PCI) +=3D arch/loongarch/pci/ diff --git a/arch/loongarch/boot/Makefile b/arch/loongarch/boot/Makefile index 4e1c374c5782..8b6d9b42b5f0 100644 --- a/arch/loongarch/boot/Makefile +++ b/arch/loongarch/boot/Makefile @@ -20,7 +20,13 @@ $(obj)/vmlinux.efi: vmlinux FORCE $(call if_changed,objcopy) =20 EFI_ZBOOT_PAYLOAD :=3D vmlinux.efi + +ifdef CONFIG_32BIT +EFI_ZBOOT_BFD_TARGET :=3D elf32-loongarch +EFI_ZBOOT_MACH_TYPE :=3D LOONGARCH32 +else EFI_ZBOOT_BFD_TARGET :=3D elf64-loongarch EFI_ZBOOT_MACH_TYPE :=3D LOONGARCH64 +endif =20 include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vm= linux.lds.S index 
08ea921cdec1..b95c0acdab90 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -6,7 +6,12 @@ =20 #define PAGE_SIZE _PAGE_SIZE #define RO_EXCEPTION_TABLE_ALIGN 4 -#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */ + +#ifdef CONFIG_32BIT +#define PHYSADDR_MASK 0x1fffffff /* 29-bit */ +#else +#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */ +#endif =20 /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index ae64bbdf83a7..3dc5ebc546d5 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -19,7 +19,7 @@ if VIRTUALIZATION =20 config KVM tristate "Kernel-based Virtual Machine (KVM) support" - depends on AS_HAS_LVZ_EXTENSION + depends on AS_HAS_LVZ_EXTENSION && 64BIT select HAVE_KVM_DIRTY_RING_ACQ_REL select HAVE_KVM_IRQ_ROUTING select HAVE_KVM_IRQCHIP diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile index ccea3bbd4353..d8f1e8559487 100644 --- a/arch/loongarch/lib/Makefile +++ b/arch/loongarch/lib/Makefile @@ -3,8 +3,9 @@ # Makefile for LoongArch-specific library files. 
# =20 -lib-y +=3D delay.o memset.o memcpy.o memmove.o \ - clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o +lib-y +=3D delay.o clear_user.o copy_user.o dump_tlb.o unaligned.o + +lib-$(CONFIG_64BIT) +=3D memset.o memcpy.o memmove.o csum.o =20 obj-$(CONFIG_ARCH_SUPPORTS_INT128) +=3D tishift.o =20 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/l= ibstub/Makefile index 94b05e4451dd..2ba0f7c400a7 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -97,6 +97,7 @@ zboot-obj-$(CONFIG_KERNEL_ZSTD) :=3D zboot-decompress-zst= d.o lib-xxhash.o CFLAGS_zboot-decompress-zstd.o +=3D -I$(srctree)/lib/zstd =20 zboot-obj-$(CONFIG_RISCV) +=3D lib-clz_ctz.o lib-ashldi3.o +zboot-obj-$(CONFIG_LOONGARCH) +=3D lib-clz_ctz.o lib-ashldi3.o lib-$(CONFIG_EFI_ZBOOT) +=3D zboot.o $(zboot-obj-y) =20 lib-$(CONFIG_UNACCEPTED_MEMORY) +=3D unaccepted_memory.o bitmap.o find.o diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 41748d083b93..35ef71b0695d 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -171,7 +171,7 @@ config VMD =20 config PCI_LOONGSON bool "LOONGSON PCIe controller" - depends on MACH_LOONGSON64 || COMPILE_TEST + depends on MACH_LOONGSON32 || MACH_LOONGSON64 || COMPILE_TEST depends on OF || ACPI depends on PCI_QUIRKS default MACH_LOONGSON64 diff --git a/lib/crc/Kconfig b/lib/crc/Kconfig index 70e7a6016de3..5bf613405fdd 100644 --- a/lib/crc/Kconfig +++ b/lib/crc/Kconfig @@ -65,7 +65,7 @@ config CRC32_ARCH depends on CRC32 && CRC_OPTIMIZATIONS default y if ARM && KERNEL_MODE_NEON default y if ARM64 - default y if LOONGARCH + default y if LOONGARCH && 64BIT default y if MIPS && CPU_MIPSR6 default y if PPC64 && ALTIVEC default y if RISCV && RISCV_ISA_ZBC --=20 2.47.3