Linus,
please pull the latest core/uaccess branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core-uaccess-2025-11-30
up to: 4322c8f81c58: lib/strn*,uaccess: Use masked_user_{read/write}_access_begin when required
Scoped user mode access and related changes:
- Implement the missing u64 user access function on ARM when
CONFIG_CPU_SPECTRE=n. This makes it possible to access a 64-bit value in
generic code with [unsafe_]get_user(). All other architectures and ARM
variants already provide the relevant accessors.
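As an illustration, a minimal sketch (hypothetical helper, kernel context
assumed) of the kind of generic code this enables:

        /* Hypothetical helper - illustrates a 64-bit get_user() on 32-bit ARM */
        static int read_user_u64(u64 __user *uptr, u64 *val)
        {
                /*
                 * sizeof(*uptr) == 8 now selects __get_user_asm_dword()
                 * instead of falling into __get_user_bad().
                 */
                return get_user(*val, uptr);
        }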
- Ensure that ASM GOTO jump label usage in the user mode access helpers
always goes through a local C scope label indirection inside the
helpers. This is required because compilers do not support an ASM GOTO
target that leaves an auto cleanup scope: GCC silently fails to emit the
cleanup invocation and Clang fails the build.
This provides generic wrapper macros and the conversion of affected
architecture code to use them.
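The wrapper is a plain local-label indirection, which the compiler
optimizes away; the generic variant added to include/linux/uaccess.h (see
the hunk below) looks like this:

        #define unsafe_get_user(x, ptr, label)                  \
        do {                                                    \
                __label__ local_label;                          \
                arch_unsafe_get_user(x, ptr, local_label);      \
                if (0) {                                        \
                        local_label:                            \
                        goto label;                             \
                }                                               \
        } while (0)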
- Scoped user mode access with auto cleanup
Access to user mode memory can be required in hot code paths, but if it
has to be done with user-controlled pointers, the access is shielded
with a speculation barrier so that the CPU cannot speculate around the
address range check. Those speculation barriers impact performance quite
significantly. This can be avoided by "masking" the provided pointer so
it is guaranteed either to be in the valid user memory access range or
to point to a guaranteed unpopulated address space. This has to be done
without branches, so it creates an address dependency for the access,
which the CPU cannot speculate across.
This results in a repetitive and error-prone programming pattern:
        if (can_do_masked_user_access())
                from = masked_user_read_access_begin((from));
        else if (!user_read_access_begin(from, sizeof(*from)))
                return -EFAULT;
        unsafe_get_user(val, from, Efault);
        user_read_access_end();
        return 0;
Efault:
        user_read_access_end();
        return -EFAULT;
which can be replaced with scopes and automatic cleanup:
        scoped_user_read_access(from, Efault)
                unsafe_get_user(val, from, Efault);
        return 0;
Efault:
        return -EFAULT;
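For a complete picture, a minimal sketch (hypothetical helper name) of a
full function using the scope; this is essentially what the new
get_user_inline() helper wraps up:

        /* Minimal sketch with a hypothetical helper name */
        static int read_u32_from_user(u32 *dst, u32 __user *from)
        {
                u32 val;

                scoped_user_read_access(from, Efault)
                        unsafe_get_user(val, from, Efault);
                *dst = val;
                return 0;
        Efault:
                return -EFAULT;
        }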
- Convert code which implements the above pattern over to
scoped_user_*_access(). This also corrects a couple of imbalanced
masked_*_begin() instances, which are harmless on most architectures but
prevent PowerPC from implementing the masking optimization.
- Add a missing speculation barrier in copy_from_user_iter()
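Simplified, the access check in copy_from_user_iter() now follows this
shape (see the lib/iov_iter.c hunk below for the full version):

        if (can_do_masked_user_access()) {
                iter_from = mask_user_address(iter_from);
        } else {
                if (!access_ok(iter_from, len))
                        return res;
                /* Prevent bad access_ok() speculation from causing side effects */
                barrier_nospec();
        }
        res = raw_copy_from_user(to + progress, iter_from, len);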
Thanks,
tglx
------------------>
Christophe Leroy (4):
iov_iter: Convert copy_from_user_iter() to masked user access
iov_iter: Add missing speculation barrier to copy_from_user_iter()
scm: Convert put_cmsg() to scoped user access
lib/strn*,uaccess: Use masked_user_{read/write}_access_begin when required
Thomas Gleixner (12):
ARM: uaccess: Implement missing __get_user_asm_dword()
uaccess: Provide ASM GOTO safe wrappers for unsafe_*_user()
x86/uaccess: Use unsafe wrappers for ASM GOTO
powerpc/uaccess: Use unsafe wrappers for ASM GOTO
riscv/uaccess: Use unsafe wrappers for ASM GOTO
s390/uaccess: Use unsafe wrappers for ASM GOTO
arm64: uaccess: Use unsafe wrappers for ASM GOTO
uaccess: Provide scoped user access regions
uaccess: Provide put/get_user_inline()
futex: Convert to get/put_user_inline()
x86/futex: Convert to scoped user access
select: Convert to scoped user access
arch/arm/include/asm/uaccess.h | 26 ++-
arch/arm64/include/asm/uaccess.h | 4 +-
arch/powerpc/include/asm/uaccess.h | 8 +-
arch/riscv/include/asm/uaccess.h | 8 +-
arch/s390/include/asm/uaccess.h | 4 +-
arch/x86/include/asm/futex.h | 75 ++++-----
arch/x86/include/asm/uaccess.h | 12 +-
fs/select.c | 12 +-
include/linux/uaccess.h | 314 ++++++++++++++++++++++++++++++++++++-
kernel/futex/core.c | 4 +-
kernel/futex/futex.h | 58 +------
lib/iov_iter.c | 22 ++-
lib/strncpy_from_user.c | 2 +-
lib/strnlen_user.c | 2 +-
net/core/scm.c | 20 +--
15 files changed, 421 insertions(+), 150 deletions(-)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index f90be312418e..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -283,10 +283,17 @@ extern int __put_user_8(void *, unsigned long long);
__gu_err; \
})
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
- unsigned long __gu_val; \
+ __long_type(x) __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -295,6 +302,7 @@ do { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+ case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
@@ -353,6 +361,22 @@ do { \
#define __get_user_asm_word(x, addr, err, __t) \
__get_user_asm(x, addr, err, "ldr" __t)
+#ifdef __ARMEB__
+#define __WORD0_OFFS 4
+#define __WORD1_OFFS 0
+#else
+#define __WORD0_OFFS 0
+#define __WORD1_OFFS 4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+ ({ \
+ unsigned long __w0, __w1; \
+ __get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+ __get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+ (x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
+
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 1aa4ecb73429..6490930deef8 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -422,9 +422,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
}
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() uaccess_ttbr0_disable()
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
-#define unsafe_get_user(x, ptr, label) \
+#define arch_unsafe_get_user(x, ptr, label) \
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
/*
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..784a00e681fa 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -451,7 +451,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define user_write_access_begin user_write_access_begin
#define user_write_access_end prevent_current_write_to_user
-#define unsafe_get_user(x, p, e) do { \
+#define arch_unsafe_get_user(x, p, e) do { \
__long_type(*(p)) __gu_val; \
__typeof__(*(p)) __user *__gu_addr = (p); \
\
@@ -459,7 +459,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
(x) = (__typeof__(*(p)))__gu_val; \
} while (0)
-#define unsafe_put_user(x, p, e) \
+#define arch_unsafe_put_user(x, p, e) \
__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
#define unsafe_copy_from_user(d, s, l, e) \
@@ -504,11 +504,11 @@ do { \
unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_size_goto(*((type *)(dst)), \
(__force type __user *)(src), sizeof(type), err_label)
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_size_goto(*((type *)(src)), \
(__force type __user *)(dst), sizeof(type), err_label)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index f5f4f7f85543..36bba6720c26 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -437,10 +437,10 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__clear_user(untagged_addr(to), n) : n;
}
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
@@ -460,10 +460,10 @@ static inline void user_access_restore(unsigned long enabled) { }
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.
*/
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__put_user_nocheck(x, (ptr), label)
-#define unsafe_get_user(x, ptr, label) do { \
+#define arch_unsafe_get_user(x, ptr, label) do { \
__inttype(*(ptr)) __gu_val; \
__get_user_nocheck(__gu_val, (ptr), label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 3e5b8b677057..c5e02addcd67 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -468,8 +468,8 @@ do { \
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-#define __get_kernel_nofault __mvc_kernel_nofault
-#define __put_kernel_nofault __mvc_kernel_nofault
+#define arch_get_kernel_nofault __mvc_kernel_nofault
+#define arch_put_kernel_nofault __mvc_kernel_nofault
void __cmpxchg_user_key_called_with_bad_pointer(void);
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 6e2458088800..fe5d9a10d900 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -46,38 +46,31 @@ do { \
} while(0)
static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
- u32 __user *uaddr)
+ u32 __user *uaddr)
{
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (op) {
- case FUTEX_OP_SET:
- unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ADD:
- unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
- uaddr, oparg, Efault);
- break;
- case FUTEX_OP_OR:
- unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ANDN:
- unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
- break;
- case FUTEX_OP_XOR:
- unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
- break;
- default:
- user_access_end();
- return -ENOSYS;
+ scoped_user_rw_access(uaddr, Efault) {
+ switch (op) {
+ case FUTEX_OP_SET:
+ unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_ADD:
+ unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_OR:
+ unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_ANDN:
+ unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
+ break;
+ case FUTEX_OP_XOR:
+ unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
+ break;
+ default:
+ return -ENOSYS;
+ }
}
- user_access_end();
return 0;
Efault:
- user_access_end();
return -EFAULT;
}
@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
int ret = 0;
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
- asm volatile("\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
- "2:\n"
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "r" (newval), "1" (oldval)
- : "memory"
- );
- user_access_end();
- *uval = oldval;
+ scoped_user_rw_access(uaddr, Efault) {
+ asm_inline volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "r" (newval), "1" (oldval)
+ : "memory");
+ *uval = oldval;
+ }
return ret;
+Efault:
+ return -EFAULT;
}
#endif
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 91a3fb8ae7ff..367297b188c3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -528,18 +528,18 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
#define user_access_save() smap_save()
#define user_access_restore(x) smap_restore(x)
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
do { \
__inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
@@ -618,11 +618,11 @@ do { \
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
do { \
int __kr_err; \
\
@@ -633,7 +633,7 @@ do { \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
sizeof(type), err_label)
diff --git a/fs/select.c b/fs/select.c
index 082cf60c7e23..65019b8ba3f7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -776,17 +776,13 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
- else if (!user_read_access_begin(from, sizeof(*from)))
- return -EFAULT;
- unsafe_get_user(to->p, &from->p, Efault);
- unsafe_get_user(to->size, &from->size, Efault);
- user_read_access_end();
+ scoped_user_read_access(from, Efault) {
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ }
}
return 0;
Efault:
- user_read_access_end();
return -EFAULT;
}
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1beb5b395d81..be395f5f7ee3 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/cleanup.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
@@ -35,9 +36,17 @@
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+# define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+# define masked_user_read_access_begin masked_user_access_begin
+#endif
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
+ #define masked_user_read_access_begin(src) NULL
+ #define masked_user_write_access_begin(src) NULL
#define mask_user_address(src) (src)
#endif
@@ -518,7 +527,34 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
-#ifndef __get_kernel_nofault
+#ifdef arch_get_kernel_nofault
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ */
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_get_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_put_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */
+
#define __get_kernel_nofault(dst, src, type, label) \
do { \
type __user *p = (type __force __user *)(src); \
@@ -535,7 +571,8 @@ do { \
if (__put_user(data, p)) \
goto label; \
} while (0)
-#endif
+
+#endif /* !__get_kernel_nofault */
/**
* get_kernel_nofault(): safely attempt to read from a location
@@ -549,7 +586,42 @@ do { \
copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
-#ifndef user_access_begin
+#ifdef user_access_begin
+
+#ifdef arch_unsafe_get_user
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ *
+ * Some architectures use internal local labels already, but this extra
+ * indirection here is harmless because the compiler optimizes it out
+ * completely in any case. This construct just ensures that the ASM GOTO
+ * target is always in the local scope. The C goto 'label' works correctly
+ * when leaving a cleanup() scope.
+ */
+#define unsafe_get_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_get_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define unsafe_put_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_put_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+#endif /* arch_unsafe_get_user */
+
+#else /* user_access_begin */
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
@@ -559,7 +631,8 @@ do { \
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
-#endif
+#endif /* !user_access_begin */
+
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
@@ -569,6 +642,239 @@ static inline void user_access_restore(unsigned long flags) { }
#define user_read_access_end user_access_end
#endif
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
+#define user_rw_access_begin(u, s) user_access_begin(u, s)
+#define user_rw_access_end() user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode) \
+static __always_inline void __user * \
+class_user_##_mode##_begin(void __user *ptr) \
+{ \
+ return ptr; \
+} \
+ \
+static __always_inline void \
+class_user_##_mode##_end(void __user *ptr) \
+{ \
+ user_##_mode##_access_end(); \
+} \
+ \
+DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
+ class_user_##_mode##_end(_T), \
+ class_user_##_mode##_begin(ptr), void __user *ptr) \
+ \
+static __always_inline class_user_##_mode##_access_t \
+class_user_##_mode##_access_ptr(void __user *scope) \
+{ \
+ return scope; \
+}
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_user_access_begin - Start a scoped user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * Internal helper for __scoped_user_access(). Don't use directly.
+ */
+#define __scoped_user_access_begin(mode, uptr, size, elbl) \
+({ \
+ typeof(uptr) __retptr; \
+ \
+ if (can_do_masked_user_access()) { \
+ __retptr = masked_user_##mode##_access_begin(uptr); \
+ } else { \
+ __retptr = uptr; \
+ if (!user_##mode##_access_begin(uptr, size)) \
+ goto elbl; \
+ } \
+ __retptr; \
+})
+
+/**
+ * __scoped_user_access - Open a scope for user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected. It
+ * must be placed outside the scope
+ *
+ * If the user access function inside the scope requires a fault label, it
+ * can use @elbl or a different label outside the scope, which requires
+ * that user access which is implemented with ASM GOTO has been properly
+ * wrapped. See unsafe_get_user() for reference.
+ *
+ * scoped_user_rw_access(ptr, efault) {
+ * unsafe_get_user(rval, &ptr->rval, efault);
+ * unsafe_put_user(wval, &ptr->wval, efault);
+ * }
+ * return 0;
+ * efault:
+ * return -EFAULT;
+ *
+ * The scope is internally implemented as a autoterminating nested for()
+ * loop, which can be left with 'return', 'break' and 'goto' at any
+ * point.
+ *
+ * When the scope is left user_##@_mode##_access_end() is automatically
+ * invoked.
+ *
+ * When the architecture supports masked user access and the access region
+ * which is determined by @uptr and @size is not a valid user space
+ * address, i.e. < TASK_SIZE, the scope sets the pointer to a faulting user
+ * space address and does not terminate early. This optimizes for the good
+ * case and lets the performance uncritical bad case go through the fault.
+ *
+ * The eventual modification of the pointer is limited to the scope.
+ * Outside of the scope the original pointer value is unmodified, so that
+ * the original pointer value is available for diagnostic purposes in an
+ * out of scope fault path.
+ *
+ * Nesting scoped user access into a user access scope is invalid and fails
+ * the build. Nesting into other guards, e.g. pagefault is safe.
+ *
+ * The masked variant does not check the size of the access and relies on a
+ * mapping hole (e.g. guard page) to catch an out of range pointer, the
+ * first access to user memory inside the scope has to be within
+ * @uptr ... @uptr + PAGE_SIZE - 1
+ *
+ * Don't use directly. Use scoped_masked_user_$MODE_access() instead.
+ */
+#define __scoped_user_access(mode, uptr, size, elbl) \
+for (bool done = false; !done; done = true) \
+ for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+ !done; done = true) \
+ for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
+ /* Force modified pointer usage within the scope */ \
+ for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
+
+/**
+ * scoped_user_read_access_size - Start a scoped user read access with given size
+ * @usrc: Pointer to the user space address to read from
+ * @size: Size of the access starting from @usrc
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access_size(usrc, size, elbl) \
+ __scoped_user_access(read, usrc, size, elbl)
+
+/**
+ * scoped_user_read_access - Start a scoped user read access
+ * @usrc: Pointer to the user space address to read from
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @usrc is determined via sizeof(*@usrc)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access(usrc, elbl) \
+ scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl)
+
+/**
+ * scoped_user_write_access_size - Start a scoped user write access with given size
+ * @udst: Pointer to the user space address to write to
+ * @size: Size of the access starting from @udst
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access_size(udst, size, elbl) \
+ __scoped_user_access(write, udst, size, elbl)
+
+/**
+ * scoped_user_write_access - Start a scoped user write access
+ * @udst: Pointer to the user space address to write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @udst is determined via sizeof(*@udst)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access(udst, elbl) \
+ scoped_user_write_access_size(udst, sizeof(*(udst)), elbl)
+
+/**
+ * scoped_user_rw_access_size - Start a scoped user read/write access with given size
+ * @uptr Pointer to the user space address to read from and write to
+ * @size: Size of the access starting from @uptr
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access_size(uptr, size, elbl) \
+ __scoped_user_access(rw, uptr, size, elbl)
+
+/**
+ * scoped_user_rw_access - Start a scoped user read/write access
+ * @uptr Pointer to the user space address to read from and write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @uptr is determined via sizeof(*@uptr)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access(uptr, elbl) \
+ scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl)
+
+/**
+ * get_user_inline - Read user data inlined
+ * @val: The variable to store the value read from user memory
+ * @usrc: Pointer to the user space memory to read from
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of get_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define get_user_inline(val, usrc) \
+({ \
+ __label__ efault; \
+ typeof(usrc) _tmpsrc = usrc; \
+ int _ret = 0; \
+ \
+ scoped_user_read_access(_tmpsrc, efault) \
+ unsafe_get_user(val, _tmpsrc, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
+
+/**
+ * put_user_inline - Write to user memory inlined
+ * @val: The value to write
+ * @udst: Pointer to the user space memory to write to
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of put_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define put_user_inline(val, udst) \
+({ \
+ __label__ efault; \
+ typeof(udst) _tmpdst = udst; \
+ int _ret = 0; \
+ \
+ scoped_user_write_access(_tmpdst, efault) \
+ unsafe_put_user(val, _tmpdst, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
+
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 125804fbb5cb..ebcccb16ae0b 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
if (flags & FLAGS_NUMA) {
u32 __user *naddr = (void *)uaddr + size / 2;
- if (futex_get_value(&node, naddr))
+ if (get_user_inline(node, naddr))
return -EFAULT;
if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
node = numa_node_id();
node_updated = true;
}
- if (node_updated && futex_put_value(node, naddr))
+ if (node_updated && put_user_inline(node, naddr))
return -EFAULT;
}
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 2cd57096c38e..30c2afa03889 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
return ret;
}
-/*
- * This does a plain atomic user space read, and the user pointer has
- * already been verified earlier by get_futex_key() to be both aligned
- * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
- *
- * We still want to avoid any speculation, and while __get_user() is
- * the traditional model for this, it's actually slower than doing
- * this manually these days.
- *
- * We could just have a per-architecture special function for it,
- * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
- * than force everybody to do that, write it out long-hand using
- * the low-level user-access infrastructure.
- *
- * This looks a bit overkill, but generally just results in a couple
- * of instructions.
- */
-static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
-{
- u32 val;
-
- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
- else if (!user_read_access_begin(from, sizeof(*from)))
- return -EFAULT;
- unsafe_get_user(val, from, Efault);
- user_read_access_end();
- *dest = val;
- return 0;
-Efault:
- user_read_access_end();
- return -EFAULT;
-}
-
-static __always_inline int futex_put_value(u32 val, u32 __user *to)
-{
- if (can_do_masked_user_access())
- to = masked_user_access_begin(to);
- else if (!user_write_access_begin(to, sizeof(*to)))
- return -EFAULT;
- unsafe_put_user(val, to, Efault);
- user_write_access_end();
- return 0;
-Efault:
- user_write_access_end();
- return -EFAULT;
-}
-
+/* Read from user memory with pagefaults disabled */
static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
{
- int ret;
-
- pagefault_disable();
- ret = futex_get_value(dest, from);
- pagefault_enable();
-
- return ret;
+ guard(pagefault)();
+ return get_user_inline(*dest, from);
}
extern void __futex_unqueue(struct futex_q *q);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 2fe66a6b8789..896760bad455 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -49,12 +49,24 @@ size_t copy_from_user_iter(void __user *iter_from, size_t progress,
if (should_fail_usercopy())
return len;
- if (access_ok(iter_from, len)) {
- to += progress;
- instrument_copy_from_user_before(to, iter_from, len);
- res = raw_copy_from_user(to, iter_from, len);
- instrument_copy_from_user_after(to, iter_from, len, res);
+ if (can_do_masked_user_access()) {
+ iter_from = mask_user_address(iter_from);
+ } else {
+ if (!access_ok(iter_from, len))
+ return res;
+
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
}
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
+
return res;
}
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 6dc234913dd5..5bb752ff7c61 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -126,7 +126,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (can_do_masked_user_access()) {
long retval;
- src = masked_user_access_begin(src);
+ src = masked_user_read_access_begin(src);
retval = do_strncpy_from_user(dst, src, count, count);
user_read_access_end();
return retval;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 6e489f9e90f1..4a6574b67f82 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -99,7 +99,7 @@ long strnlen_user(const char __user *str, long count)
if (can_do_masked_user_access()) {
long retval;
- str = masked_user_access_begin(str);
+ str = masked_user_read_access_begin(str);
retval = do_strnlen_user(str, count, count);
user_read_access_end();
return retval;
diff --git a/net/core/scm.c b/net/core/scm.c
index 66eaee783e8b..cd87f66671aa 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -273,17 +273,13 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
check_object_size(data, cmlen - sizeof(*cm), true);
- if (can_do_masked_user_access())
- cm = masked_user_access_begin(cm);
- else if (!user_write_access_begin(cm, cmlen))
- goto efault;
-
- unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
- unsafe_put_user(level, &cm->cmsg_level, efault_end);
- unsafe_put_user(type, &cm->cmsg_type, efault_end);
- unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
- cmlen - sizeof(*cm), efault_end);
- user_write_access_end();
+ scoped_user_write_access_size(cm, cmlen, efault) {
+ unsafe_put_user(cmlen, &cm->cmsg_len, efault);
+ unsafe_put_user(level, &cm->cmsg_level, efault);
+ unsafe_put_user(type, &cm->cmsg_type, efault);
+ unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
+ cmlen - sizeof(*cm), efault);
+ }
} else {
struct cmsghdr *cm = msg->msg_control;
@@ -301,8 +297,6 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
msg->msg_controllen -= cmlen;
return 0;
-efault_end:
- user_write_access_end();
efault:
return -EFAULT;
}
Linus,
please pull the latest timers/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-urgent-2025-12-01
up to: c7418164b463: timekeeping: Fix error code in tk_aux_sysfs_init()
A delayed bugfix for the timekeeping code which makes the recent fix
plugging the resource leak return the correct error code.
Thanks,
tglx
------------------>
Dan Carpenter (1):
timekeeping: Fix error code in tk_aux_sysfs_init()
kernel/time/timekeeping.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 08e0943b54da..4790da895203 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3073,8 +3073,10 @@ static int __init tk_aux_sysfs_init(void)
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
- if (!clk)
+ if (!clk) {
+ ret = -ENOMEM;
goto err_clean;
+ }
ret = sysfs_create_group(clk, &aux_clock_enable_attr_group);
if (ret)
* Thomas Gleixner <tglx@linutronix.de> wrote:

> Linus,
>
> please pull the latest timers/urgent branch from:
>
>    git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-urgent-2025-12-01
>
> up to: c7418164b463: timekeeping: Fix error code in tk_aux_sysfs_init()
>
> A delayed bugfix for the timekeeping code which makes the recent fix
> plugging the resource leak return the correct error code.
>
> Thanks,
>
>         tglx
>
> ------------------>
> Dan Carpenter (1):
>       timekeeping: Fix error code in tk_aux_sysfs_init()
>
>  kernel/time/timekeeping.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)

So JFYI, this fix is already upstream:

  e69c7c175115 Merge tag 'timers_urgent_for_v6.18_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Boris sent timers/urgent yesterday morning.

Thanks,

        Ingo
Linus,
please pull the latest core/rseq branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core-rseq-2025-11-30
up to: 653fda7ae73d: sched/mmcid: Switch over to the new mechanism
A large overhaul of the restartable sequences and CID management:
The recent enablement of RSEQ in glibc resulted in regressions which are
caused by the related overhead. It turned out that the decision to invoke
the exit-to-user work was not really a decision; more or less every
context switch triggered it. There is a long list of small issues which
add up nicely and result in a 3-4% regression in I/O benchmarks.
The other detail which caused issues due to extra work in context switch
and task migration is the CID (memory context ID) management. It also
requires task work to consolidate the CID space, which is executed in the
context of an arbitrary task and results in sporadic, uncontrolled exit
latencies.
The rewrite addresses this by:
- Removing deprecated and long unsupported functionality
- Moving the related data into dedicated data structures which are
optimized for fast path processing.
- Caching values so actual decisions can be made
- Replacing the current implementation with an optimized inlined variant.
- Separating fast and slow path for architectures which use the generic
entry code, so that only fault and error handling goes into the
TIF_NOTIFY_RESUME handler.
- Rewriting the CID management so that it becomes mostly invisible in the
context switch path. That moves the work of switching modes into the
fork/exit path, which is a reasonable tradeoff. That work is only
required when a process creates more threads than there are CPUs in the
cpuset it is allowed to run on, or when enough threads exit after that.
An artificial thread pool benchmark which triggers this did not degrade;
it actually improved significantly.
The main effect in migration-heavy scenarios is that runqueue lock hold
time, and therefore contention, goes down significantly.
Note: This contains parts of the scoped uaccess series.
Thanks,
tglx
------------------>
Dan Carpenter (1):
rseq: Delete duplicate if statement in rseq_virt_userspace_exit()
Peter Zijlstra (2):
cleanup: Always inline everything
x86/ptrace: Always inline trivial accessors
Thomas Gleixner (63):
ARM: uaccess: Implement missing __get_user_asm_dword()
uaccess: Provide ASM GOTO safe wrappers for unsafe_*_user()
x86/uaccess: Use unsafe wrappers for ASM GOTO
powerpc/uaccess: Use unsafe wrappers for ASM GOTO
riscv/uaccess: Use unsafe wrappers for ASM GOTO
s390/uaccess: Use unsafe wrappers for ASM GOTO
arm64: uaccess: Use unsafe wrappers for ASM GOTO
uaccess: Provide scoped user access regions
uaccess: Provide put/get_user_inline()
futex: Convert to get/put_user_inline()
x86/futex: Convert to scoped user access
select: Convert to scoped user access
rseq: Avoid pointless evaluation in __rseq_notify_resume()
rseq: Condense the inline stubs
rseq: Move algorithm comment to top
rseq: Remove the ksig argument from rseq_handle_notify_resume()
rseq: Simplify registration
rseq: Simplify the event notification
rseq, virt: Retrigger RSEQ after vcpu_run()
rseq: Avoid CPU/MM CID updates when no event pending
rseq: Introduce struct rseq_data
entry: Clean up header
entry: Remove syscall_enter_from_user_mode_prepare()
entry: Inline irqentry_enter/exit_from/to_user_mode()
sched: Move MM CID related functions to sched.h
rseq: Cache CPU ID and MM CID values
rseq: Record interrupt from user space
rseq: Provide tracepoint wrappers for inline code
rseq: Expose lightweight statistics in debugfs
rseq: Provide static branch for runtime debugging
rseq: Provide and use rseq_update_user_cs()
rseq: Replace the original debug implementation
rseq: Make exit debugging static branch based
rseq: Use static branch for syscall exit debug when GENERIC_IRQ_ENTRY=y
rseq: Provide and use rseq_set_ids()
rseq: Separate the signal delivery path
rseq: Rework the TIF_NOTIFY handler
rseq: Optimize event setting
rseq: Implement fast path for exit to user
rseq: Switch to fast path processing on exit to user
entry: Split up exit_to_user_mode_prepare()
rseq: Split up rseq_exit_to_user_mode()
rseq: Switch to TIF_RSEQ if supported
sched/mmcid: Revert the complex CID management
sched/mmcid: Use proper data structures
sched/mmcid: Cacheline align MM CID storage
sched: Fixup whitespace damage
sched/mmcid: Move scheduler code out of global header
sched/mmcid: Prevent pointless work in mm_update_cpus_allowed()
cpumask: Introduce cpumask_weighted_or()
sched/mmcid: Use cpumask_weighted_or()
cpumask: Cache num_possible_cpus()
sched/mmcid: Convert mm CID mask to a bitmap
signal: Move MMCID exit out of sighand lock
sched/mmcid: Move initialization out of line
sched/mmcid: Provide precomputed maximal value
sched/mmcid: Serialize sched_mm_cid_fork()/exit() with a mutex
sched/mmcid: Introduce per task/CPU ownership infrastructure
sched/mmcid: Provide new scheduler CID mechanism
sched/mmcid: Provide CID ownership mode fixup functions
irqwork: Move data struct to a types header
sched/mmcid: Implement deferred mode change
sched/mmcid: Switch over to the new mechanism
Documentation/admin-guide/kernel-parameters.txt | 4 +
arch/arm/include/asm/uaccess.h | 26 +-
arch/arm64/include/asm/uaccess.h | 4 +-
arch/arm64/kernel/entry-common.c | 2 +-
arch/powerpc/include/asm/uaccess.h | 8 +-
arch/riscv/include/asm/uaccess.h | 8 +-
arch/s390/include/asm/uaccess.h | 4 +-
arch/x86/entry/syscall_32.c | 3 +-
arch/x86/include/asm/futex.h | 75 +--
arch/x86/include/asm/ptrace.h | 20 +-
arch/x86/include/asm/uaccess.h | 12 +-
drivers/hv/mshv_root_main.c | 3 +
fs/binfmt_elf.c | 2 +-
fs/exec.c | 2 +-
fs/select.c | 12 +-
include/asm-generic/thread_info_tif.h | 3 +
include/linux/bitmap.h | 15 +
include/linux/cleanup.h | 30 +-
include/linux/cpumask.h | 26 +-
include/linux/entry-common.h | 38 +-
include/linux/irq-entry-common.h | 75 ++-
include/linux/irq_work.h | 9 +-
include/linux/irq_work_types.h | 14 +
include/linux/mm.h | 25 -
include/linux/mm_types.h | 128 +---
include/linux/resume_user_mode.h | 2 +-
include/linux/rseq.h | 214 +++---
include/linux/rseq_entry.h | 616 ++++++++++++++++++
include/linux/rseq_types.h | 164 +++++
include/linux/sched.h | 57 +-
include/linux/thread_info.h | 5 +
include/linux/uaccess.h | 314 ++++++++-
include/trace/events/rseq.h | 4 +-
include/uapi/linux/rseq.h | 21 +-
init/Kconfig | 28 +-
init/init_task.c | 3 +
kernel/cpu.c | 19 +
kernel/entry/common.c | 39 +-
kernel/entry/syscall-common.c | 8 -
kernel/exit.c | 1 +
kernel/fork.c | 7 +-
kernel/futex/core.c | 4 +-
kernel/futex/futex.h | 58 +-
kernel/ptrace.c | 6 +-
kernel/rseq.c | 655 ++++++++-----------
kernel/sched/core.c | 827 ++++++++++++------------
kernel/sched/membarrier.c | 8 +-
kernel/sched/sched.h | 392 +++++------
kernel/signal.c | 2 -
lib/bitmap.c | 6 +
virt/kvm/kvm_main.c | 7 +
51 files changed, 2458 insertions(+), 1557 deletions(-)
create mode 100644 include/linux/irq_work_types.h
create mode 100644 include/linux/rseq_entry.h
create mode 100644 include/linux/rseq_types.h
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6c42061ca20e..e63827475792 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6500,6 +6500,10 @@
Memory area to be used by remote processor image,
managed by CMA.
+ rseq_debug= [KNL] Enable or disable restartable sequence
+ debug mode. Defaults to CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE.
+ Format: <bool>
+
rt_group_sched= [KNL] Enable or disable SCHED_RR/FIFO group scheduling
when CONFIG_RT_GROUP_SCHED=y. Defaults to
!CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED.
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index f90be312418e..d6ae80b5df36 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -283,10 +283,17 @@ extern int __put_user_8(void *, unsigned long long);
__gu_err; \
})
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
#define __get_user_err(x, ptr, err, __t) \
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
- unsigned long __gu_val; \
+ __long_type(x) __gu_val; \
unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -295,6 +302,7 @@ do { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+ case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
default: (__gu_val) = __get_user_bad(); \
} \
uaccess_restore(__ua_flags); \
@@ -353,6 +361,22 @@ do { \
#define __get_user_asm_word(x, addr, err, __t) \
__get_user_asm(x, addr, err, "ldr" __t)
+#ifdef __ARMEB__
+#define __WORD0_OFFS 4
+#define __WORD1_OFFS 0
+#else
+#define __WORD0_OFFS 0
+#define __WORD1_OFFS 4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+ ({ \
+ unsigned long __w0, __w1; \
+ __get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+ __get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+ (x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
+
#define __put_user_switch(x, ptr, __err, __fn) \
do { \
const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 1aa4ecb73429..6490930deef8 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -422,9 +422,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
}
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() uaccess_ttbr0_disable()
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
-#define unsafe_get_user(x, ptr, label) \
+#define arch_unsafe_get_user(x, ptr, label) \
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
/*
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index a9c81715ce59..0a97e2621f60 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -100,7 +100,7 @@ static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{
local_irq_disable();
- exit_to_user_mode_prepare(regs);
+ exit_to_user_mode_prepare_legacy(regs);
local_daif_mask();
mte_check_tfsr_exit();
exit_to_user_mode();
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..784a00e681fa 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -451,7 +451,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define user_write_access_begin user_write_access_begin
#define user_write_access_end prevent_current_write_to_user
-#define unsafe_get_user(x, p, e) do { \
+#define arch_unsafe_get_user(x, p, e) do { \
__long_type(*(p)) __gu_val; \
__typeof__(*(p)) __user *__gu_addr = (p); \
\
@@ -459,7 +459,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
(x) = (__typeof__(*(p)))__gu_val; \
} while (0)
-#define unsafe_put_user(x, p, e) \
+#define arch_unsafe_put_user(x, p, e) \
__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
#define unsafe_copy_from_user(d, s, l, e) \
@@ -504,11 +504,11 @@ do { \
unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_size_goto(*((type *)(dst)), \
(__force type __user *)(src), sizeof(type), err_label)
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_size_goto(*((type *)(src)), \
(__force type __user *)(dst), sizeof(type), err_label)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index f5f4f7f85543..36bba6720c26 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -437,10 +437,10 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__clear_user(untagged_addr(to), n) : n;
}
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
@@ -460,10 +460,10 @@ static inline void user_access_restore(unsigned long enabled) { }
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.
*/
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__put_user_nocheck(x, (ptr), label)
-#define unsafe_get_user(x, ptr, label) do { \
+#define arch_unsafe_get_user(x, ptr, label) do { \
__inttype(*(ptr)) __gu_val; \
__get_user_nocheck(__gu_val, (ptr), label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 3e5b8b677057..c5e02addcd67 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -468,8 +468,8 @@ do { \
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
-#define __get_kernel_nofault __mvc_kernel_nofault
-#define __put_kernel_nofault __mvc_kernel_nofault
+#define arch_get_kernel_nofault __mvc_kernel_nofault
+#define arch_put_kernel_nofault __mvc_kernel_nofault
void __cmpxchg_user_key_called_with_bad_pointer(void);
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 2b15ea17bb7c..a67a644d0cfe 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -274,9 +274,10 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
* fetch EBP before invoking any of the syscall entry work
* functions.
*/
- syscall_enter_from_user_mode_prepare(regs);
+ enter_from_user_mode(regs);
instrumentation_begin();
+ local_irq_enable();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
/*
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 6e2458088800..fe5d9a10d900 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -46,38 +46,31 @@ do { \
} while(0)
static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
- u32 __user *uaddr)
+ u32 __user *uaddr)
{
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
-
- switch (op) {
- case FUTEX_OP_SET:
- unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ADD:
- unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
- uaddr, oparg, Efault);
- break;
- case FUTEX_OP_OR:
- unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
- break;
- case FUTEX_OP_ANDN:
- unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
- break;
- case FUTEX_OP_XOR:
- unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
- break;
- default:
- user_access_end();
- return -ENOSYS;
+ scoped_user_rw_access(uaddr, Efault) {
+ switch (op) {
+ case FUTEX_OP_SET:
+ unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_ADD:
+ unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_OR:
+ unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
+ break;
+ case FUTEX_OP_ANDN:
+ unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
+ break;
+ case FUTEX_OP_XOR:
+ unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
+ break;
+ default:
+ return -ENOSYS;
+ }
}
- user_access_end();
return 0;
Efault:
- user_access_end();
return -EFAULT;
}
@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
{
int ret = 0;
- if (can_do_masked_user_access())
- uaddr = masked_user_access_begin(uaddr);
- else if (!user_access_begin(uaddr, sizeof(u32)))
- return -EFAULT;
- asm volatile("\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
- "2:\n"
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "r" (newval), "1" (oldval)
- : "memory"
- );
- user_access_end();
- *uval = oldval;
+ scoped_user_rw_access(uaddr, Efault) {
+ asm_inline volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "r" (newval), "1" (oldval)
+ : "memory");
+ *uval = oldval;
+ }
return ret;
+Efault:
+ return -EFAULT;
}
#endif
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 50f75467f73d..b5dec859bc75 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,12 +187,12 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
-static inline unsigned long regs_return_value(struct pt_regs *regs)
+static __always_inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+static __always_inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ax = rc;
}
@@ -277,34 +277,34 @@ static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs)
}
#endif
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
+static __always_inline unsigned long instruction_pointer(struct pt_regs *regs)
{
return regs->ip;
}
-static inline void instruction_pointer_set(struct pt_regs *regs,
- unsigned long val)
+static __always_inline
+void instruction_pointer_set(struct pt_regs *regs, unsigned long val)
{
regs->ip = val;
}
-static inline unsigned long frame_pointer(struct pt_regs *regs)
+static __always_inline unsigned long frame_pointer(struct pt_regs *regs)
{
return regs->bp;
}
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+static __always_inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
-static inline void user_stack_pointer_set(struct pt_regs *regs,
- unsigned long val)
+static __always_inline
+void user_stack_pointer_set(struct pt_regs *regs, unsigned long val)
{
regs->sp = val;
}
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 91a3fb8ae7ff..367297b188c3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -528,18 +528,18 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
#define user_access_save() smap_save()
#define user_access_restore(x) smap_restore(x)
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
do { \
__inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
@@ -618,11 +618,11 @@ do { \
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
do { \
int __kr_err; \
\
@@ -633,7 +633,7 @@ do { \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
sizeof(type), err_label)
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index e3b2bd417c46..a21a0eb0f5be 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -29,6 +29,7 @@
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>
#include <linux/vmalloc.h>
+#include <linux/rseq.h>
#include "mshv_eventfd.h"
#include "mshv.h"
@@ -560,6 +561,8 @@ static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
}
} while (!vp->run.flags.intercept_suspend);
+ rseq_virt_userspace_exit();
+
return ret;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e4653bb99946..3eb734c192e9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -46,7 +46,7 @@
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
-#include <linux/rseq.h>
+#include <uapi/linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>
diff --git a/fs/exec.c b/fs/exec.c
index 4298e7e08d5d..90e47eb156ab 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1775,7 +1775,7 @@ static int bprm_execve(struct linux_binprm *bprm)
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
- rseq_set_notify_resume(current);
+ rseq_force_update();
current->in_execve = 0;
return retval;
diff --git a/fs/select.c b/fs/select.c
index 082cf60c7e23..65019b8ba3f7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -776,17 +776,13 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
{
// the path is hot enough for overhead of copy_from_user() to matter
if (from) {
- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
- else if (!user_read_access_begin(from, sizeof(*from)))
- return -EFAULT;
- unsafe_get_user(to->p, &from->p, Efault);
- unsafe_get_user(to->size, &from->size, Efault);
- user_read_access_end();
+ scoped_user_read_access(from, Efault) {
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ }
}
return 0;
Efault:
- user_read_access_end();
return -EFAULT;
}
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
index ee3793e9b1a4..da1610a78f92 100644
--- a/include/asm-generic/thread_info_tif.h
+++ b/include/asm-generic/thread_info_tif.h
@@ -45,4 +45,7 @@
# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
#endif
+#define TIF_RSEQ 11 // Run RSEQ fast path
+#define _TIF_RSEQ BIT(TIF_RSEQ)
+
#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 595217b7a6e7..b0395e4ccf90 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@ struct device;
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns the Hamming weight of *dst
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
@@ -165,6 +166,8 @@ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
@@ -337,6 +340,18 @@ void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
+static __always_inline
+unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 | *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_or(dst, src1, src2, nbits);
+ }
+}
+
static __always_inline
void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
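bitmap_weighted_or() fuses the OR and the population count into a single pass,
so callers no longer need a separate bitmap_weight() walk over the result. A
small usage sketch (hypothetical helper, not from this series):

    /* Merge two masks and report whether the union is empty, in one pass */
    static bool masks_union_empty(unsigned long *dst, const unsigned long *a,
    			      const unsigned long *b, unsigned int nbits)
    {
    	return bitmap_weighted_or(dst, a, b, nbits) == 0;
    }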
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 2573585b7f06..d1806ac5342c 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -208,7 +208,7 @@
*/
#define DEFINE_FREE(_name, _type, _free) \
- static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+ static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
#define __free(_name) __cleanup(__free_##_name)
@@ -220,7 +220,7 @@
__val; \
})
-static inline __must_check
+static __always_inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }
@@ -274,16 +274,16 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
-static inline void class_##_name##_destructor(_type *p) \
+static __always_inline void class_##_name##_destructor(_type *p) \
{ _type _T = *p; _exit; } \
-static inline _type class_##_name##_constructor(_init_args) \
+static __always_inline _type class_##_name##_constructor(_init_args) \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
typedef class_##_name##_t class_##_name##ext##_t; \
-static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
-static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
@@ -347,7 +347,7 @@ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
})
#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
- static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ \
void *_ptr = (void *)(__force unsigned long)*(_exp); \
if (IS_ERR(_ptr)) { \
@@ -355,7 +355,7 @@ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
} \
return _ptr; \
} \
- static inline int class_##_name##_lock_err(class_##_name##_t *_T) \
+ static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
{ \
long _rc = (__force unsigned long)*(_exp); \
if (!_rc) { \
@@ -384,9 +384,9 @@ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
EXTEND_CLASS(_name, _ext, \
({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
class_##_name##_t _T) \
- static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
{ return class_##_name##_lock_ptr(_T); } \
- static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
{ return class_##_name##_lock_err(_T); }
/*
@@ -466,7 +466,7 @@ typedef struct { \
__VA_ARGS__; \
} class_##_name##_t; \
\
-static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
@@ -474,7 +474,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
-static inline class_##_name##_t class_##_name##_constructor(_type *l) \
+static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
_lock; \
@@ -482,7 +482,7 @@ static inline class_##_name##_t class_##_name##_constructor(_type *l) \
}
#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
-static inline class_##_name##_t class_##_name##_constructor(void) \
+static __always_inline class_##_name##_t class_##_name##_constructor(void) \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
@@ -508,9 +508,9 @@ __DEFINE_LOCK_GUARD_0(_name, _lock)
if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
_t; }), \
typeof_member(class_##_name##_t, lock) l) \
- static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
{ return class_##_name##_lock_ptr(_T); } \
- static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
{ return class_##_name##_lock_err(_T); }
#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index ff8f41ab7ce6..66694ee8d86e 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -126,6 +126,7 @@ extern struct cpumask __cpu_dying_mask;
#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
+extern unsigned int __num_possible_cpus;
extern cpumask_t cpus_booted_once_mask;
@@ -728,6 +729,22 @@ void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
cpumask_bits(src2p), small_cpumask_bits);
}
+/**
+ * cpumask_weighted_or - *dstp = *src1p | *src2p and return the weight of the result
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Return: The number of bits set in the resulting cpumask @dstp
+ */
+static __always_inline
+unsigned int cpumask_weighted_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_weighted_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
+}
+
/**
* cpumask_xor - *dstp = *src1p ^ *src2p
* @dstp: the cpumask result
@@ -1136,13 +1153,13 @@ void init_cpu_possible(const struct cpumask *src);
#define __assign_cpu(cpu, mask, val) \
__assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_possible(unsigned int cpu, bool possible);
/**
* to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
@@ -1195,7 +1212,12 @@ static __always_inline unsigned int num_online_cpus(void)
{
return raw_atomic_read(&__num_online_cpus);
}
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+
+static __always_inline unsigned int num_possible_cpus(void)
+{
+ return __num_possible_cpus;
+}
+
#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
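With the possible-CPU count cached in __num_possible_cpus, num_possible_cpus()
becomes a plain load instead of a cpumask_weight() walk, and set_cpu_possible()
moves out of line to keep the cached count coherent with the mask. The
out-of-line helper presumably looks roughly like this (its body is not part of
this hunk; presumably it sits in kernel/cpu.c next to set_cpu_online()):

    void set_cpu_possible(unsigned int cpu, bool possible)
    {
    	assign_cpu(cpu, &__cpu_possible_mask, possible);
    	__num_possible_cpus = cpumask_weight(cpu_possible_mask);
    }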
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 7177436f0f9e..87efb38b7081 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -3,11 +3,11 @@
#define __LINUX_ENTRYCOMMON_H
#include <linux/irq-entry-common.h>
+#include <linux/livepatch.h>
#include <linux/ptrace.h>
+#include <linux/resume_user_mode.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
-#include <linux/livepatch.h>
-#include <linux/resume_user_mode.h>
#include <asm/entry-common.h>
#include <asm/syscall.h>
@@ -37,6 +37,7 @@
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
ARCH_SYSCALL_WORK_ENTER)
+
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -44,25 +45,7 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-/**
- * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from architecture specific syscall entry code with interrupts
- * disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct, interrupts are enabled and the
- * subsequent functions can be instrumented.
- *
- * This handles lockdep, RCU (context tracking) and tracing state, i.e.
- * the functionality provided by enter_from_user_mode().
- *
- * This is invoked when there is extra architecture specific functionality
- * to be done between establishing state and handling user mode entry work.
- */
-void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
-
-long syscall_trace_enter(struct pt_regs *regs, long syscall,
- unsigned long work);
+long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -71,8 +54,8 @@ long syscall_trace_enter(struct pt_regs *regs, long syscall,
* @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
- * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
- * architecture specific work.
+ * enabled, after invoking enter_from_user_mode(), enabling interrupts and
+ * performing extra architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -108,8 +91,9 @@ static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *re
* function returns all state is correct, interrupts are enabled and the
* subsequent functions can be instrumented.
*
- * This is combination of syscall_enter_from_user_mode_prepare() and
- * syscall_enter_from_user_mode_work().
+ * This is the combination of enter_from_user_mode() and
+ * syscall_enter_from_user_mode_work() to be used when there is no
+ * architecture specific work to be done between the two.
*
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
@@ -162,7 +146,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
local_irq_enable();
}
- rseq_syscall(regs);
+ rseq_debug_syscall_return(regs);
/*
* Do one-time syscall specific work. If these work items are
@@ -172,7 +156,7 @@ static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
if (unlikely(work & SYSCALL_WORK_EXIT))
syscall_exit_work(regs, work);
local_irq_disable_exit_to_user();
- exit_to_user_mode_prepare(regs);
+ syscall_exit_to_user_mode_prepare(regs);
}
/**
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
index d643c7c87822..72e3f7a59469 100644
--- a/include/linux/irq-entry-common.h
+++ b/include/linux/irq-entry-common.h
@@ -2,11 +2,12 @@
#ifndef __LINUX_IRQENTRYCOMMON_H
#define __LINUX_IRQENTRYCOMMON_H
+#include <linux/context_tracking.h>
+#include <linux/kmsan.h>
+#include <linux/rseq_entry.h>
#include <linux/static_call_types.h>
#include <linux/syscalls.h>
-#include <linux/context_tracking.h>
#include <linux/tick.h>
-#include <linux/kmsan.h>
#include <linux/unwind_deferred.h>
#include <asm/entry-common.h>
@@ -29,7 +30,7 @@
#define EXIT_TO_USER_MODE_WORK \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
- _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
ARCH_EXIT_TO_USER_MODE_WORK)
/**
@@ -67,6 +68,7 @@ static __always_inline bool arch_in_rcu_eqs(void) { return false; }
/**
* enter_from_user_mode - Establish state when coming from user mode
+ * @regs: Pointer to current's pt_regs
*
* Syscall/interrupt entry disables interrupts, but user mode is traced as
* interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
@@ -195,14 +197,11 @@ static __always_inline void arch_exit_to_user_mode(void) { }
*/
void arch_do_signal_or_restart(struct pt_regs *regs);
-/**
- * exit_to_user_mode_loop - do any pending work before leaving to user space
- */
-unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
- unsigned long ti_work);
+/* Handle pending TIF work */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
/**
- * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
* @regs: Pointer to pt_regs on entry stack
*
* 1) check that interrupts are disabled
@@ -210,8 +209,10 @@ unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
* 3) call exit_to_user_mode_loop() if any flags from
* EXIT_TO_USER_MODE_WORK are set
* 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly; use the syscall_/irqentry_ prefixed variants below
*/
-static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long ti_work;
@@ -225,13 +226,52 @@ static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
ti_work = exit_to_user_mode_loop(regs, ti_work);
arch_exit_to_user_mode_prepare(regs, ti_work);
+}
+static __always_inline void __exit_to_user_mode_validate(void)
+{
/* Ensure that kernel state is sane for a return to userspace */
kmap_assert_nomap();
lockdep_assert_irqs_disabled();
lockdep_sys_exit();
}
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_exit_to_user_mode_legacy();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_syscall_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_irqentry_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
@@ -274,7 +314,11 @@ static __always_inline void exit_to_user_mode(void)
*
* The function establishes state (lockdep, RCU (context tracking), tracing)
*/
-void irqentry_enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ rseq_note_user_irq_entry();
+}
/**
* irqentry_exit_to_user_mode - Interrupt exit work
@@ -289,7 +333,13 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs);
* Interrupt exit is not invoking #1 which is the syscall specific one time
* work.
*/
-void irqentry_exit_to_user_mode(struct pt_regs *regs);
+static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ irqentry_exit_to_user_mode_prepare(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
#ifndef irqentry_state
/**
@@ -354,6 +404,7 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
* Conditional reschedule with additional sanity checks.
*/
void raw_irqentry_exit_cond_resched(void);
+
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 136f2980cba3..c5afd053ae32 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -2,8 +2,9 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/smp_types.h>
+#include <linux/irq_work_types.h>
#include <linux/rcuwait.h>
+#include <linux/smp_types.h>
/*
* An entry can be in one of four states:
@@ -14,12 +15,6 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-struct irq_work {
- struct __call_single_node node;
- void (*func)(struct irq_work *);
- struct rcuwait irqwait;
-};
-
#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
.node = { .u_flags = (_flags), }, \
.func = (_func), \
diff --git a/include/linux/irq_work_types.h b/include/linux/irq_work_types.h
new file mode 100644
index 000000000000..73abec5bb06e
--- /dev/null
+++ b/include/linux/irq_work_types.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQ_WORK_TYPES_H
+#define _LINUX_IRQ_WORK_TYPES_H
+
+#include <linux/smp_types.h>
+#include <linux/types.h>
+
+struct irq_work {
+ struct __call_single_node node;
+ void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
+};
+
+#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d16b33bacc32..17cfbba9914c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2401,31 +2401,6 @@ struct zap_details {
/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
-#ifdef CONFIG_SCHED_MM_CID
-void sched_mm_cid_before_execve(struct task_struct *t);
-void sched_mm_cid_after_execve(struct task_struct *t);
-void sched_mm_cid_fork(struct task_struct *t);
-void sched_mm_cid_exit_signals(struct task_struct *t);
-static inline int task_mm_cid(struct task_struct *t)
-{
- return t->mm_cid;
-}
-#else
-static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_fork(struct task_struct *t) { }
-static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
-static inline int task_mm_cid(struct task_struct *t)
-{
- /*
- * Use the processor id as a fall-back when the mm cid feature is
- * disabled. This provides functional per-cpu data structure accesses
- * in user-space, althrough it won't provide the memory usage benefits.
- */
- return raw_smp_processor_id();
-}
-#endif
-
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 90e5790c318f..3b7d05e7169c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -20,6 +20,7 @@
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
#include <linux/types.h>
+#include <linux/rseq_types.h>
#include <linux/bitmap.h>
#include <asm/mmu.h>
@@ -922,14 +923,6 @@ struct vm_area_struct {
#define vma_policy(vma) NULL
#endif
-#ifdef CONFIG_SCHED_MM_CID
-struct mm_cid {
- u64 time;
- int cid;
- int recent_cid;
-};
-#endif
-
/*
* Opaque type representing current mm_struct flag state. Must be accessed via
* mm_flags_xxx() helper functions.
@@ -991,44 +984,9 @@ struct mm_struct {
*/
atomic_t mm_users;
-#ifdef CONFIG_SCHED_MM_CID
- /**
- * @pcpu_cid: Per-cpu current cid.
- *
- * Keep track of the currently allocated mm_cid for each cpu.
- * The per-cpu mm_cid values are serialized by their respective
- * runqueue locks.
- */
- struct mm_cid __percpu *pcpu_cid;
- /*
- * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
- *
- * When the next mm_cid scan is due (in jiffies).
- */
- unsigned long mm_cid_next_scan;
- /**
- * @nr_cpus_allowed: Number of CPUs allowed for mm.
- *
- * Number of CPUs allowed in the union of all mm's
- * threads allowed CPUs.
- */
- unsigned int nr_cpus_allowed;
- /**
- * @max_nr_cid: Maximum number of allowed concurrency
- * IDs allocated.
- *
- * Track the highest number of allowed concurrency IDs
- * allocated for the mm.
- */
- atomic_t max_nr_cid;
- /**
- * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
- *
- * Provide mutual exclusion for mm cpus_allowed and
- * mm nr_cpus_allowed updates.
- */
- raw_spinlock_t cpus_allowed_lock;
-#endif
+ /* MM CID related storage */
+ struct mm_mm_cid mm_cid;
+
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
@@ -1370,37 +1328,6 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
}
#ifdef CONFIG_SCHED_MM_CID
-
-enum mm_cid_state {
- MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
- MM_CID_LAZY_PUT = (1U << 31),
-};
-
-static inline bool mm_cid_is_unset(int cid)
-{
- return cid == MM_CID_UNSET;
-}
-
-static inline bool mm_cid_is_lazy_put(int cid)
-{
- return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
-}
-
-static inline bool mm_cid_is_valid(int cid)
-{
- return !(cid & MM_CID_LAZY_PUT);
-}
-
-static inline int mm_cid_set_lazy_put(int cid)
-{
- return cid | MM_CID_LAZY_PUT;
-}
-
-static inline int mm_cid_clear_lazy_put(int cid)
-{
- return cid & ~MM_CID_LAZY_PUT;
-}
-
/*
* mm_cpus_allowed: Union of all mm's threads allowed CPUs.
*/
@@ -1415,37 +1342,21 @@ static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
}
/* Accessor for struct mm_struct's cidmask. */
-static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+static inline unsigned long *mm_cidmask(struct mm_struct *mm)
{
unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
/* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
- return (struct cpumask *)cid_bitmap;
+ return (unsigned long *)cid_bitmap;
}
-static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
-{
- int i;
-
- for_each_possible_cpu(i) {
- struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
-
- pcpu_cid->cid = MM_CID_UNSET;
- pcpu_cid->recent_cid = MM_CID_UNSET;
- pcpu_cid->time = 0;
- }
- mm->nr_cpus_allowed = p->nr_cpus_allowed;
- atomic_set(&mm->max_nr_cid, 0);
- raw_spin_lock_init(&mm->cpus_allowed_lock);
- cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
- cpumask_clear(mm_cidmask(mm));
-}
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p);
static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
{
- mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
- if (!mm->pcpu_cid)
+ mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
+ if (!mm->mm_cid.pcpu)
return -ENOMEM;
mm_init_cid(mm, p);
return 0;
@@ -1454,37 +1365,24 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
static inline void mm_destroy_cid(struct mm_struct *mm)
{
- free_percpu(mm->pcpu_cid);
- mm->pcpu_cid = NULL;
+ free_percpu(mm->mm_cid.pcpu);
+ mm->mm_cid.pcpu = NULL;
}
static inline unsigned int mm_cid_size(void)
{
- return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */
+ /* mm_cpus_allowed(), mm_cidmask(). */
+ return cpumask_size() + bitmap_size(num_possible_cpus());
}
-static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask)
-{
- struct cpumask *mm_allowed = mm_cpus_allowed(mm);
-
- if (!mm)
- return;
- /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
- raw_spin_lock(&mm->cpus_allowed_lock);
- cpumask_or(mm_allowed, mm_allowed, cpumask);
- WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
- raw_spin_unlock(&mm->cpus_allowed_lock);
-}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
-
static inline unsigned int mm_cid_size(void)
{
return 0;
}
-static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
#endif /* CONFIG_SCHED_MM_CID */
struct mmu_gather;
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index e0135e0adae0..bf92227c78d0 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -59,7 +59,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
- rseq_handle_notify_resume(NULL, regs);
+ rseq_handle_slowpath(regs);
}
#endif /* LINUX_RESUME_USER_MODE_H */
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index 69553e7c14c1..2266f4dc77b6 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -3,134 +3,164 @@
#define _LINUX_RSEQ_H
#ifdef CONFIG_RSEQ
-
-#include <linux/preempt.h>
#include <linux/sched.h>
-#ifdef CONFIG_MEMBARRIER
-# define RSEQ_EVENT_GUARD irq
-#else
-# define RSEQ_EVENT_GUARD preempt
-#endif
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
+#include <uapi/linux/rseq.h>
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+void __rseq_handle_slowpath(struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
+/* Invoked from resume_user_mode_work() */
+static inline void rseq_handle_slowpath(struct pt_regs *regs)
{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
+ if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
+ if (current->rseq.event.slowpath)
+ __rseq_handle_slowpath(regs);
+ } else {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.sched_switch & current->rseq.event.has_rseq)
+ __rseq_handle_slowpath(regs);
+ }
}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- scoped_guard(RSEQ_EVENT_GUARD)
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- rseq_handle_notify_resume(ksig, regs);
-}
+void __rseq_signal_deliver(int sig, struct pt_regs *regs);
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
+/*
+ * Invoked from signal delivery to fixup based on the register context before
+ * switching to the signal delivery context.
+ */
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs)
{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.has_rseq & current->rseq.event.user_irq)
+ __rseq_signal_deliver(ksig->sig, regs);
+ } else {
+ if (current->rseq.event.has_rseq)
+ __rseq_signal_deliver(ksig->sig, regs);
+ }
}
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
+static inline void rseq_raise_notify_resume(struct task_struct *t)
{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
+ set_tsk_thread_flag(t, TIF_RSEQ);
}
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
+/* Invoked from context switch to force evaluation on exit to user */
+static __always_inline void rseq_sched_switch_event(struct task_struct *t)
{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
+ struct rseq_event *ev = &t->rseq.event;
+
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /*
+ * Avoid a boat load of conditionals by using simple logic
+ * to determine whether NOTIFY_RESUME needs to be raised.
+ *
+ * It's required when the CPU or MM CID has changed or
+ * the entry was from user space.
+ */
+ bool raise = (ev->user_irq | ev->ids_changed) & ev->has_rseq;
+
+ if (raise) {
+ ev->sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
} else {
- t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
+ if (ev->has_rseq) {
+ t->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
}
}
-static inline void rseq_execve(struct task_struct *t)
+/*
+ * Invoked from __set_task_cpu() when a task migrates or from
+ * mm_cid_schedin() when the CID changes to enforce an IDs update.
+ *
+ * This does not raise TIF_NOTIFY_RESUME as that happens in
+ * rseq_sched_switch_event().
+ */
+static __always_inline void rseq_sched_set_ids_changed(struct task_struct *t)
{
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
+ t->rseq.event.ids_changed = true;
}
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
+/* Enforce a full update after RSEQ registration and when execve() failed */
+static inline void rseq_force_update(void)
{
+ if (current->rseq.event.has_rseq) {
+ current->rseq.event.ids_changed = true;
+ current->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(current);
+ }
}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
+
+/*
+ * KVM/HYPERV invoke resume_user_mode_work() before entering guest mode,
+ * which clears TIF_NOTIFY_RESUME on architectures that don't use the
+ * generic TIF bits and therefore can't provide a separate TIF_RSEQ flag.
+ *
+ * To avoid updating user space RSEQ in that case, only to do it again
+ * before returning to user space, __rseq_handle_slowpath() does nothing
+ * when invoked with a NULL register state.
+ *
+ * After returning from guest mode, before exiting to userspace, hypervisors
+ * must invoke this function to re-raise TIF_NOTIFY_RESUME if necessary.
+ */
+static inline void rseq_virt_userspace_exit(void)
{
+ /*
+ * The generic optimization for deferring RSEQ updates until the next
+ * exit relies on having a dedicated TIF_RSEQ.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_GENERIC_TIF_BITS) &&
+ current->rseq.event.sched_switch)
+ rseq_raise_notify_resume(current);
}
-static inline void rseq_preempt(struct task_struct *t)
+
+static inline void rseq_reset(struct task_struct *t)
{
+ memset(&t->rseq, 0, sizeof(t->rseq));
+ t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
}
-static inline void rseq_migrate(struct task_struct *t)
+
+static inline void rseq_execve(struct task_struct *t)
{
+ rseq_reset(t);
}
+
+/*
+ * If parent process has a registered restartable sequences area, the
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
+ *
+ * On fork, keep the IDs (CPU, MMCID) of the parent, which avoids a fault
+ * on the COW page on exit to user space, when the child stays on the same
+ * CPU as the parent. That's obviously not guaranteed, but in overcommit
+ * scenarios it is more likely and optimizes for the fork/exec case without
+ * taking the fault.
+ */
static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
{
-}
-static inline void rseq_execve(struct task_struct *t)
-{
+ if (clone_flags & CLONE_VM)
+ rseq_reset(t);
+ else
+ t->rseq = current->rseq;
}
-#endif
+#else /* CONFIG_RSEQ */
+static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
+static inline void rseq_sched_switch_event(struct task_struct *t) { }
+static inline void rseq_sched_set_ids_changed(struct task_struct *t) { }
+static inline void rseq_force_update(void) { }
+static inline void rseq_virt_userspace_exit(void) { }
+static inline void rseq_fork(struct task_struct *t, u64 clone_flags) { }
+static inline void rseq_execve(struct task_struct *t) { }
+#endif /* !CONFIG_RSEQ */
#ifdef CONFIG_DEBUG_RSEQ
-
void rseq_syscall(struct pt_regs *regs);
-
-#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
-#endif
+#else /* CONFIG_DEBUG_RSEQ */
+static inline void rseq_syscall(struct pt_regs *regs) { }
+#endif /* !CONFIG_DEBUG_RSEQ */
#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
new file mode 100644
index 000000000000..c92167ff8a7f
--- /dev/null
+++ b/include/linux/rseq_entry.h
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_ENTRY_H
+#define _LINUX_RSEQ_ENTRY_H
+
+/* Must be outside the CONFIG_RSEQ guard to resolve the stubs */
+#ifdef CONFIG_RSEQ_STATS
+#include <linux/percpu.h>
+
+struct rseq_stats {
+ unsigned long exit;
+ unsigned long signal;
+ unsigned long slowpath;
+ unsigned long fastpath;
+ unsigned long ids;
+ unsigned long cs;
+ unsigned long clear;
+ unsigned long fixup;
+};
+
+DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
+
+/*
+ * Slow path has interrupts and preemption enabled, but the fast path
+ * runs with interrupts disabled so there is no point in having the
+ * preemption checks implied in __this_cpu_inc() for every operation.
+ */
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_stat_inc(which) this_cpu_inc((which))
+#else
+#define rseq_stat_inc(which) raw_cpu_inc((which))
+#endif
+
+#else /* CONFIG_RSEQ_STATS */
+#define rseq_stat_inc(x) do { } while (0)
+#endif /* !CONFIG_RSEQ_STATS */
+
+#ifdef CONFIG_RSEQ
+#include <linux/jump_label.h>
+#include <linux/rseq.h>
+#include <linux/uaccess.h>
+
+#include <linux/tracepoint-defs.h>
+
+#ifdef CONFIG_TRACEPOINTS
+DECLARE_TRACEPOINT(rseq_update);
+DECLARE_TRACEPOINT(rseq_ip_fixup);
+void __rseq_trace_update(struct task_struct *t);
+void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip);
+
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids)
+{
+ if (tracepoint_enabled(rseq_update) && ids)
+ __rseq_trace_update(t);
+}
+
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip)
+{
+ if (tracepoint_enabled(rseq_ip_fixup))
+ __rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+}
+
+#else /* CONFIG_TRACEPOINTS */
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids) { }
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip) { }
+#endif /* !CONFIG_TRACEPOINTS */
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_inline
+#else
+#define rseq_inline __always_inline
+#endif
+
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
+bool rseq_debug_validate_ids(struct task_struct *t);
+
+static __always_inline void rseq_note_user_irq_entry(void)
+{
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY))
+ current->rseq.event.user_irq = true;
+}
+
+/*
+ * Check whether there is a valid critical section and whether the
+ * instruction pointer in @regs is inside the critical section.
+ *
+ * - If the critical section is invalid, terminate the task.
+ *
+ * - If valid and the instruction pointer is inside, set it to the abort IP.
+ *
+ * - If valid and the instruction pointer is outside, clear the critical
+ * section address.
+ *
+ * Returns true, if the section was valid and either fixup or clear was
+ * done, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when an invalid
+ * section was found. It's clear when the failure was an unresolved page
+ * fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+/*
+ * The debug version is put out of line, but kept here so the code stays
+ * together.
+ *
+ * @csaddr has already been checked by the caller to be in user space
+ */
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs,
+ unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ u64 start_ip, abort_ip, offset, cs_end, head, tasksize = TASK_SIZE;
+ unsigned long ip = instruction_pointer(regs);
+ u64 __user *uc_head = (u64 __user *) ucs;
+ u32 usig, __user *uc_sig;
+
+ scoped_user_rw_access(ucs, efault) {
+ /*
+ * Evaluate the user pile and exit if one of the conditions
+ * is not fulfilled.
+ */
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ if (unlikely(start_ip >= tasksize))
+ goto die;
+ /* If outside, just clear the critical section. */
+ if (ip < start_ip)
+ goto clear;
+
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ cs_end = start_ip + offset;
+ /* Check for overflow and wraparound */
+ if (unlikely(cs_end >= tasksize || cs_end < start_ip))
+ goto die;
+
+ /* If not inside, clear it. */
+ if (ip >= cs_end)
+ goto clear;
+
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+ /* Ensure it's "valid" */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+ /* Validate that the abort IP is not in the critical section */
+ if (unlikely(abort_ip - start_ip < offset))
+ goto die;
+
+ /*
+ * Check version and flags for 0. No point in emitting
+ * deprecated warnings before dying. That could be done in
+ * the slow path eventually, but *shrug*.
+ */
+ unsafe_get_user(head, uc_head, efault);
+ if (unlikely(head))
+ goto die;
+
+ /* abort_ip - 4 is >= 0. See abort_ip check above */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* rseq_event.user_irq is only valid if CONFIG_GENERIC_IRQ_ENTRY=y */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* If not in interrupt from user context, let it die */
+ if (unlikely(!t->rseq.event.user_irq))
+ goto die;
+ }
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+/*
+ * On debug kernels validate that user space did not mess with it if the
+ * debug branch is enabled.
+ */
+bool rseq_debug_validate_ids(struct task_struct *t)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+ u32 cpu_id, uval, node_id;
+
+ /*
+ * On the first exit after registering the rseq region CPU ID is
+ * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0!
+ */
+ node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ?
+ cpu_to_node(t->rseq.ids.cpu_id) : 0;
+
+ scoped_user_read_access(rseq, efault) {
+ unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault);
+ if (cpu_id != t->rseq.ids.cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->cpu_id, efault);
+ if (uval != cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->node_id, efault);
+ if (uval != node_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->mm_cid, efault);
+ if (uval != t->rseq.ids.mm_cid)
+ goto die;
+ }
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+#endif /* RSEQ_BUILD_SLOW_PATH */
+
+/*
+ * This only ensures that abort_ip is in the user address space and
+ * validates that it is preceded by the signature.
+ *
+ * No other sanity checks are done here, that's what the debug code is for.
+ */
+static rseq_inline bool
+rseq_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ unsigned long ip = instruction_pointer(regs);
+ unsigned long tasksize = TASK_SIZE;
+ u64 start_ip, abort_ip, offset;
+ u32 usig, __user *uc_sig;
+
+ rseq_stat_inc(rseq_stats.cs);
+
+ if (unlikely(csaddr >= tasksize)) {
+ t->rseq.event.fatal = true;
+ return false;
+ }
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ return rseq_debug_update_user_cs(t, regs, csaddr);
+
+ scoped_user_rw_access(ucs, efault) {
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+
+ /*
+ * No sanity checks. If user space screwed it up, it can
+ * keep the pieces. That's what debug code is for.
+ *
+ * If outside, just clear the critical section.
+ */
+ if (ip - start_ip >= offset)
+ goto clear;
+
+ /*
+ * Two requirements for @abort_ip:
+ * - Must be in user space as x86 IRET would happily return to
+ * the kernel.
+ * - The four bytes preceding the instruction at @abort_ip must
+ * contain the signature.
+ *
+ * The latter protects against the following attack vector:
+ *
+ * An attacker with limited abilities to write, creates a critical
+ * section descriptor, sets the abort IP to a library function or
+ * some other ROP gadget and stores the address of the descriptor
+ * in TLS::rseq::rseq_cs. An RSEQ abort would then evade ROP
+ * protection.
+ */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+
+ /* The address is guaranteed to be >= 0 and < TASK_SIZE */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* Invalidate the critical section */
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ /* Update the instruction pointer */
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+/*
+ * Updates CPU ID, Node ID and MM CID and reads the critical section
+ * address, when @csaddr != NULL. This allows putting the ID update and the
+ * read under the same uaccess region to spare a separate begin/end.
+ *
+ * As this is either invoked from a C wrapper with @csaddr = NULL or from
+ * the fast path code with a valid pointer, a clever compiler should be
+ * able to optimize the read out. Spares a duplicate implementation.
+ *
+ * Returns true, if the operation was successful, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when invalid data
+ * was found on debug kernels. It's clear when the failure was an unresolved page
+ * fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+static rseq_inline
+bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
+ u32 node_id, u64 *csaddr)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+
+ if (static_branch_unlikely(&rseq_debug_enabled)) {
+ if (!rseq_debug_validate_ids(t))
+ return false;
+ }
+
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault);
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault);
+ unsafe_put_user(node_id, &rseq->node_id, efault);
+ unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
+ if (csaddr)
+ unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+ }
+
+ /* Cache the new values */
+ t->rseq.ids.cpu_cid = ids->cpu_cid;
+ rseq_stat_inc(rseq_stats.ids);
+ rseq_trace_update(t, ids);
+ return true;
+efault:
+ return false;
+}
+
+/*
+ * Update user space with new IDs and conditionally check whether the task
+ * is in a critical section.
+ */
+static rseq_inline bool rseq_update_usr(struct task_struct *t, struct pt_regs *regs,
+ struct rseq_ids *ids, u32 node_id)
+{
+ u64 csaddr;
+
+ if (!rseq_set_ids_get_csaddr(t, ids, node_id, &csaddr))
+ return false;
+
+ /*
+ * On architectures which utilize the generic entry code this
+ * allows to skip the critical section when the entry was not from
+ * a user space interrupt, unless debug mode is enabled.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ if (!static_branch_unlikely(&rseq_debug_enabled)) {
+ if (likely(!t->rseq.event.user_irq))
+ return true;
+ }
+ }
+ if (likely(!csaddr))
+ return true;
+ /* Sigh, this really needs to do work */
+ return rseq_update_user_cs(t, regs, csaddr);
+}
+
+/*
+ * If you want to use this then convert your architecture to the generic
+ * entry code. I'm tired of building workarounds for people who can't be
+ * bothered to make the maintenance of generic infrastructure less
+ * burdensome. Just sucking everything into the architecture code and
+ * thereby making others chase the horrible hacks and keep them working is
+ * neither acceptable nor sustainable.
+ */
+#ifdef CONFIG_GENERIC_ENTRY
+
+/*
+ * This is inlined into the exit path because:
+ *
+ * 1) It's a one time comparison in the fast path when there is no event to
+ * handle
+ *
+ * 2) The access to the user space rseq memory (TLS) is unlikely to fault
+ * so the straight inline operation is:
+ *
+ * - Four 32-bit stores only if CPU ID/ MM CID need to be updated
+ * - One 64-bit load to retrieve the critical section address
+ *
+ * 3) In the unlikely case that the critical section address is != NULL:
+ *
+ * - One 64-bit load to retrieve the start IP
+ * - One 64-bit load to retrieve the offset for calculating the end
+ * - One 64-bit load to retrieve the abort IP
+ * - One 64-bit load to retrieve the signature
+ * - One store to clear the critical section address
+ *
+ * The non-debug case implements only the minimal required checking. It
+ * provides protection against a rogue abort IP in kernel space, which
+ * would be exploitable at least on x86, and also against a rogue CS
+ * descriptor by checking the signature at the abort IP. Any fallout from
+ * invalid critical section descriptors is a user space problem. The debug
+ * case provides the full set of checks and terminates the task if a
+ * condition is not met.
+ *
+ * In case of a fault or an invalid value, this sets TIF_NOTIFY_RESUME and
+ * tells the caller to loop back into exit_to_user_mode_loop(). The rseq
+ * slow path there will handle the failure.
+ */
+static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct task_struct *t)
+{
+ /*
+ * Page faults need to be disabled as this is called with
+ * interrupts disabled
+ */
+ guard(pagefault)();
+ if (likely(!t->rseq.event.ids_changed)) {
+ struct rseq __user *rseq = t->rseq.usrptr;
+ /*
+ * If IDs have not changed rseq_event::user_irq must be true
+ * See rseq_sched_switch_event().
+ */
+ u64 csaddr;
+
+ if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
+ return false;
+
+ if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
+ if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
+ return false;
+ }
+ return true;
+ }
+
+ struct rseq_ids ids = {
+ .cpu_id = task_cpu(t),
+ .mm_cid = task_mm_cid(t),
+ };
+ u32 node_id = cpu_to_node(ids.cpu_id);
+
+ return rseq_update_usr(t, regs, &ids, node_id);
+}
+
+static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
+{
+ struct task_struct *t = current;
+
+ /*
+ * If the task neither went through schedule nor got the flag enforced
+ * by the rseq syscall or execve, then there is nothing to do here.
+ *
+ * CPU ID and MM CID can only change when going through a context
+ * switch.
+ *
+ * rseq_sched_switch_event() sets the rseq_event::sched_switch bit
+ * only when rseq_event::has_rseq is true. That conditional is
+ * required to avoid setting the TIF bit if RSEQ is not registered
+ * for a task. rseq_event::sched_switch is cleared when RSEQ is
+ * unregistered by a task so it's sufficient to check for the
+ * sched_switch bit alone.
+ *
+ * A sane compiler requires three instructions for the nothing to do
+ * case including clearing the events, but your mileage might vary.
+ */
+ if (unlikely((t->rseq.event.sched_switch))) {
+ rseq_stat_inc(rseq_stats.fastpath);
+
+ if (unlikely(!rseq_exit_user_update(regs, t)))
+ return true;
+ }
+ /* Clear state so next entry starts from a clean slate */
+ t->rseq.event.events = 0;
+ return false;
+}
+
+/* Required to allow conversion to GENERIC_ENTRY w/o GENERIC_TIF_BITS */
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+static __always_inline bool test_tif_rseq(unsigned long ti_work)
+{
+ return ti_work & _TIF_RSEQ;
+}
+
+static __always_inline void clear_tif_rseq(void)
+{
+ static_assert(TIF_RSEQ != TIF_NOTIFY_RESUME);
+ clear_thread_flag(TIF_RSEQ);
+}
+#else
+static __always_inline bool test_tif_rseq(unsigned long ti_work) { return true; }
+static __always_inline void clear_tif_rseq(void) { }
+#endif
+
+static __always_inline bool
+rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ if (likely(!test_tif_rseq(ti_work)))
+ return false;
+
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+
+ clear_tif_rseq();
+ return false;
+}
+
+#else /* CONFIG_GENERIC_ENTRY */
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+#endif /* !CONFIG_GENERIC_ENTRY */
+
+static __always_inline void rseq_syscall_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ /* Needed to remove the store for the !lockdep case */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ WARN_ON_ONCE(ev->sched_switch);
+ ev->events = 0;
+ }
+}
+
+static __always_inline void rseq_irqentry_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ lockdep_assert_once(!ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing could not clear it.
+ */
+ ev->events = 0;
+}
+
+/* Required to keep ARM64 working */
+static __always_inline void rseq_exit_to_user_mode_legacy(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ WARN_ON_ONCE(ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing did not clear it.
+ */
+ ev->events = 0;
+}
+
+void __rseq_debug_syscall_return(struct pt_regs *regs);
+
+static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+{
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ __rseq_debug_syscall_return(regs);
+}
+#else /* CONFIG_RSEQ */
+static inline void rseq_note_user_irq_entry(void) { }
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+static inline void rseq_syscall_exit_to_user_mode(void) { }
+static inline void rseq_irqentry_exit_to_user_mode(void) { }
+static inline void rseq_exit_to_user_mode_legacy(void) { }
+static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+#endif /* !CONFIG_RSEQ */
+
+#endif /* _LINUX_RSEQ_ENTRY_H */
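The single compare "ip - start_ip >= offset" in rseq_update_user_cs() relies on
unsigned wraparound to cover both sides of the range check with one branch. An
equivalence sketch (illustrative only, not part of the patch):

    /* true if ip is inside [start_ip, start_ip + offset) */
    static bool ip_in_cs(u64 ip, u64 start_ip, u64 offset)
    {
    	/*
    	 * When ip < start_ip the subtraction wraps to a huge value,
    	 * so the single unsigned compare also rejects that case.
    	 */
    	return ip - start_ip < offset;
    }

This is equivalent to "ip >= start_ip && ip < start_ip + offset" as long as
start_ip + offset does not overflow, which the debug variant checks explicitly.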
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
new file mode 100644
index 000000000000..332dc14b81c9
--- /dev/null
+++ b/include/linux/rseq_types.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_TYPES_H
+#define _LINUX_RSEQ_TYPES_H
+
+#include <linux/irq_work_types.h>
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+#ifdef CONFIG_RSEQ
+struct rseq;
+
+/**
+ * struct rseq_event - Storage for rseq related event management
+ * @all: Compound to initialize and clear the data efficiently
+ * @events: Compound to access events with a single load/store
+ * @sched_switch: True if the task was scheduled and needs update on
+ * exit to user
+ * @ids_changed: Indicator that IDs need to be updated
+ * @user_irq: True on interrupt entry from user mode
+ * @has_rseq: True if the task has a rseq pointer installed
+ * @error: Compound error code for the slow path to analyze
+ * @fatal: User space data corrupted or invalid
+ * @slowpath: Indicator that slow path processing via TIF_NOTIFY_RESUME
+ * is required
+ *
+ * @sched_switch and @ids_changed must be adjacent and the combo must be
+ * 16bit aligned to allow a single store, when both are set at the same
+ * time in the scheduler.
+ */
+struct rseq_event {
+ union {
+ u64 all;
+ struct {
+ union {
+ u32 events;
+ struct {
+ u8 sched_switch;
+ u8 ids_changed;
+ u8 user_irq;
+ };
+ };
+
+ u8 has_rseq;
+ u8 __pad;
+ union {
+ u16 error;
+ struct {
+ u8 fatal;
+ u8 slowpath;
+ };
+ };
+ };
+ };
+};
+
+/**
+ * struct rseq_ids - Cache for ids, which need to be updated
+ * @cpu_cid: Compound of @cpu_id and @mm_cid to make the
+ * compiler emit a single compare on 64-bit
+ * @cpu_id: The CPU ID which was written last to user space
+ * @mm_cid: The MM CID which was written last to user space
+ *
+ * @cpu_id and @mm_cid are updated when the data is written to user space.
+ */
+struct rseq_ids {
+ union {
+ u64 cpu_cid;
+ struct {
+ u32 cpu_id;
+ u32 mm_cid;
+ };
+ };
+};
+
+/**
+ * struct rseq_data - Storage for all rseq related data
+ * @usrptr: Pointer to the registered user space RSEQ memory
+ * @len: Length of the RSEQ region
+ * @sig: Signature of critical section abort IPs
+ * @event: Storage for event management
+ * @ids: Storage for cached CPU ID and MM CID
+ */
+struct rseq_data {
+ struct rseq __user *usrptr;
+ u32 len;
+ u32 sig;
+ struct rseq_event event;
+ struct rseq_ids ids;
+};
+
+#else /* CONFIG_RSEQ */
+struct rseq_data { };
+#endif /* !CONFIG_RSEQ */
+
+#ifdef CONFIG_SCHED_MM_CID
+
+#define MM_CID_UNSET BIT(31)
+#define MM_CID_ONCPU BIT(30)
+#define MM_CID_TRANSIT BIT(29)
+
+/**
+ * struct sched_mm_cid - Storage for per task MM CID data
+ * @active: MM CID is active for the task
+ * @cid: The CID associated to the task either permanently or
+ * borrowed from the CPU
+ */
+struct sched_mm_cid {
+ unsigned int active;
+ unsigned int cid;
+};
+
+/**
+ * struct mm_cid_pcpu - Storage for per CPU MM_CID data
+ * @cid: The CID associated to the CPU either permanently or
+ * while a task with a CID is running
+ */
+struct mm_cid_pcpu {
+ unsigned int cid;
+}____cacheline_aligned_in_smp;
+
+/**
+ * struct mm_mm_cid - Storage for per MM CID data
+ * @pcpu: Per CPU storage for CIDs associated to a CPU
+ * @percpu: Set when CIDs are in per CPU mode
+ * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
+ * @max_cids: The exclusive maximum CID value for allocation and convergence
+ * @irq_work: irq_work to handle the affinity mode change case
+ * @work: Regular work to handle the affinity mode change case
+ * @lock: Spinlock to protect against affinity setting which can't take @mutex
+ * @mutex: Mutex to serialize forks and exits related to this mm
+ * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
+ * only grows.
+ * @users: The number of tasks sharing this MM. Separate from mm::mm_users
+ * as that is modified by mmget()/mmput() by other entities which
+ * do not actually share the MM.
+ * @pcpu_thrs: Threshold for switching back from per CPU mode
+ * @update_deferred: A deferred switch back to per task mode is pending.
+ */
+struct mm_mm_cid {
+ /* Hotpath read mostly members */
+ struct mm_cid_pcpu __percpu *pcpu;
+ unsigned int percpu;
+ unsigned int transit;
+ unsigned int max_cids;
+
+ /* Rarely used. Moves @lock and @mutex into the second cacheline */
+ struct irq_work irq_work;
+ struct work_struct work;
+
+ raw_spinlock_t lock;
+ struct mutex mutex;
+
+ /* Low frequency modified */
+ unsigned int nr_cpus_allowed;
+ unsigned int users;
+ unsigned int pcpu_thrs;
+ unsigned int update_deferred;
+}____cacheline_aligned_in_smp;
+#else /* CONFIG_SCHED_MM_CID */
+struct mm_mm_cid { };
+struct sched_mm_cid { };
+#endif /* !CONFIG_SCHED_MM_CID */
+
+#endif
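
The layout rules called out in the struct rseq_event kerneldoc can be checked at
build time. A minimal sketch, assuming <linux/build_bug.h> and <linux/stddef.h>;
illustrative only, not part of the diff:

    /* Build-time checks for the documented rseq_event layout constraints */
    static_assert(sizeof(struct rseq_event) == sizeof(u64));
    /* @sched_switch and @ids_changed must be adjacent ... */
    static_assert(offsetof(struct rseq_event, ids_changed) ==
                  offsetof(struct rseq_event, sched_switch) + 1);
    /* ... and 16bit aligned so the scheduler can set both with a single store */
    static_assert(offsetof(struct rseq_event, sched_switch) % sizeof(u16) == 0);
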
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b469878de25c..9eec409745f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -41,7 +41,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
-#include <uapi/linux/rseq.h>
+#include <linux/rseq_types.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
@@ -1406,33 +1406,8 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_RSEQ
- struct rseq __user *rseq;
- u32 rseq_len;
- u32 rseq_sig;
- /*
- * RmW on rseq_event_mask must be performed atomically
- * with respect to preemption.
- */
- unsigned long rseq_event_mask;
-# ifdef CONFIG_DEBUG_RSEQ
- /*
- * This is a place holder to save a copy of the rseq fields for
- * validation of read-only fields. The struct rseq has a
- * variable-length array at the end, so it cannot be used
- * directly. Reserve a size large enough for the known fields.
- */
- char rseq_fields[sizeof(struct rseq)];
-# endif
-#endif
-
-#ifdef CONFIG_SCHED_MM_CID
- int mm_cid; /* Current cid in mm */
- int last_mm_cid; /* Most recent cid in mm */
- int migrate_from_cpu;
- int mm_cid_active; /* Whether cid bitmap is active */
- struct callback_head cid_work;
-#endif
+ struct rseq_data rseq;
+ struct sched_mm_cid mm_cid;
struct tlbflush_unmap_batch tlb_ubc;
@@ -2318,6 +2293,32 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+/* Avoids recursive inclusion hell */
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit(struct task_struct *t);
+static __always_inline int task_mm_cid(struct task_struct *t)
+{
+ return t->mm_cid.cid & ~(MM_CID_ONCPU | MM_CID_TRANSIT);
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit(struct task_struct *t) { }
+static __always_inline int task_mm_cid(struct task_struct *t)
+{
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+ * in user-space, although it won't provide the memory usage benefits.
+ */
+ return task_cpu(t);
+}
+#endif
+
#ifndef MODULE
#ifndef COMPILE_OFFSETS
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index dd925d84fa46..b40de9bab4b7 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -67,6 +67,11 @@ enum syscall_work_bit {
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
#endif
+#ifndef TIF_RSEQ
+# define TIF_RSEQ TIF_NOTIFY_RESUME
+# define _TIF_RSEQ _TIF_NOTIFY_RESUME
+#endif
+
#ifdef __KERNEL__
#ifndef arch_set_restart_data
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1beb5b395d81..be395f5f7ee3 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/cleanup.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
@@ -35,9 +36,17 @@
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+# define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+# define masked_user_read_access_begin masked_user_access_begin
+# endif
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
+ #define masked_user_read_access_begin(src) NULL
+ #define masked_user_write_access_begin(src) NULL
#define mask_user_address(src) (src)
#endif
@@ -518,7 +527,34 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
-#ifndef __get_kernel_nofault
+#ifdef arch_get_kernel_nofault
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ */
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_get_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_put_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */
+
#define __get_kernel_nofault(dst, src, type, label) \
do { \
type __user *p = (type __force __user *)(src); \
@@ -535,7 +571,8 @@ do { \
if (__put_user(data, p)) \
goto label; \
} while (0)
-#endif
+
+#endif /* !__get_kernel_nofault */
/**
* get_kernel_nofault(): safely attempt to read from a location
@@ -549,7 +586,42 @@ do { \
copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
-#ifndef user_access_begin
+#ifdef user_access_begin
+
+#ifdef arch_unsafe_get_user
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ *
+ * Some architectures use internal local labels already, but this extra
+ * indirection here is harmless because the compiler optimizes it out
+ * completely in any case. This construct just ensures that the ASM GOTO
+ * target is always in the local scope. The C goto 'label' works correctly
+ * when leaving a cleanup() scope.
+ */
+#define unsafe_get_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_get_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define unsafe_put_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_put_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+#endif /* arch_unsafe_get_user */
+
+#else /* user_access_begin */
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
@@ -559,7 +631,8 @@ do { \
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
-#endif
+#endif /* !user_access_begin */
+
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
@@ -569,6 +642,239 @@ static inline void user_access_restore(unsigned long flags) { }
#define user_read_access_end user_access_end
#endif
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
+#define user_rw_access_begin(u, s) user_access_begin(u, s)
+#define user_rw_access_end() user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode) \
+static __always_inline void __user * \
+class_user_##_mode##_begin(void __user *ptr) \
+{ \
+ return ptr; \
+} \
+ \
+static __always_inline void \
+class_user_##_mode##_end(void __user *ptr) \
+{ \
+ user_##_mode##_access_end(); \
+} \
+ \
+DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
+ class_user_##_mode##_end(_T), \
+ class_user_##_mode##_begin(ptr), void __user *ptr) \
+ \
+static __always_inline class_user_##_mode##_access_t \
+class_user_##_mode##_access_ptr(void __user *scope) \
+{ \
+ return scope; \
+}
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_user_access_begin - Start a scoped user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * Internal helper for __scoped_user_access(). Don't use directly.
+ */
+#define __scoped_user_access_begin(mode, uptr, size, elbl) \
+({ \
+ typeof(uptr) __retptr; \
+ \
+ if (can_do_masked_user_access()) { \
+ __retptr = masked_user_##mode##_access_begin(uptr); \
+ } else { \
+ __retptr = uptr; \
+ if (!user_##mode##_access_begin(uptr, size)) \
+ goto elbl; \
+ } \
+ __retptr; \
+})
+
+/**
+ * __scoped_user_access - Open a scope for user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected. It
+ * must be placed outside the scope
+ *
+ * If the user access function inside the scope requires a fault label, it
+ * can use @elbl or a different label outside the scope, which requires
+ * that user access which is implemented with ASM GOTO has been properly
+ * wrapped. See unsafe_get_user() for reference.
+ *
+ * scoped_user_rw_access(ptr, efault) {
+ * unsafe_get_user(rval, &ptr->rval, efault);
+ * unsafe_put_user(wval, &ptr->wval, efault);
+ * }
+ * return 0;
+ * efault:
+ * return -EFAULT;
+ *
+ * The scope is internally implemented as an auto-terminating nested for()
+ * loop, which can be left with 'return', 'break' and 'goto' at any
+ * point.
+ *
+ * When the scope is left, user_@mode_access_end() is automatically
+ * invoked.
+ *
+ * When the architecture supports masked user access and the access region
+ * which is determined by @uptr and @size is not a valid user space
+ * address (a valid address is below TASK_SIZE), the scope sets the pointer
+ * to a faulting user space address and does not terminate early. This
+ * optimizes for the good case and lets the performance uncritical bad case
+ * go through the fault.
+ *
+ * A possible modification of the pointer is limited to the scope. Outside
+ * of the scope the original pointer value is preserved, so it is available
+ * for diagnostic purposes in an out of scope fault path.
+ *
+ * Nesting scoped user access into a user access scope is invalid and fails
+ * the build. Nesting into other guards, e.g. pagefault is safe.
+ *
+ * The masked variant does not check the size of the access and relies on a
+ * mapping hole (e.g. a guard page) to catch an out of range pointer; the
+ * first access to user memory inside the scope has to be within
+ * @uptr ... @uptr + PAGE_SIZE - 1
+ *
+ * Don't use directly. Use scoped_user_$MODE_access() instead.
+ */
+#define __scoped_user_access(mode, uptr, size, elbl) \
+for (bool done = false; !done; done = true) \
+ for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+ !done; done = true) \
+ for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
+ /* Force modified pointer usage within the scope */ \
+ for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
+
+/**
+ * scoped_user_read_access_size - Start a scoped user read access with given size
+ * @usrc: Pointer to the user space address to read from
+ * @size: Size of the access starting from @usrc
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access_size(usrc, size, elbl) \
+ __scoped_user_access(read, usrc, size, elbl)
+
+/**
+ * scoped_user_read_access - Start a scoped user read access
+ * @usrc: Pointer to the user space address to read from
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @usrc is determined via sizeof(*@usrc).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access(usrc, elbl) \
+ scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl)
+
+/**
+ * scoped_user_write_access_size - Start a scoped user write access with given size
+ * @udst: Pointer to the user space address to write to
+ * @size: Size of the access starting from @udst
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access_size(udst, size, elbl) \
+ __scoped_user_access(write, udst, size, elbl)
+
+/**
+ * scoped_user_write_access - Start a scoped user write access
+ * @udst: Pointer to the user space address to write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @udst is determined via sizeof(*@udst).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access(udst, elbl) \
+ scoped_user_write_access_size(udst, sizeof(*(udst)), elbl)
+
+/**
+ * scoped_user_rw_access_size - Start a scoped user read/write access with given size
+ * @uptr: Pointer to the user space address to read from and write to
+ * @size: Size of the access starting from @uptr
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access_size(uptr, size, elbl) \
+ __scoped_user_access(rw, uptr, size, elbl)
+
+/**
+ * scoped_user_rw_access - Start a scoped user read/write access
+ * @uptr: Pointer to the user space address to read from and write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @uptr is determined via sizeof(*@uptr).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access(uptr, elbl) \
+ scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl)
+
+/**
+ * get_user_inline - Read user data inlined
+ * @val: The variable to store the value read from user memory
+ * @usrc: Pointer to the user space memory to read from
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of get_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define get_user_inline(val, usrc) \
+({ \
+ __label__ efault; \
+ typeof(usrc) _tmpsrc = usrc; \
+ int _ret = 0; \
+ \
+ scoped_user_read_access(_tmpsrc, efault) \
+ unsafe_get_user(val, _tmpsrc, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
+
+/**
+ * put_user_inline - Write to user memory inlined
+ * @val: The value to write
+ * @udst: Pointer to the user space memory to write to
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of put_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define put_user_inline(val, udst) \
+({ \
+ __label__ efault; \
+ typeof(udst) _tmpdst = udst; \
+ int _ret = 0; \
+ \
+ scoped_user_write_access(_tmpdst, efault) \
+ unsafe_put_user(val, _tmpdst, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
+
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
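
A minimal usage sketch of the write-side scope added above; struct uvals and
put_uvals() are made up for illustration and are not part of the diff. Both
stores share one access_begin/end pair, and a fault takes the plain C goto out
of the cleanup scope, which is what the local label wrappers around
unsafe_put_user() guarantee:

    /* Illustrative only; assumes <linux/uaccess.h> */
    struct uvals {
            u32 a;
            u32 b;
    };

    static inline int put_uvals(struct uvals __user *udst, u32 a, u32 b)
    {
            scoped_user_write_access(udst, efault) {
                    unsafe_put_user(a, &udst->a, efault);
                    unsafe_put_user(b, &udst->b, efault);
            }
            return 0;
    efault:
            return -EFAULT;
    }
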
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
index 823b47d1ba1e..ce85d650bf4b 100644
--- a/include/trace/events/rseq.h
+++ b/include/trace/events/rseq.h
@@ -21,9 +21,9 @@ TRACE_EVENT(rseq_update,
),
TP_fast_assign(
- __entry->cpu_id = raw_smp_processor_id();
+ __entry->cpu_id = t->rseq.ids.cpu_id;
__entry->node_id = cpu_to_node(__entry->cpu_id);
- __entry->mm_cid = task_mm_cid(t);
+ __entry->mm_cid = t->rseq.ids.mm_cid;
),
TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index c233aae5eac9..1b76d508400c 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -114,20 +114,13 @@ struct rseq {
/*
* Restartable sequences flags field.
*
- * This field should only be updated by the thread which
- * registered this data structure. Read by the kernel.
- * Mainly used for single-stepping through rseq critical sections
- * with debuggers.
- *
- * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
- * Inhibit instruction sequence block restart on preemption
- * for this thread.
- * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
- * Inhibit instruction sequence block restart on signal
- * delivery for this thread.
- * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
- * Inhibit instruction sequence block restart on migration for
- * this thread.
+ * This field was initially intended to allow event masking for
+ * single-stepping through rseq critical sections with debuggers.
+ * The kernel does not support this anymore and the relevant bits
+ * are checked for being always false:
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
+ * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
*/
__u32 flags;
diff --git a/init/Kconfig b/init/Kconfig
index cab3ad28ca49..d1c606ec632e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1913,10 +1913,36 @@ config RSEQ
If unsure, say Y.
+config RSEQ_STATS
+ default n
+ bool "Enable lightweight statistics of restartable sequences" if EXPERT
+ depends on RSEQ && DEBUG_FS
+ help
+ Enable lightweight counters which expose information about the
+ frequency of RSEQ operations via debugfs. Mostly interesting for
+ kernel debugging or performance analysis. While lightweight, it still
+ adds code to the user/kernel mode transitions.
+
+ If unsure, say N.
+
+config RSEQ_DEBUG_DEFAULT_ENABLE
+ default n
+ bool "Enable restartable sequences debug mode by default" if EXPERT
+ depends on RSEQ
+ help
+ This enables the static branch for debug mode of restartable
+ sequences.
+
+ This can also be controlled via the kernel command line parameter
+ "rseq_debug=0/1" and through debugfs.
+
+ If unsure, say N.
+
config DEBUG_RSEQ
default n
bool "Enable debugging of rseq() system call" if EXPERT
- depends on RSEQ && DEBUG_KERNEL
+ depends on RSEQ && DEBUG_KERNEL && !GENERIC_ENTRY
+ select RSEQ_DEBUG_DEFAULT_ENABLE
help
Enable extra debugging checks for the rseq system call.
diff --git a/init/init_task.c b/init/init_task.c
index a55e2189206f..5d122699b664 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -223,6 +223,9 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_SECCOMP_FILTER
.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
+#ifdef CONFIG_SCHED_MM_CID
+ .mm_cid = { .cid = MM_CID_UNSET, },
+#endif
};
EXPORT_SYMBOL(init_task);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index db9f6c539b28..b674fdf96208 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3085,10 +3085,13 @@ EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __ro_after_init
= {CPU_BITS_ALL};
+unsigned int __num_possible_cpus __ro_after_init = NR_CPUS;
#else
struct cpumask __cpu_possible_mask __ro_after_init;
+unsigned int __num_possible_cpus __ro_after_init;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);
+EXPORT_SYMBOL(__num_possible_cpus);
struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);
@@ -3116,6 +3119,7 @@ void init_cpu_present(const struct cpumask *src)
void init_cpu_possible(const struct cpumask *src)
{
cpumask_copy(&__cpu_possible_mask, src);
+ __num_possible_cpus = cpumask_weight(&__cpu_possible_mask);
}
void set_cpu_online(unsigned int cpu, bool online)
@@ -3139,6 +3143,21 @@ void set_cpu_online(unsigned int cpu, bool online)
}
}
+/*
+ * This should be marked __init, but there is a boatload of call sites
+ * which need to be fixed up to do so. Sigh...
+ */
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+ if (possible) {
+ if (!cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask))
+ __num_possible_cpus++;
+ } else {
+ if (cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask))
+ __num_possible_cpus--;
+ }
+}
+
/*
* Activate the first processor.
*/
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index f62e1d1b2063..5c792b30c58a 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -11,19 +11,20 @@
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
-/**
- * exit_to_user_mode_loop - do any pending work before leaving to user space
- * @regs: Pointer to pt_regs on entry stack
- * @ti_work: TIF work flags as read by the caller
- */
-__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
- unsigned long ti_work)
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+#define EXIT_TO_USER_MODE_WORK_LOOP (EXIT_TO_USER_MODE_WORK & ~_TIF_RSEQ)
+#else
+#define EXIT_TO_USER_MODE_WORK_LOOP (EXIT_TO_USER_MODE_WORK)
+#endif
+
+static __always_inline unsigned long __exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work)
{
/*
* Before returning to user space ensure that all pending work
* items have been completed.
*/
- while (ti_work & EXIT_TO_USER_MODE_WORK) {
+ while (ti_work & EXIT_TO_USER_MODE_WORK_LOOP) {
local_irq_enable_exit_to_user(ti_work);
@@ -62,17 +63,21 @@ __always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
return ti_work;
}
-noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
+/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ * @regs: Pointer to pt_regs on entry stack
+ * @ti_work: TIF work flags as read by the caller
+ */
+__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work)
{
- enter_from_user_mode(regs);
-}
+ for (;;) {
+ ti_work = __exit_to_user_mode_loop(regs, ti_work);
-noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
-{
- instrumentation_begin();
- exit_to_user_mode_prepare(regs);
- instrumentation_end();
- exit_to_user_mode();
+ if (likely(!rseq_exit_to_user_mode_restart(regs, ti_work)))
+ return ti_work;
+ ti_work = read_thread_flags();
+ }
}
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
diff --git a/kernel/entry/syscall-common.c b/kernel/entry/syscall-common.c
index 66e6ba7fa80c..940a597ded40 100644
--- a/kernel/entry/syscall-common.c
+++ b/kernel/entry/syscall-common.c
@@ -63,14 +63,6 @@ long syscall_trace_enter(struct pt_regs *regs, long syscall,
return ret ? : syscall;
}
-noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
-{
- enter_from_user_mode(regs);
- instrumentation_begin();
- local_irq_enable();
- instrumentation_end();
-}
-
/*
* If SYSCALL_EMU is set, then the only reason to report is when
* SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
diff --git a/kernel/exit.c b/kernel/exit.c
index 9f74e8f1c431..324616f690b7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -910,6 +910,7 @@ void __noreturn do_exit(long code)
user_events_exit(tsk);
io_uring_files_cancel();
+ sched_mm_cid_exit(tsk);
exit_signals(tsk); /* sets PF_EXITING */
seccomp_filter_release(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 3da0f08615a9..8475958e029b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -955,10 +955,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#endif
#ifdef CONFIG_SCHED_MM_CID
- tsk->mm_cid = -1;
- tsk->last_mm_cid = -1;
- tsk->mm_cid_active = 0;
- tsk->migrate_from_cpu = -1;
+ tsk->mm_cid.cid = MM_CID_UNSET;
+ tsk->mm_cid.active = 0;
#endif
return tsk;
@@ -2456,6 +2454,7 @@ __latent_entropy struct task_struct *copy_process(
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm) {
+ sched_mm_cid_exit(p);
mm_clear_owner(p->mm, p);
mmput(p->mm);
}
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 125804fbb5cb..ebcccb16ae0b 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
if (flags & FLAGS_NUMA) {
u32 __user *naddr = (void *)uaddr + size / 2;
- if (futex_get_value(&node, naddr))
+ if (get_user_inline(node, naddr))
return -EFAULT;
if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
node = numa_node_id();
node_updated = true;
}
- if (node_updated && futex_put_value(node, naddr))
+ if (node_updated && put_user_inline(node, naddr))
return -EFAULT;
}
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 2cd57096c38e..30c2afa03889 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
return ret;
}
-/*
- * This does a plain atomic user space read, and the user pointer has
- * already been verified earlier by get_futex_key() to be both aligned
- * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
- *
- * We still want to avoid any speculation, and while __get_user() is
- * the traditional model for this, it's actually slower than doing
- * this manually these days.
- *
- * We could just have a per-architecture special function for it,
- * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
- * than force everybody to do that, write it out long-hand using
- * the low-level user-access infrastructure.
- *
- * This looks a bit overkill, but generally just results in a couple
- * of instructions.
- */
-static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
-{
- u32 val;
-
- if (can_do_masked_user_access())
- from = masked_user_access_begin(from);
- else if (!user_read_access_begin(from, sizeof(*from)))
- return -EFAULT;
- unsafe_get_user(val, from, Efault);
- user_read_access_end();
- *dest = val;
- return 0;
-Efault:
- user_read_access_end();
- return -EFAULT;
-}
-
-static __always_inline int futex_put_value(u32 val, u32 __user *to)
-{
- if (can_do_masked_user_access())
- to = masked_user_access_begin(to);
- else if (!user_write_access_begin(to, sizeof(*to)))
- return -EFAULT;
- unsafe_put_user(val, to, Efault);
- user_write_access_end();
- return 0;
-Efault:
- user_write_access_end();
- return -EFAULT;
-}
-
+/* Read from user memory with pagefaults disabled */
static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
{
- int ret;
-
- pagefault_disable();
- ret = futex_get_value(dest, from);
- pagefault_enable();
-
- return ret;
+ guard(pagefault)();
+ return get_user_inline(*dest, from);
}
extern void __futex_unqueue(struct futex_q *q);
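
For reference, the converted futex_get_value_locked() above is roughly what the
following open-coded sketch does once get_user_inline() is expanded;
peek_user_u32() is a hypothetical name used only for illustration. The fault
label sits outside both the pagefault guard and the user access scope, which is
exactly the situation the ASM GOTO local label wrappers make safe:

    /* Illustrative only; assumes <linux/uaccess.h> */
    static inline int peek_user_u32(u32 *dst, u32 __user *uptr)
    {
            guard(pagefault)();

            scoped_user_read_access(uptr, efault)
                    unsafe_get_user(*dst, uptr, efault);
            return 0;
    efault:
            return -EFAULT;
    }
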
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 75a84efad40f..392ec2f75f01 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -793,9 +793,9 @@ static long ptrace_get_rseq_configuration(struct task_struct *task,
unsigned long size, void __user *data)
{
struct ptrace_rseq_configuration conf = {
- .rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
- .rseq_abi_size = task->rseq_len,
- .signature = task->rseq_sig,
+ .rseq_abi_pointer = (u64)(uintptr_t)task->rseq.usrptr,
+ .rseq_abi_size = task->rseq.len,
+ .signature = task->rseq.sig,
.flags = 0,
};
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 2452b7366b00..395d8b002350 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -8,98 +8,7 @@
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/syscalls.h>
-#include <linux/rseq.h>
-#include <linux/types.h>
-#include <linux/ratelimit.h>
-#include <asm/ptrace.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/rseq.h>
-
-/* The original rseq structure size (including padding) is 32 bytes. */
-#define ORIG_RSEQ_SIZE 32
-
-#define RSEQ_CS_NO_RESTART_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT | \
- RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL | \
- RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE)
-
-#ifdef CONFIG_DEBUG_RSEQ
-static struct rseq *rseq_kernel_fields(struct task_struct *t)
-{
- return (struct rseq *) t->rseq_fields;
-}
-
-static int rseq_validate_ro_fields(struct task_struct *t)
-{
- static DEFINE_RATELIMIT_STATE(_rs,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
- u32 cpu_id_start, cpu_id, node_id, mm_cid;
- struct rseq __user *rseq = t->rseq;
-
- /*
- * Validate fields which are required to be read-only by
- * user-space.
- */
- if (!user_read_access_begin(rseq, t->rseq_len))
- goto efault;
- unsafe_get_user(cpu_id_start, &rseq->cpu_id_start, efault_end);
- unsafe_get_user(cpu_id, &rseq->cpu_id, efault_end);
- unsafe_get_user(node_id, &rseq->node_id, efault_end);
- unsafe_get_user(mm_cid, &rseq->mm_cid, efault_end);
- user_read_access_end();
-
- if ((cpu_id_start != rseq_kernel_fields(t)->cpu_id_start ||
- cpu_id != rseq_kernel_fields(t)->cpu_id ||
- node_id != rseq_kernel_fields(t)->node_id ||
- mm_cid != rseq_kernel_fields(t)->mm_cid) && __ratelimit(&_rs)) {
-
- pr_warn("Detected rseq corruption for pid: %d, name: %s\n"
- "\tcpu_id_start: %u ?= %u\n"
- "\tcpu_id: %u ?= %u\n"
- "\tnode_id: %u ?= %u\n"
- "\tmm_cid: %u ?= %u\n",
- t->pid, t->comm,
- cpu_id_start, rseq_kernel_fields(t)->cpu_id_start,
- cpu_id, rseq_kernel_fields(t)->cpu_id,
- node_id, rseq_kernel_fields(t)->node_id,
- mm_cid, rseq_kernel_fields(t)->mm_cid);
- }
-
- /* For now, only print a console warning on mismatch. */
- return 0;
-
-efault_end:
- user_read_access_end();
-efault:
- return -EFAULT;
-}
-
-/*
- * Update an rseq field and its in-kernel copy in lock-step to keep a coherent
- * state.
- */
-#define rseq_unsafe_put_user(t, value, field, error_label) \
- do { \
- unsafe_put_user(value, &t->rseq->field, error_label); \
- rseq_kernel_fields(t)->field = value; \
- } while (0)
-
-#else
-static int rseq_validate_ro_fields(struct task_struct *t)
-{
- return 0;
-}
-
-#define rseq_unsafe_put_user(t, value, field, error_label) \
- unsafe_put_user(value, &t->rseq->field, error_label)
-#endif
-
/*
- *
* Restartable sequences are a lightweight interface that allows
* user-level code to be executed atomically relative to scheduler
* preemption and signal delivery. Typically used for implementing
@@ -158,356 +67,356 @@ static int rseq_validate_ro_fields(struct task_struct *t)
* F1. <failure>
*/
-static int rseq_update_cpu_node_id(struct task_struct *t)
-{
- struct rseq __user *rseq = t->rseq;
- u32 cpu_id = raw_smp_processor_id();
- u32 node_id = cpu_to_node(cpu_id);
- u32 mm_cid = task_mm_cid(t);
+/* Required to select the proper per_cpu ops for rseq_stats_inc() */
+#define RSEQ_BUILD_SLOW_PATH
- /*
- * Validate read-only rseq fields.
- */
- if (rseq_validate_ro_fields(t))
- goto efault;
- WARN_ON_ONCE((int) mm_cid < 0);
- if (!user_write_access_begin(rseq, t->rseq_len))
- goto efault;
+#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
+#include <linux/rseq_entry.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
- rseq_unsafe_put_user(t, cpu_id, cpu_id_start, efault_end);
- rseq_unsafe_put_user(t, cpu_id, cpu_id, efault_end);
- rseq_unsafe_put_user(t, node_id, node_id, efault_end);
- rseq_unsafe_put_user(t, mm_cid, mm_cid, efault_end);
+#define CREATE_TRACE_POINTS
+#include <trace/events/rseq.h>
- /*
- * Additional feature fields added after ORIG_RSEQ_SIZE
- * need to be conditionally updated only if
- * t->rseq_len != ORIG_RSEQ_SIZE.
- */
- user_write_access_end();
- trace_rseq_update(t);
- return 0;
+DEFINE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
-efault_end:
- user_write_access_end();
-efault:
- return -EFAULT;
+static inline void rseq_control_debug(bool on)
+{
+ if (on)
+ static_branch_enable(&rseq_debug_enabled);
+ else
+ static_branch_disable(&rseq_debug_enabled);
}
-static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
+static int __init rseq_setup_debug(char *str)
{
- struct rseq __user *rseq = t->rseq;
- u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0,
- mm_cid = 0;
-
- /*
- * Validate read-only rseq fields.
- */
- if (rseq_validate_ro_fields(t))
- goto efault;
+ bool on;
- if (!user_write_access_begin(rseq, t->rseq_len))
- goto efault;
-
- /*
- * Reset all fields to their initial state.
- *
- * All fields have an initial state of 0 except cpu_id which is set to
- * RSEQ_CPU_ID_UNINITIALIZED, so that any user coming in after
- * unregistration can figure out that rseq needs to be registered
- * again.
- */
- rseq_unsafe_put_user(t, cpu_id_start, cpu_id_start, efault_end);
- rseq_unsafe_put_user(t, cpu_id, cpu_id, efault_end);
- rseq_unsafe_put_user(t, node_id, node_id, efault_end);
- rseq_unsafe_put_user(t, mm_cid, mm_cid, efault_end);
-
- /*
- * Additional feature fields added after ORIG_RSEQ_SIZE
- * need to be conditionally reset only if
- * t->rseq_len != ORIG_RSEQ_SIZE.
- */
- user_write_access_end();
- return 0;
-
-efault_end:
- user_write_access_end();
-efault:
- return -EFAULT;
+ if (kstrtobool(str, &on))
+ return -EINVAL;
+ rseq_control_debug(on);
+ return 1;
}
+__setup("rseq_debug=", rseq_setup_debug);
+#ifdef CONFIG_TRACEPOINTS
/*
- * Get the user-space pointer value stored in the 'rseq_cs' field.
+ * Out of line, so the actual update functions can be in a header to be
+ * inlined into the exit to user code.
*/
-static int rseq_get_rseq_cs_ptr_val(struct rseq __user *rseq, u64 *rseq_cs)
+void __rseq_trace_update(struct task_struct *t)
{
- if (!rseq_cs)
- return -EFAULT;
-
-#ifdef CONFIG_64BIT
- if (get_user(*rseq_cs, &rseq->rseq_cs))
- return -EFAULT;
-#else
- if (copy_from_user(rseq_cs, &rseq->rseq_cs, sizeof(*rseq_cs)))
- return -EFAULT;
-#endif
+ trace_rseq_update(t);
+}
- return 0;
+void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip)
+{
+ trace_rseq_ip_fixup(ip, start_ip, offset, abort_ip);
}
+#endif /* CONFIG_TRACEPOINTS */
-/*
- * If the rseq_cs field of 'struct rseq' contains a valid pointer to
- * user-space, copy 'struct rseq_cs' from user-space and validate its fields.
- */
-static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
+#ifdef CONFIG_DEBUG_FS
+#ifdef CONFIG_RSEQ_STATS
+DEFINE_PER_CPU(struct rseq_stats, rseq_stats);
+
+static int rseq_stats_show(struct seq_file *m, void *p)
{
- struct rseq_cs __user *urseq_cs;
- u64 ptr;
- u32 __user *usig;
- u32 sig;
- int ret;
-
- ret = rseq_get_rseq_cs_ptr_val(t->rseq, &ptr);
- if (ret)
- return ret;
-
- /* If the rseq_cs pointer is NULL, return a cleared struct rseq_cs. */
- if (!ptr) {
- memset(rseq_cs, 0, sizeof(*rseq_cs));
- return 0;
+ struct rseq_stats stats = { };
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stats.exit += data_race(per_cpu(rseq_stats.exit, cpu));
+ stats.signal += data_race(per_cpu(rseq_stats.signal, cpu));
+ stats.slowpath += data_race(per_cpu(rseq_stats.slowpath, cpu));
+ stats.fastpath += data_race(per_cpu(rseq_stats.fastpath, cpu));
+ stats.ids += data_race(per_cpu(rseq_stats.ids, cpu));
+ stats.cs += data_race(per_cpu(rseq_stats.cs, cpu));
+ stats.clear += data_race(per_cpu(rseq_stats.clear, cpu));
+ stats.fixup += data_race(per_cpu(rseq_stats.fixup, cpu));
}
- /* Check that the pointer value fits in the user-space process space. */
- if (ptr >= TASK_SIZE)
- return -EINVAL;
- urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
- if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
- return -EFAULT;
- if (rseq_cs->start_ip >= TASK_SIZE ||
- rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
- rseq_cs->abort_ip >= TASK_SIZE ||
- rseq_cs->version > 0)
- return -EINVAL;
- /* Check for overflow. */
- if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
- return -EINVAL;
- /* Ensure that abort_ip is not in the critical section. */
- if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
- return -EINVAL;
+ seq_printf(m, "exit: %16lu\n", stats.exit);
+ seq_printf(m, "signal: %16lu\n", stats.signal);
+ seq_printf(m, "slowp: %16lu\n", stats.slowpath);
+ seq_printf(m, "fastp: %16lu\n", stats.fastpath);
+ seq_printf(m, "ids: %16lu\n", stats.ids);
+ seq_printf(m, "cs: %16lu\n", stats.cs);
+ seq_printf(m, "clear: %16lu\n", stats.clear);
+ seq_printf(m, "fixup: %16lu\n", stats.fixup);
+ return 0;
+}
- usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
- ret = get_user(sig, usig);
- if (ret)
- return ret;
+static int rseq_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rseq_stats_show, inode->i_private);
+}
- if (current->rseq_sig != sig) {
- printk_ratelimited(KERN_WARNING
- "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
- sig, current->rseq_sig, current->pid, usig);
- return -EINVAL;
- }
+static const struct file_operations stat_ops = {
+ .open = rseq_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init rseq_stats_init(struct dentry *root_dir)
+{
+ debugfs_create_file("stats", 0444, root_dir, NULL, &stat_ops);
return 0;
}
+#else
+static inline void rseq_stats_init(struct dentry *root_dir) { }
+#endif /* CONFIG_RSEQ_STATS */
-static bool rseq_warn_flags(const char *str, u32 flags)
+static int rseq_debug_show(struct seq_file *m, void *p)
{
- u32 test_flags;
-
- if (!flags)
- return false;
- test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
- if (test_flags)
- pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
- test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
- if (test_flags)
- pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
- return true;
+ bool on = static_branch_unlikely(&rseq_debug_enabled);
+
+ seq_printf(m, "%d\n", on);
+ return 0;
}
-static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
+static ssize_t rseq_debug_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
{
- u32 flags, event_mask;
- int ret;
+ bool on;
- if (rseq_warn_flags("rseq_cs", cs_flags))
+ if (kstrtobool_from_user(ubuf, count, &on))
return -EINVAL;
- /* Get thread flags. */
- ret = get_user(flags, &t->rseq->flags);
- if (ret)
- return ret;
+ rseq_control_debug(on);
+ return count;
+}
- if (rseq_warn_flags("rseq", flags))
- return -EINVAL;
+static int rseq_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rseq_debug_show, inode->i_private);
+}
- /*
- * Load and clear event mask atomically with respect to
- * scheduler preemption and membarrier IPIs.
- */
- scoped_guard(RSEQ_EVENT_GUARD) {
- event_mask = t->rseq_event_mask;
- t->rseq_event_mask = 0;
- }
+static const struct file_operations debug_ops = {
+ .open = rseq_debug_open,
+ .read = seq_read,
+ .write = rseq_debug_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init rseq_debugfs_init(void)
+{
+ struct dentry *root_dir = debugfs_create_dir("rseq", NULL);
- return !!event_mask;
+ debugfs_create_file("debug", 0644, root_dir, NULL, &debug_ops);
+ rseq_stats_init(root_dir);
+ return 0;
}
+__initcall(rseq_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
-static int clear_rseq_cs(struct rseq __user *rseq)
+static bool rseq_set_ids(struct task_struct *t, struct rseq_ids *ids, u32 node_id)
{
- /*
- * The rseq_cs field is set to NULL on preemption or signal
- * delivery on top of rseq assembly block, as well as on top
- * of code outside of the rseq assembly block. This performs
- * a lazy clear of the rseq_cs field.
- *
- * Set rseq_cs to NULL.
- */
-#ifdef CONFIG_64BIT
- return put_user(0UL, &rseq->rseq_cs);
-#else
- if (clear_user(&rseq->rseq_cs, sizeof(rseq->rseq_cs)))
- return -EFAULT;
- return 0;
-#endif
+ return rseq_set_ids_get_csaddr(t, ids, node_id, NULL);
}
-/*
- * Unsigned comparison will be true when ip >= start_ip, and when
- * ip < start_ip + post_commit_offset.
- */
-static bool in_rseq_cs(unsigned long ip, struct rseq_cs *rseq_cs)
+static bool rseq_handle_cs(struct task_struct *t, struct pt_regs *regs)
{
- return ip - rseq_cs->start_ip < rseq_cs->post_commit_offset;
+ struct rseq __user *urseq = t->rseq.usrptr;
+ u64 csaddr;
+
+ scoped_user_read_access(urseq, efault)
+ unsafe_get_user(csaddr, &urseq->rseq_cs, efault);
+ if (likely(!csaddr))
+ return true;
+ return rseq_update_user_cs(t, regs, csaddr);
+efault:
+ return false;
}
-static int rseq_ip_fixup(struct pt_regs *regs)
+static void rseq_slowpath_update_usr(struct pt_regs *regs)
{
- unsigned long ip = instruction_pointer(regs);
+ /*
+ * Preserve rseq state and user_irq state. The generic entry code
+ * clears user_irq on the way out; the non-generic entry
+ * architectures do not have user_irq.
+ */
+ const struct rseq_event evt_mask = { .has_rseq = true, .user_irq = true, };
struct task_struct *t = current;
- struct rseq_cs rseq_cs;
- int ret;
+ struct rseq_ids ids;
+ u32 node_id;
+ bool event;
+
+ if (unlikely(t->flags & PF_EXITING))
+ return;
- ret = rseq_get_rseq_cs(t, &rseq_cs);
- if (ret)
- return ret;
+ rseq_stat_inc(rseq_stats.slowpath);
/*
- * Handle potentially not being within a critical section.
- * If not nested over a rseq critical section, restart is useless.
- * Clear the rseq_cs pointer and return.
+ * Read and clear the event pending bit first. If the task
+ * was neither preempted nor migrated and no signal is on the way,
+ * there is no point in doing any of the heavy lifting here
+ * on production kernels. In that case TIF_NOTIFY_RESUME
+ * was raised by some other functionality.
+ *
+ * This is correct because the read/clear operation is
+ * guarded against scheduler preemption, which makes it CPU
+ * local atomic. If the task is preempted right after
+ * re-enabling preemption then TIF_NOTIFY_RESUME is set
+ * again and this function is invoked another time _before_
+ * the task is able to return to user mode.
+ *
+ * On a debug kernel, invoke the fixup code unconditionally
+ * with the result handed in to allow the detection of
+ * inconsistencies.
*/
- if (!in_rseq_cs(ip, &rseq_cs))
- return clear_rseq_cs(t->rseq);
- ret = rseq_need_restart(t, rseq_cs.flags);
- if (ret <= 0)
- return ret;
- ret = clear_rseq_cs(t->rseq);
- if (ret)
- return ret;
- trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
- rseq_cs.abort_ip);
- instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip);
- return 0;
+ scoped_guard(irq) {
+ event = t->rseq.event.sched_switch;
+ t->rseq.event.all &= evt_mask.all;
+ ids.cpu_id = task_cpu(t);
+ ids.mm_cid = task_mm_cid(t);
+ }
+
+ if (!event)
+ return;
+
+ node_id = cpu_to_node(ids.cpu_id);
+
+ if (unlikely(!rseq_update_usr(t, regs, &ids, node_id))) {
+ /*
+ * Clear the errors just in case this might survive magically, but
+ * leave the rest intact.
+ */
+ t->rseq.event.error = 0;
+ force_sig(SIGSEGV);
+ }
}
-/*
- * This resume handler must always be executed between any of:
- * - preemption,
- * - signal delivery,
- * and return to user-space.
- *
- * This is how we can ensure that the entire rseq critical section
- * will issue the commit instruction only if executed atomically with
- * respect to other threads scheduled on the same CPU, and with respect
- * to signal handlers.
- */
-void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
+void __rseq_handle_slowpath(struct pt_regs *regs)
{
- struct task_struct *t = current;
- int ret, sig;
-
- if (unlikely(t->flags & PF_EXITING))
+ /*
+ * If invoked from hypervisors before entering the guest via
+ * resume_user_mode_work(), then @regs is a NULL pointer.
+ *
+ * resume_user_mode_work() clears TIF_NOTIFY_RESUME and re-raises
+ * it before returning from the ioctl() to user space when
+ * rseq_event.sched_switch is set.
+ *
+ * So it's safe to ignore here instead of pointlessly updating it
+ * in the vcpu_run() loop.
+ */
+ if (!regs)
return;
+ rseq_slowpath_update_usr(regs);
+}
+
+void __rseq_signal_deliver(int sig, struct pt_regs *regs)
+{
+ rseq_stat_inc(rseq_stats.signal);
/*
- * regs is NULL if and only if the caller is in a syscall path. Skip
- * fixup and leave rseq_cs as is so that rseq_sycall() will detect and
- * kill a misbehaving userspace on debug kernels.
+ * Don't update IDs, they are handled on exit to user if
+ * necessary. The important thing is to abort a critical section of
+ * the interrupted context as after this point the instruction
+ * pointer in @regs points to the signal handler.
*/
- if (regs) {
- ret = rseq_ip_fixup(regs);
- if (unlikely(ret < 0))
- goto error;
+ if (unlikely(!rseq_handle_cs(current, regs))) {
+ /*
+ * Clear the errors just in case this might survive
+ * magically, but leave the rest intact.
+ */
+ current->rseq.event.error = 0;
+ force_sigsegv(sig);
}
- if (unlikely(rseq_update_cpu_node_id(t)))
- goto error;
- return;
-
-error:
- sig = ksig ? ksig->sig : 0;
- force_sigsegv(sig);
}
-#ifdef CONFIG_DEBUG_RSEQ
-
/*
* Terminate the process if a syscall is issued within a restartable
* sequence.
*/
-void rseq_syscall(struct pt_regs *regs)
+void __rseq_debug_syscall_return(struct pt_regs *regs)
{
- unsigned long ip = instruction_pointer(regs);
struct task_struct *t = current;
- struct rseq_cs rseq_cs;
+ u64 csaddr;
- if (!t->rseq)
+ if (!t->rseq.event.has_rseq)
return;
- if (rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
- force_sig(SIGSEGV);
+ if (get_user(csaddr, &t->rseq.usrptr->rseq_cs))
+ goto fail;
+ if (likely(!csaddr))
+ return;
+ if (unlikely(csaddr >= TASK_SIZE))
+ goto fail;
+ if (rseq_debug_update_user_cs(t, regs, csaddr))
+ return;
+fail:
+ force_sig(SIGSEGV);
}
+#ifdef CONFIG_DEBUG_RSEQ
+/* Kept around to support GENERIC_ENTRY=n architectures. */
+void rseq_syscall(struct pt_regs *regs)
+{
+ __rseq_debug_syscall_return(regs);
+}
#endif
+static bool rseq_reset_ids(void)
+{
+ struct rseq_ids ids = {
+ .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+ .mm_cid = 0,
+ };
+
+ /*
+ * If this fails, terminate the task because this leaves the kernel in
+ * a stupid state as exit to user space will try to fixup the ids
+ * again.
+ */
+ if (rseq_set_ids(current, &ids, 0))
+ return true;
+
+ force_sig(SIGSEGV);
+ return false;
+}
+
+/* The original rseq structure size (including padding) is 32 bytes. */
+#define ORIG_RSEQ_SIZE 32
+
/*
* sys_rseq - setup restartable sequences for caller thread.
*/
-SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
- int, flags, u32, sig)
+SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
{
- int ret;
- u64 rseq_cs;
-
if (flags & RSEQ_FLAG_UNREGISTER) {
if (flags & ~RSEQ_FLAG_UNREGISTER)
return -EINVAL;
/* Unregister rseq for current thread. */
- if (current->rseq != rseq || !current->rseq)
+ if (current->rseq.usrptr != rseq || !current->rseq.usrptr)
return -EINVAL;
- if (rseq_len != current->rseq_len)
+ if (rseq_len != current->rseq.len)
return -EINVAL;
- if (current->rseq_sig != sig)
+ if (current->rseq.sig != sig)
return -EPERM;
- ret = rseq_reset_rseq_cpu_node_id(current);
- if (ret)
- return ret;
- current->rseq = NULL;
- current->rseq_sig = 0;
- current->rseq_len = 0;
+ if (!rseq_reset_ids())
+ return -EFAULT;
+ rseq_reset(current);
return 0;
}
if (unlikely(flags))
return -EINVAL;
- if (current->rseq) {
+ if (current->rseq.usrptr) {
/*
* If rseq is already registered, check whether
* the provided address differs from the prior
* one.
*/
- if (current->rseq != rseq || rseq_len != current->rseq_len)
+ if (current->rseq.usrptr != rseq || rseq_len != current->rseq.len)
return -EINVAL;
- if (current->rseq_sig != sig)
+ if (current->rseq.sig != sig)
return -EPERM;
/* Already registered. */
return -EBUSY;
@@ -531,43 +440,39 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
if (!access_ok(rseq, rseq_len))
return -EFAULT;
- /*
- * If the rseq_cs pointer is non-NULL on registration, clear it to
- * avoid a potential segfault on return to user-space. The proper thing
- * to do would have been to fail the registration but this would break
- * older libcs that reuse the rseq area for new threads without
- * clearing the fields.
- */
- if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs))
- return -EFAULT;
- if (rseq_cs && clear_rseq_cs(rseq))
- return -EFAULT;
+ scoped_user_write_access(rseq, efault) {
+ /*
+ * If the rseq_cs pointer is non-NULL on registration, clear it to
+ * avoid a potential segfault on return to user-space. The proper thing
+ * to do would have been to fail the registration but this would break
+ * older libcs that reuse the rseq area for new threads without
+ * clearing the fields. Don't bother reading it, just reset it.
+ */
+ unsafe_put_user(0UL, &rseq->rseq_cs, efault);
+ /* Initialize IDs in user space */
+ unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id_start, efault);
+ unsafe_put_user(RSEQ_CPU_ID_UNINITIALIZED, &rseq->cpu_id, efault);
+ unsafe_put_user(0U, &rseq->node_id, efault);
+ unsafe_put_user(0U, &rseq->mm_cid, efault);
+ }
-#ifdef CONFIG_DEBUG_RSEQ
- /*
- * Initialize the in-kernel rseq fields copy for validation of
- * read-only fields.
- */
- if (get_user(rseq_kernel_fields(current)->cpu_id_start, &rseq->cpu_id_start) ||
- get_user(rseq_kernel_fields(current)->cpu_id, &rseq->cpu_id) ||
- get_user(rseq_kernel_fields(current)->node_id, &rseq->node_id) ||
- get_user(rseq_kernel_fields(current)->mm_cid, &rseq->mm_cid))
- return -EFAULT;
-#endif
/*
* Activate the registration by setting the rseq area address, length
* and signature in the task struct.
*/
- current->rseq = rseq;
- current->rseq_len = rseq_len;
- current->rseq_sig = sig;
+ current->rseq.usrptr = rseq;
+ current->rseq.len = rseq_len;
+ current->rseq.sig = sig;
/*
* If rseq was previously inactive, and has just been
* registered, ensure the cpu_id_start and cpu_id fields
* are updated before returning to user-space.
*/
- rseq_set_notify_resume(current);
-
+ current->rseq.event.has_rseq = true;
+ rseq_force_update();
return 0;
+
+efault:
+ return -EFAULT;
}
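
For context, the registration path rewritten above is exercised from user space
roughly like the raw sketch below. It assumes nothing else (e.g. glibc) has
already registered rseq for the thread, and RSEQ_SIG is a placeholder which must
match the signature placed before the abort handlers:

    #include <linux/rseq.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define RSEQ_SIG 0x53053053	/* placeholder signature */

    static __thread struct rseq rs __attribute__((aligned(32)));

    static int register_rseq(void)
    {
            /*
             * The kernel initializes rseq_cs and the ID fields on registration
             * and keeps cpu_id/mm_cid up to date on exit to user mode.
             */
            return syscall(__NR_rseq, &rs, sizeof(rs), 0, RSEQ_SIG);
    }
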
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f1ebf67b48e2..62235f1dc04e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2128,8 +2128,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_on_rq_migrating(p))
flags |= ENQUEUE_MIGRATED;
- if (flags & ENQUEUE_MIGRATED)
- sched_mm_cid_migrate_to(rq, p);
enqueue_task(rq, p, flags);
@@ -2671,6 +2669,8 @@ int push_cpu_stop(void *arg)
return 0;
}
+static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask);
+
/*
* sched_class::set_cpus_allowed must do the below, but is not required to
* actually call this function.
@@ -2684,6 +2684,7 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
cpumask_copy(&p->cpus_mask, ctx->new_mask);
p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+ mm_update_cpus_allowed(p->mm, ctx->new_mask);
/*
* Swap in a new user_cpus_ptr if SCA_USER flag set
@@ -2730,7 +2731,6 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
put_prev_task(rq, p);
p->sched_class->set_cpus_allowed(p, ctx);
- mm_set_cpus_allowed(p->mm, ctx->new_mask);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -3329,8 +3329,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
- rseq_migrate(p);
- sched_mm_cid_migrate_from(p);
perf_event_task_migrate(p);
}
@@ -4487,7 +4485,6 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p)
init_numa_balancing(clone_flags, p);
p->wake_entry.u_flags = CSD_TYPE_TTWU;
p->migration_pending = NULL;
- init_sched_mm_cid(p);
}
DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
@@ -4763,7 +4760,6 @@ int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
p->sched_task_group = tg;
}
#endif
- rseq_migrate(p);
/*
* We're setting the CPU for the first time, we don't migrate,
* so use __set_task_cpu().
@@ -4827,7 +4823,6 @@ void wake_up_new_task(struct task_struct *p)
* as we're not fully set-up yet.
*/
p->recent_used_cpu = task_cpu(p);
- rseq_migrate(p);
__set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
@@ -5121,7 +5116,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
- rseq_preempt(prev);
fire_sched_out_preempt_notifiers(prev, next);
kmap_local_sched_out();
prepare_task(next);
@@ -5284,19 +5278,16 @@ context_switch(struct rq *rq, struct task_struct *prev,
*
* kernel -> user switch + mmdrop_lazy_tlb() active
* user -> user switch
- *
- * switch_mm_cid() needs to be updated if the barriers provided
- * by context_switch() are modified.
*/
- if (!next->mm) { // to kernel
+ if (!next->mm) { // to kernel
enter_lazy_tlb(prev->active_mm, next);
next->active_mm = prev->active_mm;
- if (prev->mm) // from user
+ if (prev->mm) // from user
mmgrab_lazy_tlb(prev->active_mm);
else
prev->active_mm = NULL;
- } else { // to user
+ } else { // to user
membarrier_switch_mm(rq, prev->active_mm, next->mm);
/*
* sys_membarrier() requires an smp_mb() between setting
@@ -5309,15 +5300,20 @@ context_switch(struct rq *rq, struct task_struct *prev,
switch_mm_irqs_off(prev->active_mm, next->mm, next);
lru_gen_use_mm(next->mm);
- if (!prev->mm) { // from kernel
+ if (!prev->mm) { // from kernel
/* will mmdrop_lazy_tlb() in finish_task_switch(). */
rq->prev_mm = prev->active_mm;
prev->active_mm = NULL;
}
}
- /* switch_mm_cid() requires the memory barriers above. */
- switch_mm_cid(rq, prev, next);
+ mm_cid_switch_to(prev, next);
+
+ /*
+ * Tell rseq that the task was scheduled in. Must be after
+ * mm_cid_switch_to() to get the TIF flag set.
+ */
+ rseq_sched_switch_event(next);
prepare_lock_switch(rq, next, rf);
@@ -5602,7 +5598,6 @@ void sched_tick(void)
resched_latency = cpu_resched_latency(rq);
calc_global_load_tick(rq);
sched_core_tick(rq);
- task_tick_mm_cid(rq, donor);
scx_tick(rq);
rq_unlock(rq, &rf);
@@ -10374,525 +10369,501 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
}
#ifdef CONFIG_SCHED_MM_CID
-
/*
- * @cid_lock: Guarantee forward-progress of cid allocation.
+ * Concurrency IDentifier management
*
- * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
- * is only used when contention is detected by the lock-free allocation so
- * forward progress can be guaranteed.
- */
-DEFINE_RAW_SPINLOCK(cid_lock);
-
-/*
- * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
- *
- * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
- * detected, it is set to 1 to ensure that all newly coming allocations are
- * serialized by @cid_lock until the allocation which detected contention
- * completes and sets @use_cid_lock back to 0. This guarantees forward progress
- * of a cid allocation.
- */
-int use_cid_lock;
-
-/*
- * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
- * concurrently with respect to the execution of the source runqueue context
- * switch.
- *
- * There is one basic properties we want to guarantee here:
- *
- * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
- * used by a task. That would lead to concurrent allocation of the cid and
- * userspace corruption.
- *
- * Provide this guarantee by introducing a Dekker memory ordering to guarantee
- * that a pair of loads observe at least one of a pair of stores, which can be
- * shown as:
+ * Serialization rules:
*
- * X = Y = 0
+ * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
+ * protects mm::mm_cid::users.
*
- * w[X]=1 w[Y]=1
- * MB MB
- * r[Y]=y r[X]=x
+ * mm::mm_cid::lock: Serializes mm_update_max_cids() and
+ * mm_update_cpus_allowed(). Nests in mm_cid::mutex
+ * and runqueue lock.
*
- * Which guarantees that x==0 && y==0 is impossible. But rather than using
- * values 0 and 1, this algorithm cares about specific state transitions of the
- * runqueue current task (as updated by the scheduler context switch), and the
- * per-mm/cpu cid value.
+ * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
+ * and can only be modified with atomic operations.
*
- * Let's introduce task (Y) which has task->mm == mm and task (N) which has
- * task->mm != mm for the rest of the discussion. There are two scheduler state
- * transitions on context switch we care about:
+ * The mm::mm_cid::pcpu per CPU storage is protected by the CPU's runqueue
+ * lock.
*
- * (TSA) Store to rq->curr with transition from (N) to (Y)
+ * CID ownership:
*
- * (TSB) Store to rq->curr with transition from (Y) to (N)
+ * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or
+ * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the
+ * MM_CID_ONCPU bit set. During transition from CPU to task ownership mode,
+ * MM_CID_TRANSIT is set on the per task CIDs. When this bit is set the
+ * task needs to drop the CID into the pool when scheduling out. Both bits
+ * (ONCPU and TRANSIT) are filtered out by task_cid() when the CID is
+ * actually handed over to user space in the RSEQ memory.
*
- * On the remote-clear side, there is one transition we care about:
+ * Mode switching:
*
- * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
+ * Switching to per CPU mode happens when the user count becomes greater
+ * than the maximum number of CIDs, which is calculated by:
*
- * There is also a transition to UNSET state which can be performed from all
- * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
- * guarantees that only a single thread will succeed:
+ * opt_cids = min(mm_cid::nr_cpus_allowed, mm_cid::users);
+ * max_cids = min(1.25 * opt_cids, num_possible_cpus());
*
- * (TMB) cmpxchg to *pcpu_cid to mark UNSET
+ * The +25% allowance is useful for tight CPU masks in scenarios where only
+ * a few threads are created and destroyed to avoid frequent mode
+ * switches. The allowance shrinks, though, the closer opt_cids comes to
+ * num_possible_cpus(), which is the (unfortunate) hard ABI limit.
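+ *
+ * As an illustration (hypothetical numbers, not taken from the patch):
+ * with nr_cpus_allowed = 8 and users = 10 this gives opt_cids = 8 and
+ * max_cids = min(8 + 2, num_possible_cpus()) = 10 (assuming at least 10
+ * possible CPUs), so per CPU mode is only entered once an 11th user
+ * shows up.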
*
- * Just to be clear, what we do _not_ want to happen is a transition to UNSET
- * when a thread is actively using the cid (property (1)).
+ * At the point of switching to per CPU mode the new user is not yet
+ * visible in the system, so the task which initiated the fork() runs the
+ * fixup function: mm_cid_fixup_tasks_to_cpus() walks the thread list and
+ * either transfers each task's owned CID to the CPU the task runs on or
+ * drops it into the CID pool if a task is not on a CPU at that point in
+ * time. Tasks which schedule in before the task walk reaches them do the
+ * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus() completes
+ * it's guaranteed that no task related to that MM owns a CID anymore.
*
- * Let's looks at the relevant combinations of TSA/TSB, and TMA transitions.
+ * Switching back to task mode happens when the user count goes below the
+ * threshold which was recorded on the per CPU mode switch:
*
- * Scenario A) (TSA)+(TMA) (from next task perspective)
+ * pcpu_thrs = min(opt_cids - (opt_cids / 4), num_possible_cpus() / 2);
*
- * CPU0 CPU1
+ * This threshold is updated when an affinity change increases the number of
+ * allowed CPUs for the MM, which might cause a switch back to per task
+ * mode.
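+ *
+ * Continuing the hypothetical example from above: with opt_cids = 8 the
+ * threshold is pcpu_thrs = min(8 - 2, num_possible_cpus() / 2) = 6
+ * (assuming at least 12 possible CPUs), so the MM switches back to per
+ * task mode once the user count drops below 6.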
*
- * Context switch CS-1 Remote-clear
- * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
- * (implied barrier after cmpxchg)
- * - switch_mm_cid()
- * - memory barrier (see switch_mm_cid()
- * comment explaining how this barrier
- * is combined with other scheduler
- * barriers)
- * - mm_cid_get (next)
- * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
+ * If the switch back was initiated by an exiting task, then that task runs
+ * the fixup function. If it was initiated by an affinity change, then it is
+ * run either in the deferred update function in the context of a workqueue,
+ * by a task which forks a new one, or by a task which exits, whichever
+ * happens first. mm_cid_fixup_cpus_to_tasks() walks through the possible
+ * CPUs and either transfers the CPU owned CIDs to a related task which
+ * runs on the CPU or drops them into the pool. Tasks which schedule in on a
+ * CPU which the walk did not cover yet do the handover themselves.
*
- * This Dekker ensures that either task (Y) is observed by the
- * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
- * observed.
+ * This transition from CPU to per task ownership happens in two phases:
*
- * If task (Y) store is observed by rcu_dereference(), it means that there is
- * still an active task on the cpu. Remote-clear will therefore not transition
- * to UNSET, which fulfills property (1).
+ * 1) mm::mm_cid.transit contains MM_CID_TRANSIT. This is OR'ed onto the
+ * task CID and denotes that the CID is only temporarily owned by the
+ * task. When it schedules out, the task drops the CID back into the
+ * pool if this bit is set.
*
- * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
- * it will move its state to UNSET, which clears the percpu cid perhaps
- * uselessly (which is not an issue for correctness). Because task (Y) is not
- * observed, CPU1 can move ahead to set the state to UNSET. Because moving
- * state to UNSET is done with a cmpxchg expecting that the old state has the
- * LAZY flag set, only one thread will successfully UNSET.
+ * 2) The initiating context walks the per CPU space and after completion
+ * clears mm::mm_cid.transit. So after that point the CIDs are strictly
+ * task owned again.
*
- * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
- * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
- * CPU1 will observe task (Y) and do nothing more, which is fine.
+ * This two phase transition is required to prevent CID space exhaustion
+ * during the transition as a direct transfer of ownership would fail if
+ * two tasks are scheduled in on the same CPU before the fixup freed per
+ * CPU CIDs.
*
- * What we are effectively preventing with this Dekker is a scenario where
- * neither LAZY flag nor store (Y) are observed, which would fail property (1)
- * because this would UNSET a cid which is actively used.
+ * When mm_cid_fixup_cpus_to_tasks() completes it's guaranteed that no CID
+ * related to that MM is owned by a CPU anymore.
*/
-void sched_mm_cid_migrate_from(struct task_struct *t)
-{
- t->migrate_from_cpu = task_cpu(t);
-}
-
-static
-int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
- struct task_struct *t,
- struct mm_cid *src_pcpu_cid)
+/*
+ * Update the CID range properties when the constraints change. Invoked via
+ * fork(), exit() and affinity changes
+ */
+static void __mm_update_max_cids(struct mm_mm_cid *mc)
{
- struct mm_struct *mm = t->mm;
- struct task_struct *src_task;
- int src_cid, last_mm_cid;
+ unsigned int opt_cids, max_cids;
- if (!mm)
- return -1;
+ /* Calculate the new optimal constraint */
+ opt_cids = min(mc->nr_cpus_allowed, mc->users);
- last_mm_cid = t->last_mm_cid;
- /*
- * If the migrated task has no last cid, or if the current
- * task on src rq uses the cid, it means the source cid does not need
- * to be moved to the destination cpu.
- */
- if (last_mm_cid == -1)
- return -1;
- src_cid = READ_ONCE(src_pcpu_cid->cid);
- if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
- return -1;
+ /* Adjust the maximum CIDs to +25% limited by the number of possible CPUs */
+ max_cids = min(opt_cids + (opt_cids / 4), num_possible_cpus());
+ WRITE_ONCE(mc->max_cids, max_cids);
+}
- /*
- * If we observe an active task using the mm on this rq, it means we
- * are not the last task to be migrated from this cpu for this mm, so
- * there is no need to move src_cid to the destination cpu.
- */
- guard(rcu)();
- src_task = rcu_dereference(src_rq->curr);
- if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
- t->last_mm_cid = -1;
- return -1;
- }
+static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc)
+{
+ unsigned int opt_cids;
- return src_cid;
+ opt_cids = min(mc->nr_cpus_allowed, mc->users);
+ /* Has to be at least 1 because 0 indicates PCPU mode off */
+ return max(min(opt_cids - opt_cids / 4, num_possible_cpus() / 2), 1);
}
-static
-int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
- struct task_struct *t,
- struct mm_cid *src_pcpu_cid,
- int src_cid)
+static bool mm_update_max_cids(struct mm_struct *mm)
{
- struct task_struct *src_task;
- struct mm_struct *mm = t->mm;
- int lazy_cid;
+ struct mm_mm_cid *mc = &mm->mm_cid;
- if (src_cid == -1)
- return -1;
-
- /*
- * Attempt to clear the source cpu cid to move it to the destination
- * cpu.
- */
- lazy_cid = mm_cid_set_lazy_put(src_cid);
- if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
- return -1;
+ lockdep_assert_held(&mm->mm_cid.lock);
- /*
- * The implicit barrier after cmpxchg per-mm/cpu cid before loading
- * rq->curr->mm matches the scheduler barrier in context_switch()
- * between store to rq->curr and load of prev and next task's
- * per-mm/cpu cid.
- *
- * The implicit barrier after cmpxchg per-mm/cpu cid before loading
- * rq->curr->mm_cid_active matches the barrier in
- * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
- * sched_mm_cid_after_execve() between store to t->mm_cid_active and
- * load of per-mm/cpu cid.
- */
+ /* Clear deferred mode switch flag. A change is handled by the caller */
+ mc->update_deferred = false;
+ __mm_update_max_cids(mc);
- /*
- * If we observe an active task using the mm on this rq after setting
- * the lazy-put flag, this task will be responsible for transitioning
- * from lazy-put flag set to MM_CID_UNSET.
- */
- scoped_guard (rcu) {
- src_task = rcu_dereference(src_rq->curr);
- if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
- /*
- * We observed an active task for this mm, there is therefore
- * no point in moving this cid to the destination cpu.
- */
- t->last_mm_cid = -1;
- return -1;
- }
+ /* Check whether owner mode must be changed */
+ if (!mc->percpu) {
+ /* Enable per CPU mode when the number of users is above max_cids */
+ if (mc->users > mc->max_cids)
+ mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
+ } else {
+ /* Switch back to per task if user count under threshold */
+ if (mc->users < mc->pcpu_thrs)
+ mc->pcpu_thrs = 0;
}
- /*
- * The src_cid is unused, so it can be unset.
- */
- if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
- return -1;
- WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
- return src_cid;
+ /* Mode change required? */
+ if (!!mc->percpu == !!mc->pcpu_thrs)
+ return false;
+ /* When switching back to per TASK mode, set the transition flag */
+ if (!mc->pcpu_thrs)
+ WRITE_ONCE(mc->transit, MM_CID_TRANSIT);
+ WRITE_ONCE(mc->percpu, !!mc->pcpu_thrs);
+ return true;
}
-/*
- * Migration to dst cpu. Called with dst_rq lock held.
- * Interrupts are disabled, which keeps the window of cid ownership without the
- * source rq lock held small.
- */
-void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
+static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
{
- struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
- struct mm_struct *mm = t->mm;
- int src_cid, src_cpu;
- bool dst_cid_is_set;
- struct rq *src_rq;
-
- lockdep_assert_rq_held(dst_rq);
+ struct cpumask *mm_allowed;
+ struct mm_mm_cid *mc;
+ unsigned int weight;
- if (!mm)
+ if (!mm || !READ_ONCE(mm->mm_cid.users))
return;
- src_cpu = t->migrate_from_cpu;
- if (src_cpu == -1) {
- t->last_mm_cid = -1;
- return;
- }
/*
- * Move the src cid if the dst cid is unset. This keeps id
- * allocation closest to 0 in cases where few threads migrate around
- * many CPUs.
- *
- * If destination cid or recent cid is already set, we may have
- * to just clear the src cid to ensure compactness in frequent
- * migrations scenarios.
- *
- * It is not useful to clear the src cid when the number of threads is
- * greater or equal to the number of allowed CPUs, because user-space
- * can expect that the number of allowed cids can reach the number of
- * allowed CPUs.
- */
- dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
- dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
- !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
- if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
+ * mm::mm_cid::mm_cpus_allowed is the superset of each thread's
+ * allowed CPUs mask, which means it can only grow.
+ */
+ mc = &mm->mm_cid;
+ guard(raw_spinlock)(&mc->lock);
+ mm_allowed = mm_cpus_allowed(mm);
+ weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
+ if (weight == mc->nr_cpus_allowed)
return;
- src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
- src_rq = cpu_rq(src_cpu);
- src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
- if (src_cid == -1)
+
+ WRITE_ONCE(mc->nr_cpus_allowed, weight);
+ __mm_update_max_cids(mc);
+ if (!mc->percpu)
return;
- src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
- src_cid);
- if (src_cid == -1)
+
+ /* Adjust the threshold to the wider set */
+ mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
+ /* Switch back to per task mode? */
+ if (mc->users >= mc->pcpu_thrs)
return;
- if (dst_cid_is_set) {
- __mm_cid_put(mm, src_cid);
+
+ /* Don't queue twice */
+ if (mc->update_deferred)
return;
- }
- /* Move src_cid to dst cpu. */
- mm_cid_snapshot_time(dst_rq, mm);
- WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
- WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
+
+ /* Queue the irq work, which schedules the real work */
+ mc->update_deferred = true;
+ irq_work_queue(&mc->irq_work);
}
-static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
- int cpu)
+static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)
{
- struct rq *rq = cpu_rq(cpu);
- struct task_struct *t;
- int cid, lazy_cid;
+ if (cid_on_cpu(t->mm_cid.cid)) {
+ unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid);
- cid = READ_ONCE(pcpu_cid->cid);
- if (!mm_cid_is_valid(cid))
- return;
+ t->mm_cid.cid = cid_to_transit_cid(cid);
+ pcp->cid = t->mm_cid.cid;
+ }
+}
- /*
- * Clear the cpu cid if it is set to keep cid allocation compact. If
- * there happens to be other tasks left on the source cpu using this
- * mm, the next task using this mm will reallocate its cid on context
- * switch.
- */
- lazy_cid = mm_cid_set_lazy_put(cid);
- if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
- return;
+static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
+{
+ unsigned int cpu;
- /*
- * The implicit barrier after cmpxchg per-mm/cpu cid before loading
- * rq->curr->mm matches the scheduler barrier in context_switch()
- * between store to rq->curr and load of prev and next task's
- * per-mm/cpu cid.
- *
- * The implicit barrier after cmpxchg per-mm/cpu cid before loading
- * rq->curr->mm_cid_active matches the barrier in
- * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
- * sched_mm_cid_after_execve() between store to t->mm_cid_active and
- * load of per-mm/cpu cid.
- */
+ /* Walk the CPUs and fixup all stale CIDs */
+ for_each_possible_cpu(cpu) {
+ struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
+ struct rq *rq = cpu_rq(cpu);
- /*
- * If we observe an active task using the mm on this rq after setting
- * the lazy-put flag, that task will be responsible for transitioning
- * from lazy-put flag set to MM_CID_UNSET.
- */
- scoped_guard (rcu) {
- t = rcu_dereference(rq->curr);
- if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
- return;
+ /* Remote access to mm::mm_cid::pcpu requires rq_lock */
+ guard(rq_lock_irq)(rq);
+ /* Is the CID still owned by the CPU? */
+ if (cid_on_cpu(pcp->cid)) {
+ /*
+ * If rq->curr has @mm, transfer it with the
+ * transition bit set. Otherwise drop it.
+ */
+ if (rq->curr->mm == mm && rq->curr->mm_cid.active)
+ mm_cid_transit_to_task(rq->curr, pcp);
+ else
+ mm_drop_cid_on_cpu(mm, pcp);
+
+ } else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
+ unsigned int cid = rq->curr->mm_cid.cid;
+
+ /* Ensure it has the transition bit set */
+ if (!cid_in_transit(cid)) {
+ cid = cid_to_transit_cid(cid);
+ rq->curr->mm_cid.cid = cid;
+ pcp->cid = cid;
+ }
+ }
}
+ /* Clear the transition bit */
+ WRITE_ONCE(mm->mm_cid.transit, 0);
+}
- /*
- * The cid is unused, so it can be unset.
- * Disable interrupts to keep the window of cid ownership without rq
- * lock small.
- */
- scoped_guard (irqsave) {
- if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
- __mm_cid_put(mm, cid);
+static inline void mm_cid_transfer_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)
+{
+ if (cid_on_task(t->mm_cid.cid)) {
+ t->mm_cid.cid = cid_to_cpu_cid(t->mm_cid.cid);
+ pcp->cid = t->mm_cid.cid;
}
}
-static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
+static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
{
- struct rq *rq = cpu_rq(cpu);
- struct mm_cid *pcpu_cid;
- struct task_struct *curr;
- u64 rq_clock;
+ /* Remote access to mm::mm_cid::pcpu requires rq_lock */
+ guard(task_rq_lock)(t);
+ /* If the task is not active it is not in the users count */
+ if (!t->mm_cid.active)
+ return false;
+ if (cid_on_task(t->mm_cid.cid)) {
+ /* If running on the CPU, transfer the CID, otherwise drop it */
+ if (task_rq(t)->curr == t)
+ mm_cid_transfer_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
+ else
+ mm_unset_cid_on_task(t);
+ }
+ return true;
+}
- /*
- * rq->clock load is racy on 32-bit but one spurious clear once in a
- * while is irrelevant.
- */
- rq_clock = READ_ONCE(rq->clock);
- pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
+static void mm_cid_fixup_tasks_to_cpus(void)
+{
+ struct mm_struct *mm = current->mm;
+ struct task_struct *p, *t;
+ unsigned int users;
/*
- * In order to take care of infrequently scheduled tasks, bump the time
- * snapshot associated with this cid if an active task using the mm is
- * observed on this rq.
+ * This can obviously race with a concurrent affinity change, which
+ * increases the number of allowed CPUs for this mm, but that does
+ * not affect the mode and only changes the CID constraints. A
+ * possible switch back to per task mode happens either in the
+ * deferred handler function or in the next fork()/exit().
+ *
+ * The caller has already transferred its own CID. The newly incoming
+ * task is already accounted for in the user count, but not yet visible.
*/
- scoped_guard (rcu) {
- curr = rcu_dereference(rq->curr);
- if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
- WRITE_ONCE(pcpu_cid->time, rq_clock);
- return;
- }
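+ /* Exclude the caller (CID already transferred) and the new, not yet visible task */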
+ users = mm->mm_cid.users - 2;
+ if (!users)
+ return;
+
+ guard(rcu)();
+ for_other_threads(current, t) {
+ if (mm_cid_fixup_task_to_cpu(t, mm))
+ users--;
}
- if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
+ if (!users)
return;
- sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
+
+ /* Happens only for CLONE_VM processes. */
+ for_each_process_thread(p, t) {
+ if (t == current || t->mm != mm)
+ continue;
+ if (mm_cid_fixup_task_to_cpu(t, mm)) {
+ if (--users == 0)
+ return;
+ }
+ }
}
-static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
- int weight)
+static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
{
- struct mm_cid *pcpu_cid;
- int cid;
-
- pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
- cid = READ_ONCE(pcpu_cid->cid);
- if (!mm_cid_is_valid(cid) || cid < weight)
- return;
- sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
+ t->mm_cid.active = 1;
+ mm->mm_cid.users++;
+ return mm_update_max_cids(mm);
}
-static void task_mm_cid_work(struct callback_head *work)
+void sched_mm_cid_fork(struct task_struct *t)
{
- unsigned long now = jiffies, old_scan, next_scan;
- struct task_struct *t = current;
- struct cpumask *cidmask;
- struct mm_struct *mm;
- int weight, cpu;
+ struct mm_struct *mm = t->mm;
+ bool percpu;
- WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work));
+ WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
- work->next = work; /* Prevent double-add */
- if (t->flags & PF_EXITING)
- return;
- mm = t->mm;
- if (!mm)
- return;
- old_scan = READ_ONCE(mm->mm_cid_next_scan);
- next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
- if (!old_scan) {
- unsigned long res;
-
- res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
- if (res != old_scan)
- old_scan = res;
+ guard(mutex)(&mm->mm_cid.mutex);
+ scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
+ struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);
+
+ /* First user ? */
+ if (!mm->mm_cid.users) {
+ sched_mm_cid_add_user(t, mm);
+ t->mm_cid.cid = mm_get_cid(mm);
+ /* Required for execve() */
+ pcp->cid = t->mm_cid.cid;
+ return;
+ }
+
+ if (!sched_mm_cid_add_user(t, mm)) {
+ if (!mm->mm_cid.percpu)
+ t->mm_cid.cid = mm_get_cid(mm);
+ return;
+ }
+
+ /* Handle the mode change and transfer current's CID */
+ percpu = !!mm->mm_cid.percpu;
+ if (!percpu)
+ mm_cid_transit_to_task(current, pcp);
else
- old_scan = next_scan;
+ mm_cid_transfer_to_cpu(current, pcp);
}
- if (time_before(now, old_scan))
- return;
- if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
- return;
- cidmask = mm_cidmask(mm);
- /* Clear cids that were not recently used. */
- for_each_possible_cpu(cpu)
- sched_mm_cid_remote_clear_old(mm, cpu);
- weight = cpumask_weight(cidmask);
- /*
- * Clear cids that are greater or equal to the cidmask weight to
- * recompact it.
- */
- for_each_possible_cpu(cpu)
- sched_mm_cid_remote_clear_weight(mm, cpu, weight);
-}
-
-void init_sched_mm_cid(struct task_struct *t)
-{
- struct mm_struct *mm = t->mm;
- int mm_users = 0;
- if (mm) {
- mm_users = atomic_read(&mm->mm_users);
- if (mm_users == 1)
- mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
+ if (percpu) {
+ mm_cid_fixup_tasks_to_cpus();
+ } else {
+ mm_cid_fixup_cpus_to_tasks(mm);
+ t->mm_cid.cid = mm_get_cid(mm);
}
- t->cid_work.next = &t->cid_work; /* Protect against double add */
- init_task_work(&t->cid_work, task_mm_cid_work);
}
-void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
+static bool sched_mm_cid_remove_user(struct task_struct *t)
{
- struct callback_head *work = &curr->cid_work;
- unsigned long now = jiffies;
-
- if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
- work->next != work)
- return;
- if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
- return;
-
- /* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME);
+ t->mm_cid.active = 0;
+ scoped_guard(preempt) {
+ /* Clear the transition bit */
+ t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
+ mm_unset_cid_on_task(t);
+ }
+ t->mm->mm_cid.users--;
+ return mm_update_max_cids(t->mm);
}
-void sched_mm_cid_exit_signals(struct task_struct *t)
+static bool __sched_mm_cid_exit(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq *rq;
-
- if (!mm)
- return;
- preempt_disable();
- rq = this_rq();
- guard(rq_lock_irqsave)(rq);
- preempt_enable_no_resched(); /* holding spinlock */
- WRITE_ONCE(t->mm_cid_active, 0);
+ if (!sched_mm_cid_remove_user(t))
+ return false;
+ /*
+ * Contrary to fork() this only deals with a switch back to per
+ * task mode either because the above decreased users or an
+ * affinity change increased the number of allowed CPUs and the
+ * deferred fixup did not run yet.
+ */
+ if (WARN_ON_ONCE(mm->mm_cid.percpu))
+ return false;
/*
- * Store t->mm_cid_active before loading per-mm/cpu cid.
- * Matches barrier in sched_mm_cid_remote_clear_old().
+ * A failed fork(2) cleanup never gets here, so @current must have
+ * the same MM as @t. That's true for exit() and the failed
+ * pthread_create() cleanup case.
*/
- smp_mb();
- mm_cid_put(mm);
- t->last_mm_cid = t->mm_cid = -1;
+ if (WARN_ON_ONCE(current->mm != mm))
+ return false;
+ return true;
}
-void sched_mm_cid_before_execve(struct task_struct *t)
+/*
+ * When a task exits, the MM CID held by the task is no longer required as
+ * the task cannot return to user space.
+ */
+void sched_mm_cid_exit(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
- struct rq *rq;
- if (!mm)
+ if (!mm || !t->mm_cid.active)
return;
+ /*
+ * Ensure that only one instance is doing MM CID operations within
+ * a MM. The common case is uncontended. The rare fixup case adds
+ * some overhead.
+ */
+ scoped_guard(mutex, &mm->mm_cid.mutex) {
+ /* mm_cid::mutex is sufficient to protect mm_cid::users */
+ if (likely(mm->mm_cid.users > 1)) {
+ scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
+ if (!__sched_mm_cid_exit(t))
+ return;
+ /* Mode change required. Transfer current's CID */
+ mm_cid_transit_to_task(current, this_cpu_ptr(mm->mm_cid.pcpu));
+ }
+ mm_cid_fixup_cpus_to_tasks(mm);
+ return;
+ }
+ /* Last user */
+ scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
+ /* Required across execve() */
+ if (t == current)
+ mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
+ /* Ignore mode change. There is nothing to do. */
+ sched_mm_cid_remove_user(t);
+ }
+ }
- preempt_disable();
- rq = this_rq();
- guard(rq_lock_irqsave)(rq);
- preempt_enable_no_resched(); /* holding spinlock */
- WRITE_ONCE(t->mm_cid_active, 0);
/*
- * Store t->mm_cid_active before loading per-mm/cpu cid.
- * Matches barrier in sched_mm_cid_remote_clear_old().
+ * As this is the last user (execve(), process exit or failed
+ * fork(2)) there is no concurrency anymore.
+ *
+ * Synchronize possibly pending work to ensure that there are no
+ * dangling references left. mm->mm_cid.users is zero, so nothing
+ * can queue this work anymore.
*/
- smp_mb();
- mm_cid_put(mm);
- t->last_mm_cid = t->mm_cid = -1;
+ irq_work_sync(&mm->mm_cid.irq_work);
+ cancel_work_sync(&mm->mm_cid.work);
+}
+
+/* Deactivate MM CID allocation across execve() */
+void sched_mm_cid_before_execve(struct task_struct *t)
+{
+ sched_mm_cid_exit(t);
}
+/* Reactivate MM CID after successful execve() */
void sched_mm_cid_after_execve(struct task_struct *t)
{
- struct mm_struct *mm = t->mm;
- struct rq *rq;
+ sched_mm_cid_fork(t);
+}
+
+static void mm_cid_work_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);
- if (!mm)
+ guard(mutex)(&mm->mm_cid.mutex);
+ /* Did the last user task exit already? */
+ if (!mm->mm_cid.users)
return;
- preempt_disable();
- rq = this_rq();
- scoped_guard (rq_lock_irqsave, rq) {
- preempt_enable_no_resched(); /* holding spinlock */
- WRITE_ONCE(t->mm_cid_active, 1);
- /*
- * Store t->mm_cid_active before loading per-mm/cpu cid.
- * Matches barrier in sched_mm_cid_remote_clear_old().
- */
- smp_mb();
- t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
+ scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
+ /* Has fork() or exit() handled it already? */
+ if (!mm->mm_cid.update_deferred)
+ return;
+ /* This clears mm_cid::update_deferred */
+ if (!mm_update_max_cids(mm))
+ return;
+ /* Affinity changes can only switch back to task mode */
+ if (WARN_ON_ONCE(mm->mm_cid.percpu))
+ return;
}
+ mm_cid_fixup_cpus_to_tasks(mm);
}
-void sched_mm_cid_fork(struct task_struct *t)
+static void mm_cid_irq_work(struct irq_work *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);
+
+ /*
+ * Needs to be unconditional because mm_cid::lock cannot be held
+ * when scheduling work as mm_update_cpus_allowed() nests inside
+ * rq::lock and schedule_work() might end up in wakeup...
+ */
+ schedule_work(&mm->mm_cid.work);
+}
+
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
{
- WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
- t->mm_cid_active = 1;
+ mm->mm_cid.max_cids = 0;
+ mm->mm_cid.percpu = 0;
+ mm->mm_cid.transit = 0;
+ mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+ mm->mm_cid.users = 0;
+ mm->mm_cid.pcpu_thrs = 0;
+ mm->mm_cid.update_deferred = 0;
+ raw_spin_lock_init(&mm->mm_cid.lock);
+ mutex_init(&mm->mm_cid.mutex);
+ mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
+ INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
+ cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
+ bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
-#endif /* CONFIG_SCHED_MM_CID */
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
+#endif /* !CONFIG_SCHED_MM_CID */
#ifdef CONFIG_SCHED_CLASS_EXT
void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 62fba83b7bb1..623445603725 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -199,7 +199,7 @@ static void ipi_rseq(void *info)
* is negligible.
*/
smp_mb();
- rseq_preempt(current);
+ rseq_sched_switch_event(current);
}
static void ipi_sync_rq_state(void *info)
@@ -407,9 +407,9 @@ static int membarrier_private_expedited(int flags, int cpu_id)
* membarrier, we will end up with some thread in the mm
* running without a core sync.
*
- * For RSEQ, don't rseq_preempt() the caller. User code
- * is not supposed to issue syscalls at all from inside an
- * rseq critical section.
+ * For RSEQ, don't invoke rseq_sched_switch_event() on the
+ * caller. User code is not supposed to issue syscalls at
+ * all from inside an rseq critical section.
*/
if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
preempt_disable();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index adfb6e3409d7..f9d0515db130 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2209,6 +2209,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
smp_wmb();
WRITE_ONCE(task_thread_info(p)->cpu, cpu);
p->wake_cpu = cpu;
+ rseq_sched_set_ids_changed(p);
#endif /* CONFIG_SMP */
}
@@ -3540,283 +3541,212 @@ extern const char *preempt_modes[];
#ifdef CONFIG_SCHED_MM_CID
-#define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */
-#define MM_CID_SCAN_DELAY 100 /* 100ms */
+static __always_inline bool cid_on_cpu(unsigned int cid)
+{
+ return cid & MM_CID_ONCPU;
+}
-extern raw_spinlock_t cid_lock;
-extern int use_cid_lock;
+static __always_inline bool cid_in_transit(unsigned int cid)
+{
+ return cid & MM_CID_TRANSIT;
+}
-extern void sched_mm_cid_migrate_from(struct task_struct *t);
-extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
-extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
-extern void init_sched_mm_cid(struct task_struct *t);
+static __always_inline unsigned int cpu_cid_to_cid(unsigned int cid)
+{
+ return cid & ~MM_CID_ONCPU;
+}
-static inline void __mm_cid_put(struct mm_struct *mm, int cid)
+static __always_inline unsigned int cid_to_cpu_cid(unsigned int cid)
{
- if (cid < 0)
- return;
- cpumask_clear_cpu(cid, mm_cidmask(mm));
+ return cid | MM_CID_ONCPU;
}
-/*
- * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to
- * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
- * be held to transition to other states.
- *
- * State transitions synchronized with cmpxchg or try_cmpxchg need to be
- * consistent across CPUs, which prevents use of this_cpu_cmpxchg.
- */
-static inline void mm_cid_put_lazy(struct task_struct *t)
+static __always_inline unsigned int cid_to_transit_cid(unsigned int cid)
{
- struct mm_struct *mm = t->mm;
- struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- int cid;
+ return cid | MM_CID_TRANSIT;
+}
- lockdep_assert_irqs_disabled();
- cid = __this_cpu_read(pcpu_cid->cid);
- if (!mm_cid_is_lazy_put(cid) ||
- !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
- return;
- __mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
+static __always_inline unsigned int cid_from_transit_cid(unsigned int cid)
+{
+ return cid & ~MM_CID_TRANSIT;
}
-static inline int mm_cid_pcpu_unset(struct mm_struct *mm)
+static __always_inline bool cid_on_task(unsigned int cid)
{
- struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- int cid, res;
+ /* True if none of the MM_CID_ONCPU, MM_CID_TRANSIT, MM_CID_UNSET bits is set */
+ return cid < MM_CID_TRANSIT;
+}
- lockdep_assert_irqs_disabled();
- cid = __this_cpu_read(pcpu_cid->cid);
- for (;;) {
- if (mm_cid_is_unset(cid))
- return MM_CID_UNSET;
- /*
- * Attempt transition from valid or lazy-put to unset.
- */
- res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET);
- if (res == cid)
- break;
- cid = res;
- }
- return cid;
+static __always_inline void mm_drop_cid(struct mm_struct *mm, unsigned int cid)
+{
+ clear_bit(cid, mm_cidmask(mm));
}
-static inline void mm_cid_put(struct mm_struct *mm)
+static __always_inline void mm_unset_cid_on_task(struct task_struct *t)
{
- int cid;
+ unsigned int cid = t->mm_cid.cid;
- lockdep_assert_irqs_disabled();
- cid = mm_cid_pcpu_unset(mm);
- if (cid == MM_CID_UNSET)
- return;
- __mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
+ t->mm_cid.cid = MM_CID_UNSET;
+ if (cid_on_task(cid))
+ mm_drop_cid(t->mm, cid);
}
-static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm)
+static __always_inline void mm_drop_cid_on_cpu(struct mm_struct *mm, struct mm_cid_pcpu *pcp)
{
- struct cpumask *cidmask = mm_cidmask(mm);
- struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- int cid, max_nr_cid, allowed_max_nr_cid;
+ /* Clear the ONCPU bit, but do not set UNSET in the per CPU storage */
+ pcp->cid = cpu_cid_to_cid(pcp->cid);
+ mm_drop_cid(mm, pcp->cid);
+}
- /*
- * After shrinking the number of threads or reducing the number
- * of allowed cpus, reduce the value of max_nr_cid so expansion
- * of cid allocation will preserve cache locality if the number
- * of threads or allowed cpus increase again.
- */
- max_nr_cid = atomic_read(&mm->max_nr_cid);
- while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed),
- atomic_read(&mm->mm_users))),
- max_nr_cid > allowed_max_nr_cid) {
- /* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */
- if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) {
- max_nr_cid = allowed_max_nr_cid;
- break;
- }
- }
- /* Try to re-use recent cid. This improves cache locality. */
- cid = __this_cpu_read(pcpu_cid->recent_cid);
- if (!mm_cid_is_unset(cid) && cid < max_nr_cid &&
- !cpumask_test_and_set_cpu(cid, cidmask))
- return cid;
- /*
- * Expand cid allocation if the maximum number of concurrency
- * IDs allocated (max_nr_cid) is below the number cpus allowed
- * and number of threads. Expanding cid allocation as much as
- * possible improves cache locality.
- */
- cid = max_nr_cid;
- while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) {
- /* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. */
- if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1))
- continue;
- if (!cpumask_test_and_set_cpu(cid, cidmask))
- return cid;
- }
- /*
- * Find the first available concurrency id.
- * Retry finding first zero bit if the mask is temporarily
- * filled. This only happens during concurrent remote-clear
- * which owns a cid without holding a rq lock.
- */
- for (;;) {
- cid = cpumask_first_zero(cidmask);
- if (cid < READ_ONCE(mm->nr_cpus_allowed))
- break;
- cpu_relax();
- }
- if (cpumask_test_and_set_cpu(cid, cidmask))
- return -1;
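+/*
+ * Try to claim a free CID below @max_cids. Returns MM_CID_UNSET when no
+ * CID is available or when the claim raced with a concurrent allocation.
+ */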
+static inline unsigned int __mm_get_cid(struct mm_struct *mm, unsigned int max_cids)
+{
+ unsigned int cid = find_first_zero_bit(mm_cidmask(mm), max_cids);
+ if (cid >= max_cids)
+ return MM_CID_UNSET;
+ if (test_and_set_bit(cid, mm_cidmask(mm)))
+ return MM_CID_UNSET;
return cid;
}
-/*
- * Save a snapshot of the current runqueue time of this cpu
- * with the per-cpu cid value, allowing to estimate how recently it was used.
- */
-static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm)
+static inline unsigned int mm_get_cid(struct mm_struct *mm)
{
- struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));
+ unsigned int cid = __mm_get_cid(mm, READ_ONCE(mm->mm_cid.max_cids));
- lockdep_assert_rq_held(rq);
- WRITE_ONCE(pcpu_cid->time, rq->clock);
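+ /* The constrained range is full: retry in the full CID space until a CID is found */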
+ while (cid == MM_CID_UNSET) {
+ cpu_relax();
+ cid = __mm_get_cid(mm, num_possible_cpus());
+ }
+ return cid;
}
-static inline int __mm_cid_get(struct rq *rq, struct task_struct *t,
- struct mm_struct *mm)
+static inline unsigned int mm_cid_converge(struct mm_struct *mm, unsigned int orig_cid,
+ unsigned int max_cids)
{
- int cid;
+ unsigned int new_cid, cid = cpu_cid_to_cid(orig_cid);
- /*
- * All allocations (even those using the cid_lock) are lock-free. If
- * use_cid_lock is set, hold the cid_lock to perform cid allocation to
- * guarantee forward progress.
- */
- if (!READ_ONCE(use_cid_lock)) {
- cid = __mm_cid_try_get(t, mm);
- if (cid >= 0)
- goto end;
- raw_spin_lock(&cid_lock);
- } else {
- raw_spin_lock(&cid_lock);
- cid = __mm_cid_try_get(t, mm);
- if (cid >= 0)
- goto unlock;
+ /* Is it in the optimal CID space? */
+ if (likely(cid < max_cids))
+ return orig_cid;
+
+ /* Try to find one in the optimal space. Otherwise keep the provided. */
+ new_cid = __mm_get_cid(mm, max_cids);
+ if (new_cid != MM_CID_UNSET) {
+ mm_drop_cid(mm, cid);
+ /* Preserve the ONCPU mode of the original CID */
+ return new_cid | (orig_cid & MM_CID_ONCPU);
}
+ return orig_cid;
+}
- /*
- * cid concurrently allocated. Retry while forcing following
- * allocations to use the cid_lock to ensure forward progress.
- */
- WRITE_ONCE(use_cid_lock, 1);
- /*
- * Set use_cid_lock before allocation. Only care about program order
- * because this is only required for forward progress.
- */
- barrier();
- /*
- * Retry until it succeeds. It is guaranteed to eventually succeed once
- * all newcoming allocations observe the use_cid_lock flag set.
- */
- do {
- cid = __mm_cid_try_get(t, mm);
- cpu_relax();
- } while (cid < 0);
- /*
- * Allocate before clearing use_cid_lock. Only care about
- * program order because this is for forward progress.
- */
- barrier();
- WRITE_ONCE(use_cid_lock, 0);
-unlock:
- raw_spin_unlock(&cid_lock);
-end:
- mm_cid_snapshot_time(rq, mm);
+static __always_inline void mm_cid_update_task_cid(struct task_struct *t, unsigned int cid)
+{
+ if (t->mm_cid.cid != cid) {
+ t->mm_cid.cid = cid;
+ rseq_sched_set_ids_changed(t);
+ }
+}
- return cid;
+static __always_inline void mm_cid_update_pcpu_cid(struct mm_struct *mm, unsigned int cid)
+{
+ __this_cpu_write(mm->mm_cid.pcpu->cid, cid);
}
-static inline int mm_cid_get(struct rq *rq, struct task_struct *t,
- struct mm_struct *mm)
+static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid)
{
- struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- int cid;
+ unsigned int max_cids, tcid = t->mm_cid.cid;
+ struct mm_struct *mm = t->mm;
- lockdep_assert_rq_held(rq);
- cid = __this_cpu_read(pcpu_cid->cid);
- if (mm_cid_is_valid(cid)) {
- mm_cid_snapshot_time(rq, mm);
- return cid;
- }
- if (mm_cid_is_lazy_put(cid)) {
- if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
- __mm_cid_put(mm, mm_cid_clear_lazy_put(cid));
+ max_cids = READ_ONCE(mm->mm_cid.max_cids);
+ /* Optimize for the common case where both have the ONCPU bit set */
+ if (likely(cid_on_cpu(cpu_cid & tcid))) {
+ if (likely(cpu_cid_to_cid(cpu_cid) < max_cids)) {
+ mm_cid_update_task_cid(t, cpu_cid);
+ return;
+ }
+ /* Try to converge into the optimal CID space */
+ cpu_cid = mm_cid_converge(mm, cpu_cid, max_cids);
+ } else {
+ /* Hand over or drop the task owned CID */
+ if (cid_on_task(tcid)) {
+ if (cid_on_cpu(cpu_cid))
+ mm_unset_cid_on_task(t);
+ else
+ cpu_cid = cid_to_cpu_cid(tcid);
+ }
+ /* Still nothing, allocate a new one */
+ if (!cid_on_cpu(cpu_cid))
+ cpu_cid = cid_to_cpu_cid(mm_get_cid(mm));
}
- cid = __mm_cid_get(rq, t, mm);
- __this_cpu_write(pcpu_cid->cid, cid);
- __this_cpu_write(pcpu_cid->recent_cid, cid);
-
- return cid;
+ mm_cid_update_pcpu_cid(mm, cpu_cid);
+ mm_cid_update_task_cid(t, cpu_cid);
}
-static inline void switch_mm_cid(struct rq *rq,
- struct task_struct *prev,
- struct task_struct *next)
+static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid)
{
- /*
- * Provide a memory barrier between rq->curr store and load of
- * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition.
- *
- * Should be adapted if context_switch() is modified.
- */
- if (!next->mm) { // to kernel
- /*
- * user -> kernel transition does not guarantee a barrier, but
- * we can use the fact that it performs an atomic operation in
- * mmgrab().
- */
- if (prev->mm) // from user
- smp_mb__after_mmgrab();
- /*
- * kernel -> kernel transition does not change rq->curr->mm
- * state. It stays NULL.
- */
- } else { // to user
- /*
- * kernel -> user transition does not provide a barrier
- * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
- * Provide it here.
- */
- if (!prev->mm) { // from kernel
- smp_mb();
- } else { // from user
- /*
- * user->user transition relies on an implicit
- * memory barrier in switch_mm() when
- * current->mm changes. If the architecture
- * switch_mm() does not have an implicit memory
- * barrier, it is emitted here. If current->mm
- * is unchanged, no barrier is needed.
- */
- smp_mb__after_switch_mm();
+ unsigned int max_cids, tcid = t->mm_cid.cid;
+ struct mm_struct *mm = t->mm;
+
+ max_cids = READ_ONCE(mm->mm_cid.max_cids);
+ /* Optimize for the common case, where both have the ONCPU bit clear */
+ if (likely(cid_on_task(tcid | cpu_cid))) {
+ if (likely(tcid < max_cids)) {
+ mm_cid_update_pcpu_cid(mm, tcid);
+ return;
}
+ /* Try to converge into the optimal CID space */
+ tcid = mm_cid_converge(mm, tcid, max_cids);
+ } else {
+ /* Hand over or drop the CPU owned CID */
+ if (cid_on_cpu(cpu_cid)) {
+ if (cid_on_task(tcid))
+ mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));
+ else
+ tcid = cpu_cid_to_cid(cpu_cid);
+ }
+ /* Still nothing, allocate a new one */
+ if (!cid_on_task(tcid))
+ tcid = mm_get_cid(mm);
+ /* Set the transition mode flag if required */
+ tcid |= READ_ONCE(mm->mm_cid.transit);
}
- if (prev->mm_cid_active) {
- mm_cid_snapshot_time(rq, prev->mm);
- mm_cid_put_lazy(prev);
- prev->mm_cid = -1;
- }
- if (next->mm_cid_active)
- next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next, next->mm);
+ mm_cid_update_pcpu_cid(mm, tcid);
+ mm_cid_update_task_cid(t, tcid);
+}
+
+static __always_inline void mm_cid_schedin(struct task_struct *next)
+{
+ struct mm_struct *mm = next->mm;
+ unsigned int cpu_cid;
+
+ if (!next->mm_cid.active)
+ return;
+
+ cpu_cid = __this_cpu_read(mm->mm_cid.pcpu->cid);
+ if (likely(!READ_ONCE(mm->mm_cid.percpu)))
+ mm_cid_from_task(next, cpu_cid);
+ else
+ mm_cid_from_cpu(next, cpu_cid);
+}
+
+static __always_inline void mm_cid_schedout(struct task_struct *prev)
+{
+ /* During mode transitions CIDs are temporary and need to be dropped */
+ if (likely(!cid_in_transit(prev->mm_cid.cid)))
+ return;
+
+ mm_drop_cid(prev->mm, cid_from_transit_cid(prev->mm_cid.cid));
+ prev->mm_cid.cid = MM_CID_UNSET;
+}
+
+static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next)
+{
+ mm_cid_schedout(prev);
+ mm_cid_schedin(next);
}
#else /* !CONFIG_SCHED_MM_CID: */
-static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { }
-static inline void sched_mm_cid_migrate_from(struct task_struct *t) { }
-static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { }
-static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
-static inline void init_sched_mm_cid(struct task_struct *t) { }
+static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next) { }
#endif /* !CONFIG_SCHED_MM_CID */
extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
diff --git a/kernel/signal.c b/kernel/signal.c
index fe9190d84f28..e42b8bd6922f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3125,7 +3125,6 @@ void exit_signals(struct task_struct *tsk)
cgroup_threadgroup_change_begin(tsk);
if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
- sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
return;
@@ -3136,7 +3135,6 @@ void exit_signals(struct task_struct *tsk)
* From now this task is not visible for group-wide signals,
* see wants_signal(), do_signal_stop().
*/
- sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b97692854966..9dc526507875 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -355,6 +355,12 @@ unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_andnot);
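+/*
+ * Compute dst = bitmap1 | bitmap2 and return the number of set bits in
+ * the result in a single pass.
+ */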
+unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(({dst[idx] = bitmap1[idx] | bitmap2[idx]; dst[idx]; }), bits);
+}
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b7a0ae2a7b20..4255fcf9c6e5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
+#include <linux/rseq.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -4476,6 +4477,12 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu);
vcpu->wants_to_run = false;
+ /*
+ * FIXME: Remove this hack once all KVM architectures
+ * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
+ */
+ rseq_virt_userspace_exit();
+
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
}
Linus,
please pull the latest core/debugobjects branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core-debugobjects-2025-11-30
up to: 37de2dbc318e: debugobjects: Use LD_WAIT_CONFIG instead of LD_WAIT_SLEEP
Two small updates for debugobjects:
- Allow pool refill on RT enabled kernels before the scheduler is up
and running to prevent pool exhaustion.
- Correct the lockdep override to prevent false positives.
Thanks,
tglx
------------------>
Sebastian Andrzej Siewior (2):
debugobjects: Allow to refill the pool before SYSTEM_SCHEDULING
debugobjects: Use LD_WAIT_CONFIG instead of LD_WAIT_SLEEP
lib/debugobjects.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 7f50c4480a4e..ecf8e7f978e3 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -714,13 +714,13 @@ static void debug_objects_fill_pool(void)
* raw_spinlock_t are basically the same type and this lock-type
* inversion works just fine.
*/
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible() || system_state < SYSTEM_SCHEDULING) {
/*
* Annotate away the spinlock_t inside raw_spinlock_t warning
- * by temporarily raising the wait-type to WAIT_SLEEP, matching
+ * by temporarily raising the wait-type to LD_WAIT_CONFIG, matching
* the preemptible() condition above.
*/
- static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_CONFIG);
lock_map_acquire_try(&fill_pool_map);
fill_pool();
lock_map_release(&fill_pool_map);
Linus,
please pull the latest irq/msi branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-msi-2025-11-30
up to: 9c1fbc56ca0a: irqchip/gic-its: Rework platform MSI deviceID detection
Updates for [PCI] MSI related code:
- Remove one variant of PCI/MSI management as all users have been
converted to use per device domains. That reduces the variants to two:
the modern one and the really archaic legacy variant, which keeps the usual
suspects in the museum category alive.
- Rework the platform MSI device ID detection mechanism in the ARM GIC
world to address resource leaks, duplicated code and other details. This
requires a corresponding preparatory step in the PCI/iproc driver.
- Trivial core code cleanups
Thanks,
tglx
------------------>
Christophe JAILLET (1):
genirq/msi: Slightly simplify msi_domain_alloc()
Lorenzo Pieralisi (2):
PCI: iproc: Implement MSI controller node detection with of_msi_xlate()
irqchip/gic-its: Rework platform MSI deviceID detection
Nam Cao (1):
PCI/MSI: Delete pci_msi_create_irq_domain()
drivers/irqchip/irq-gic-its-msi-parent.c | 91 ++++++++------------------------
drivers/pci/controller/pcie-iproc.c | 22 ++------
drivers/pci/msi/irqdomain.c | 90 -------------------------------
include/linux/msi.h | 3 --
kernel/irq/msi.c | 2 +-
5 files changed, 29 insertions(+), 179 deletions(-)
diff --git a/drivers/irqchip/irq-gic-its-msi-parent.c b/drivers/irqchip/irq-gic-its-msi-parent.c
index eb1473f1448a..12f45228c867 100644
--- a/drivers/irqchip/irq-gic-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-its-msi-parent.c
@@ -142,83 +142,38 @@ static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
#define its_v5_pci_msi_prepare NULL
#endif /* !CONFIG_PCI_MSI */
-static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
- u32 *dev_id)
+static int of_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev, u32 *dev_id,
+ phys_addr_t *pa)
{
- int ret, index = 0;
+ struct of_phandle_iterator it;
+ int ret;
/* Suck the DeviceID out of the msi-parent property */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (args.np == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
- break;
- }
- index++;
- } while (!ret);
-
- if (ret) {
- struct device_node *np = NULL;
+ of_for_each_phandle(&it, ret, dev->of_node, "msi-parent", "#msi-cells", -1) {
+ /* GICv5 ITS domain matches the MSI controller node parent */
+ struct device_node *np __free(device_node) = pa ? of_get_parent(it.node)
+ : of_node_get(it.node);
- ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
- if (np)
- of_node_put(np);
- }
+ if (np == irq_domain_get_of_node(domain)) {
+ u32 args;
- return ret;
-}
+ if (WARN_ON(of_phandle_iterator_args(&it, &args, 1) != 1))
+ ret = -EINVAL;
-static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev,
- u32 *dev_id, phys_addr_t *pa)
-{
- int ret, index = 0;
- /*
- * Retrieve the DeviceID and the ITS translate frame node pointer
- * out of the msi-parent property.
- */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (ret)
- break;
- /*
- * The IRQ domain fwnode is the msi controller parent
- * in GICv5 (where the msi controller nodes are the
- * ITS translate frames).
- */
- if (args.np->parent == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
-
- ret = its_translate_frame_address(args.np, pa);
- if (ret)
- return -ENODEV;
- break;
- }
- index++;
- } while (!ret);
+ if (!ret && pa)
+ ret = its_translate_frame_address(it.node, pa);
- if (ret) {
- struct device_node *np = NULL;
+ if (!ret)
+ *dev_id = args;
- ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
- if (np) {
- ret = its_translate_frame_address(np, pa);
- of_node_put(np);
+ of_node_put(it.node);
+ return ret;
}
}
- return ret;
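+ /* No msi-parent entry matched the domain, fall back to the msi-map property */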
+ struct device_node *msi_ctrl __free(device_node) = NULL;
+
+ return of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &msi_ctrl, dev_id);
}
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
@@ -234,7 +189,7 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
int ret;
if (dev->of_node)
- ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
+ ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, NULL);
else
ret = iort_pmsi_get_dev_id(dev, &dev_id);
if (ret)
@@ -262,7 +217,7 @@ static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev,
if (!dev->of_node)
return -ENODEV;
- ret = of_v5_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
+ ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
if (ret)
return ret;
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 22134e95574b..ccf71993ea35 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -17,6 +17,7 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
@@ -1337,29 +1338,16 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
- struct device_node *msi_node;
+ struct device_node *msi_node = NULL;
int ret;
/*
* Either the "msi-parent" or the "msi-map" phandle needs to exist
* for us to obtain the MSI node.
*/
-
- msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node) {
- const __be32 *msi_map = NULL;
- int len;
- u32 phandle;
-
- msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
- if (!msi_map)
- return -ENODEV;
-
- phandle = be32_to_cpup(msi_map + 1);
- msi_node = of_find_node_by_phandle(phandle);
- if (!msi_node)
- return -ENODEV;
- }
+ of_msi_xlate(pcie->dev, &msi_node, 0);
+ if (!msi_node)
+ return -ENODEV;
/*
* Certain revisions of the iProc PCIe controller require additional
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index ce741ed9dc3f..a329060287b5 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -49,96 +49,6 @@ static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *
__pci_write_msi_msg(desc, msg);
}
-/**
- * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
- * @desc: Pointer to the MSI descriptor
- *
- * The ID number is only used within the irqdomain.
- */
-static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return (irq_hw_number_t)desc->msi_index |
- pci_dev_id(dev) << 11 |
- ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
-}
-
-static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
- struct msi_desc *desc)
-{
- arg->desc = desc;
- arg->hwirq = pci_msi_domain_calc_hwirq(desc);
-}
-
-static struct msi_domain_ops pci_msi_domain_ops_default = {
- .set_desc = pci_msi_domain_set_desc,
-};
-
-static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- if (ops == NULL) {
- info->ops = &pci_msi_domain_ops_default;
- } else {
- if (ops->set_desc == NULL)
- ops->set_desc = pci_msi_domain_set_desc;
- }
-}
-
-static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = pci_msi_domain_write_msg;
- if (!chip->irq_mask)
- chip->irq_mask = pci_msi_mask_irq;
- if (!chip->irq_unmask)
- chip->irq_unmask = pci_msi_unmask_irq;
-}
-
-/**
- * pci_msi_create_irq_domain - Create a MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a MSI interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- pci_msi_domain_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- pci_msi_domain_update_chip_ops(info);
-
- /* Let the core code free MSI descriptors when freeing interrupts */
- info->flags |= MSI_FLAG_FREE_MSI_DESCS;
-
- info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
- if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
- info->flags |= MSI_FLAG_MUST_REACTIVATE;
-
- /* PCI-MSI is oneshot-safe */
- info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
- /* Let the core update the bus token */
- info->bus_token = DOMAIN_BUS_PCI_MSI;
-
- return msi_create_irq_domain(fwnode, info, parent);
-}
-EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
-
/*
* Per device MSI[-X] domain functionality
*/
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d415dd15a0a9..8003e3218c46 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -701,9 +701,6 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index e7ad99254841..68886881fe10 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -706,7 +706,7 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
int i, ret;
- if (irq_find_mapping(domain, hwirq) > 0)
+ if (irq_resolve_mapping(domain, hwirq))
return -EEXIST;
if (domain->parent) {
Linus,
please pull the latest irq/drivers branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-drivers-2025-11-30
up to: fc584d871c16: irqchip/meson-gpio: Add support for Amlogic S6 S7 and S7D SoCs
Boring updates for interrupt drivers:
- Support for a couple of new ARM64 and RISCV SoC variants and their
magic interrupt controllers, which can either reuse existing code or
require quirks due to botched hardware implementations.
- More section mismatch fixes.
- The usual cleanups and fixes all over the place.
Thanks,
tglx
------------------>
Charles Mirabile (4):
dt-bindings: interrupt-controller: Add UltraRISC DP1000 PLIC
irqchip/sifive-plic: Cache the interrupt enable state
irqchip/sifive-plic: Add support for UltraRISC DP1000 PLIC
irqchip/sifive-plic: Fix call to __plic_toggle() in M-Mode code path
Johan Hovold (16):
irqchip/bcm2712-mip: Fix OF node reference imbalance
irqchip/bcm2712-mip: Fix section mismatch
irqchip/irq-bcm7038-l1: Fix section mismatch
irqchip/irq-bcm7120-l2: Fix section mismatch
irqchip/irq-brcmstb-l2: Fix section mismatch
irqchip/imx-mu-msi: Fix section mismatch
irqchip/renesas-rzg2l: Fix section mismatch
irqchip/starfive-jh8100: Fix section mismatch
irqchip/qcom-irq-combiner: Fix section mismatch
irqchip: Drop leftover brackets
irqchip: Pass platform device to platform drivers
irqchip: Enable compile testing of Broadcom drivers
irqchip/meson-gpio: Drop unused module alias
irqchip/mvebu-pic: Drop unused module alias
irqchip/ts4800: Drop unused module alias
irqchip/qcom-irq-combiner: Rename driver structure
Junhui Liu (4):
dt-bindings: interrupt-controller: Add Anlogic DR1V90 PLIC
dt-bindings: interrupt-controller: Add Anlogic DR1V90 ACLINT MSWI
dt-bindings: interrupt-controller: Add Anlogic DR1V90 ACLINT SSWI
irqchip/aclint-sswi: Add Nuclei UX900 support
Krzysztof Kozlowski (1):
irqchip/irq-bcm7038-l1: Remove unused reg_mask_status()
Lucas Zampieri (1):
dt-bindings: vendor-prefixes: Add UltraRISC
Ryan Chen (1):
dt-bindings: interrupt-controller: aspeed,ast2700: Correct #interrupt-cells and interrupts count
Samuel Holland (3):
irqchip/riscv-imsic: Remove redundant irq_data lookups
irqchip/riscv-imsic: Embed the vector array in lpriv
irqchip/riscv-imsic: Inline imsic_vector_from_local_id()
Xianwei Zhao (2):
dt-bindings: interrupt-controller: Add support for Amlogic S6 S7 and S7D SoCs
irqchip/meson-gpio: Add support for Amlogic S6 S7 and S7D SoCs
.../amlogic,meson-gpio-intc.yaml | 3 +
.../interrupt-controller/aspeed,ast2700-intc.yaml | 13 +-
.../interrupt-controller/sifive,plic-1.0.0.yaml | 4 +
.../thead,c900-aclint-mswi.yaml | 17 ++-
.../thead,c900-aclint-sswi.yaml | 4 +
.../devicetree/bindings/vendor-prefixes.yaml | 2 +
drivers/irqchip/Kconfig | 6 +-
drivers/irqchip/irq-aclint-sswi.c | 3 +-
drivers/irqchip/irq-bcm2712-mip.c | 11 +-
drivers/irqchip/irq-bcm7038-l1.c | 17 +--
drivers/irqchip/irq-bcm7120-l2.c | 31 ++---
drivers/irqchip/irq-brcmstb-l2.c | 25 ++--
drivers/irqchip/irq-imx-mu-msi.c | 28 ++--
drivers/irqchip/irq-mchp-eic.c | 5 +-
drivers/irqchip/irq-meson-gpio.c | 17 ++-
drivers/irqchip/irq-mvebu-pic.c | 2 -
drivers/irqchip/irq-qcom-mpm.c | 6 +-
drivers/irqchip/irq-renesas-rzg2l.c | 37 ++---
drivers/irqchip/irq-renesas-rzv2h.c | 32 ++---
drivers/irqchip/irq-riscv-imsic-early.c | 11 +-
drivers/irqchip/irq-riscv-imsic-platform.c | 4 +-
drivers/irqchip/irq-riscv-imsic-state.c | 20 +--
drivers/irqchip/irq-riscv-imsic-state.h | 4 +-
drivers/irqchip/irq-sifive-plic.c | 149 +++++++++++++++++----
drivers/irqchip/irq-starfive-jh8100-intc.c | 6 +-
drivers/irqchip/irq-ts4800.c | 1 -
drivers/irqchip/irqchip.c | 10 +-
drivers/irqchip/qcom-irq-combiner.c | 6 +-
drivers/irqchip/qcom-pdc.c | 5 +-
include/linux/irqchip.h | 8 +-
30 files changed, 264 insertions(+), 223 deletions(-)
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
index 3d60d9e9e208..d0fad930de9d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
@@ -39,6 +39,9 @@ properties:
- amlogic,a4-gpio-ao-intc
- amlogic,a5-gpio-intc
- amlogic,c3-gpio-intc
+ - amlogic,s6-gpio-intc
+ - amlogic,s7-gpio-intc
+ - amlogic,s7d-gpio-intc
- amlogic,t7-gpio-intc
- const: amlogic,meson-gpio-intc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2700-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2700-intc.yaml
index 55636d06a674..999df5b905c5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2700-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2700-intc.yaml
@@ -25,13 +25,14 @@ properties:
interrupt-controller: true
'#interrupt-cells':
- const: 2
+ const: 1
description:
The first cell is the IRQ number, the second cell is the trigger
type as defined in interrupt.txt in this directory.
interrupts:
- maxItems: 6
+ minItems: 1
+ maxItems: 10
description: |
Depend to which INTC0 or INTC1 used.
INTC0 and INTC1 are two kinds of interrupt controller with enable and raw
@@ -74,13 +75,17 @@ examples:
interrupt-controller@12101b00 {
compatible = "aspeed,ast2700-intc-ic";
reg = <0 0x12101b00 0 0x10>;
- #interrupt-cells = <2>;
+ #interrupt-cells = <1>;
interrupt-controller;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index f683d696909b..6fdb7ae9e85a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -58,6 +58,7 @@ properties:
- const: andestech,nceplic100
- items:
- enum:
+ - anlogic,dr1v90-plic
- canaan,k210-plic
- eswin,eic7700-plic
- sifive,fu540-c000-plic
@@ -75,6 +76,9 @@ properties:
- sophgo,sg2044-plic
- thead,th1520-plic
- const: thead,c900-plic
+ - items:
+ - const: ultrarisc,dp1000-plic
+ - const: ultrarisc,cp100-plic
- items:
- const: sifive,plic-1.0.0
- const: riscv,plic0
diff --git a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
index d6fb08a54167..62fd220e126e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-mswi.yaml
@@ -4,18 +4,23 @@
$id: http://devicetree.org/schemas/interrupt-controller/thead,c900-aclint-mswi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Sophgo sg2042 CLINT Machine-level Software Interrupt Device
+title: ACLINT Machine-level Software Interrupt Device
maintainers:
- Inochi Amaoto <inochiama@outlook.com>
properties:
compatible:
- items:
- - enum:
- - sophgo,sg2042-aclint-mswi
- - sophgo,sg2044-aclint-mswi
- - const: thead,c900-aclint-mswi
+ oneOf:
+ - items:
+ - enum:
+ - sophgo,sg2042-aclint-mswi
+ - sophgo,sg2044-aclint-mswi
+ - const: thead,c900-aclint-mswi
+ - items:
+ - enum:
+ - anlogic,dr1v90-aclint-mswi
+ - const: nuclei,ux900-aclint-mswi
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
index c1ab865fcd64..d02c6886283a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
@@ -30,6 +30,10 @@ properties:
- const: thead,c900-aclint-sswi
- items:
- const: mips,p8700-aclint-sswi
+ - items:
+ - enum:
+ - anlogic,dr1v90-aclint-sswi
+ - const: nuclei,ux900-aclint-sswi
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index f1d1882009ba..647746e6f75f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1705,6 +1705,8 @@ patternProperties:
description: Universal Scientific Industrial Co., Ltd.
"^usr,.*":
description: U.S. Robotics Corporation
+ "^ultrarisc,.*":
+ description: UltraRISC Technology Co., Ltd.
"^ultratronik,.*":
description: Ultratronik GmbH
"^utoo,.*":
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a61c6dc63c29..9b7153777688 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -151,7 +151,7 @@ config BCM6345_L1_IRQ
config BCM7038_L1_IRQ
tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -159,14 +159,14 @@ config BCM7038_L1_IRQ
config BCM7120_L2_IRQ
tristate "Broadcom STB 7120-style L2 interrupt controller driver"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config BRCMSTB_L2_IRQ
tristate "Broadcom STB generic L2 interrupt controller driver"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
diff --git a/drivers/irqchip/irq-aclint-sswi.c b/drivers/irqchip/irq-aclint-sswi.c
index 93e28e9f281f..fee30f3bc5ac 100644
--- a/drivers/irqchip/irq-aclint-sswi.c
+++ b/drivers/irqchip/irq-aclint-sswi.c
@@ -175,7 +175,8 @@ static int __init generic_aclint_sswi_early_probe(struct device_node *node,
{
return generic_aclint_sswi_probe(&node->fwnode);
}
-IRQCHIP_DECLARE(generic_aclint_sswi, "mips,p8700-aclint-sswi", generic_aclint_sswi_early_probe);
+IRQCHIP_DECLARE(mips_p8700_sswi, "mips,p8700-aclint-sswi", generic_aclint_sswi_early_probe);
+IRQCHIP_DECLARE(nuclei_ux900_sswi, "nuclei,ux900-aclint-sswi", generic_aclint_sswi_early_probe);
/* THEAD variant */
#define THEAD_C9XX_CSR_SXSTATUS 0x5c0
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 9bd7bc0bf6d5..4761974ad650 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -232,17 +232,12 @@ static int mip_parse_dt(struct mip_priv *mip, struct device_node *np)
return ret;
}
-static int __init mip_of_msi_init(struct device_node *node, struct device_node *parent)
+static int mip_msi_probe(struct platform_device *pdev, struct device_node *parent)
{
- struct platform_device *pdev;
+ struct device_node *node = pdev->dev.of_node;
struct mip_priv *mip;
int ret;
- pdev = of_find_device_by_node(node);
- of_node_put(node);
- if (!pdev)
- return -EPROBE_DEFER;
-
mip = kzalloc(sizeof(*mip), GFP_KERNEL);
if (!mip)
return -ENOMEM;
@@ -285,7 +280,7 @@ static int __init mip_of_msi_init(struct device_node *node, struct device_node *
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(mip_msi)
-IRQCHIP_MATCH("brcm,bcm2712-mip", mip_of_msi_init)
+IRQCHIP_MATCH("brcm,bcm2712-mip", mip_msi_probe)
IRQCHIP_PLATFORM_DRIVER_END(mip_msi)
MODULE_DESCRIPTION("Broadcom BCM2712 MSI-X interrupt controller");
MODULE_AUTHOR("Phil Elwell <phil@raspberrypi.com>");
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 04fac0cc857f..ea1446c0a09c 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -82,12 +82,6 @@ static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
return (0 * intc->n_words + word) * sizeof(u32);
}
-static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
- unsigned int word)
-{
- return (1 * intc->n_words + word) * sizeof(u32);
-}
-
static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
unsigned int word)
{
@@ -219,9 +213,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
}
#endif
-static int __init bcm7038_l1_init_one(struct device_node *dn,
- unsigned int idx,
- struct bcm7038_l1_chip *intc)
+static int bcm7038_l1_init_one(struct device_node *dn, unsigned int idx,
+ struct bcm7038_l1_chip *intc)
{
struct resource res;
resource_size_t sz;
@@ -395,9 +388,9 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = {
.map = bcm7038_l1_map,
};
-static int __init bcm7038_l1_of_init(struct device_node *dn,
- struct device_node *parent)
+static int bcm7038_l1_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *dn = pdev->dev.of_node;
struct bcm7038_l1_chip *intc;
int idx, ret;
@@ -455,7 +448,7 @@ static int __init bcm7038_l1_of_init(struct device_node *dn,
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
-IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_probe)
IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index ff22c3104401..518c9d4366a5 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -143,8 +143,7 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
- struct bcm7120_l2_intc_data *data)
+static int bcm7120_l2_intc_iomap_7120(struct device_node *dn, struct bcm7120_l2_intc_data *data)
{
int ret;
@@ -177,8 +176,7 @@ static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
- struct bcm7120_l2_intc_data *data)
+static int bcm7120_l2_intc_iomap_3380(struct device_node *dn, struct bcm7120_l2_intc_data *data)
{
unsigned int gc_idx;
@@ -208,15 +206,14 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_probe(struct device_node *dn,
- struct device_node *parent,
+static int bcm7120_l2_intc_probe(struct platform_device *pdev, struct device_node *parent,
int (*iomap_regs_fn)(struct device_node *,
- struct bcm7120_l2_intc_data *),
+ struct bcm7120_l2_intc_data *),
const char *intc_name)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct device_node *dn = pdev->dev.of_node;
struct bcm7120_l2_intc_data *data;
- struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret = 0;
@@ -227,14 +224,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (!data)
return -ENOMEM;
- pdev = of_find_device_by_node(dn);
- if (!pdev) {
- ret = -ENODEV;
- goto out_free_data;
- }
-
data->num_parent_irqs = platform_irq_count(pdev);
- put_device(&pdev->dev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
@@ -334,22 +324,19 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (data->map_base[idx])
iounmap(data->map_base[idx]);
}
-out_free_data:
kfree(data);
return ret;
}
-static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
- struct device_node *parent)
+static int bcm7120_l2_intc_probe_7120(struct platform_device *pdev, struct device_node *parent)
{
- return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+ return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_7120,
"BCM7120 L2");
}
-static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
- struct device_node *parent)
+static int bcm7120_l2_intc_probe_3380(struct platform_device *pdev, struct device_node *parent)
{
- return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+ return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_3380,
"BCM3380 L2");
}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 1bec5b2cd3f0..bb7078d6524f 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -138,13 +138,12 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
}
-static int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent,
- const struct brcmstb_intc_init_params
- *init_params)
+static int brcmstb_l2_intc_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct brcmstb_intc_init_params *init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
unsigned int set = 0;
+ struct device_node *np = pdev->dev.of_node;
struct brcmstb_l2_intc_data *data;
struct irq_chip_type *ct;
int ret;
@@ -257,23 +256,21 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
return ret;
}
-static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int brcmstb_l2_edge_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+ return brcmstb_l2_intc_probe(pdev, parent, &l2_edge_intc_init);
}
-static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int brcmstb_l2_lvl_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+ return brcmstb_l2_intc_probe(pdev, parent, &l2_lvl_intc_init);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
-IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index d2a4e8a61a42..c598f2f52fc6 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -296,11 +296,9 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
},
};
-static int __init imx_mu_of_init(struct device_node *dn,
- struct device_node *parent,
- const struct imx_mu_dcfg *cfg)
+static int imx_mu_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct imx_mu_dcfg *cfg)
{
- struct platform_device *pdev = of_find_device_by_node(dn);
struct device_link *pd_link_a;
struct device_link *pd_link_b;
struct imx_mu_msi *msi_data;
@@ -416,31 +414,27 @@ static const struct dev_pm_ops imx_mu_pm_ops = {
imx_mu_runtime_resume, NULL)
};
-static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx7ulp_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx7ulp);
}
-static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx6sx_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx6sx);
}
-static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx8ulp_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx8ulp);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
-IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
-IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
-IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
+IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_probe)
+IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_probe)
+IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_probe)
IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)
-
MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
MODULE_DESCRIPTION("Freescale MU MSI controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 516a3a0e359c..b513a899c085 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -199,8 +199,9 @@ static const struct irq_domain_ops mchp_eic_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+static int mchp_eic_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *node = pdev->dev.of_node;
struct irq_domain *parent_domain = NULL;
int ret, i;
@@ -273,7 +274,7 @@ static int mchp_eic_init(struct device_node *node, struct device_node *parent)
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
-IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_probe)
IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
MODULE_DESCRIPTION("Microchip External Interrupt Controller");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 7d177626d64b..3fcbb044ae60 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -174,6 +174,14 @@ static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
+static const struct meson_gpio_irq_params s6_params = {
+ INIT_MESON_S4_COMMON_DATA(100)
+};
+
+static const struct meson_gpio_irq_params s7_params = {
+ INIT_MESON_S4_COMMON_DATA(84)
+};
+
static const struct meson_gpio_irq_params c3_params = {
INIT_MESON_S4_COMMON_DATA(55)
};
@@ -195,6 +203,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
{ .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
{ .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
+ { .compatible = "amlogic,s6-gpio-intc", .data = &s6_params },
+ { .compatible = "amlogic,s7-gpio-intc", .data = &s7_params },
+ { .compatible = "amlogic,s7d-gpio-intc", .data = &s7_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -572,8 +583,9 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i
return 0;
}
-static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
+static int meson_gpio_irq_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *node = pdev->dev.of_node;
struct irq_domain *domain, *parent_domain;
struct meson_gpio_irq_controller *ctl;
int ret;
@@ -630,10 +642,9 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
-IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_probe)
IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index cd8b73482b9f..10b85128183a 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -195,5 +195,3 @@ MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mvebu_pic");
-
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 8d569f7c5a7a..83f31ea657b7 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -320,9 +320,9 @@ static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
return false;
}
-static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
+static int qcom_mpm_probe(struct platform_device *pdev, struct device_node *parent)
{
- struct platform_device *pdev = of_find_device_by_node(np);
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct irq_domain *parent_domain;
struct generic_pm_domain *genpd;
@@ -478,7 +478,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
-IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
+IRQCHIP_MATCH("qcom,mpm", qcom_mpm_probe)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 2a54adeb4cc7..1bf19deb02c4 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -8,7 +8,6 @@
*/
#include <linux/bitfield.h>
-#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -528,18 +527,15 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
return 0;
}
-static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
- const struct irq_chip *irq_chip)
+static int rzg2l_irqc_common_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct irq_chip *irq_chip)
{
- struct platform_device *pdev = of_find_device_by_node(node);
- struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
struct irq_domain *irq_domain, *parent_domain;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct reset_control *resetn;
int ret;
- if (!pdev)
- return -ENODEV;
-
parent_domain = irq_find_host(parent);
if (!parent_domain)
return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
@@ -583,35 +579,22 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
register_syscore_ops(&rzg2l_irqc_syscore_ops);
- /*
- * Prevent the cleanup function from invoking put_device by assigning
- * NULL to dev.
- *
- * make coccicheck will complain about missing put_device calls, but
- * those are false positives, as dev will be automatically "put" via
- * __free_put_device on the failing path.
- * On the successful path we don't actually want to "put" dev.
- */
- dev = NULL;
-
return 0;
}
-static int __init rzg2l_irqc_init(struct device_node *node,
- struct device_node *parent)
+static int rzg2l_irqc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
+ return rzg2l_irqc_common_probe(pdev, parent, &rzg2l_irqc_chip);
}
-static int __init rzfive_irqc_init(struct device_node *node,
- struct device_node *parent)
+static int rzfive_irqc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
+ return rzg2l_irqc_common_probe(pdev, parent, &rzfive_irqc_chip);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
-IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
-IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
+IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_probe)
+IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_probe)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 9018d9c3911e..899a423b5da8 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -490,29 +490,15 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static void rzv2h_icu_put_device(void *data)
-{
- put_device(data);
-}
-
-static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
- const struct rzv2h_hw_info *hw_info)
+static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
+ struct device_node *node = pdev->dev.of_node;
struct rzv2h_icu_priv *rzv2h_icu_data;
- struct platform_device *pdev;
struct reset_control *resetn;
int ret;
- pdev = of_find_device_by_node(node);
- if (!pdev)
- return -ENODEV;
-
- ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
- &pdev->dev);
- if (ret < 0)
- return ret;
-
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
@@ -618,19 +604,19 @@ static const struct rzv2h_hw_info rzv2h_hw_params = {
.field_width = 8,
};
-static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+static int rzg3e_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+ return rzv2h_icu_probe_common(pdev, parent, &rzg3e_hw_params);
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static int rzv2h_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+ return rzv2h_icu_probe_common(pdev, parent, &rzv2h_hw_params);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
-IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
-IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_probe)
+IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_probe)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 2c4c682627b8..6bac67cc0b6d 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -91,9 +91,8 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
*/
static void imsic_handle_irq(struct irq_desc *desc)
{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
struct irq_chip *chip = irq_desc_get_chip(desc);
- int cpu = smp_processor_id();
- struct imsic_vector *vec;
unsigned long local_id;
/*
@@ -113,16 +112,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- if (unlikely(!imsic->base_domain))
- continue;
-
- vec = imsic_vector_from_local_id(cpu, local_id);
- if (!vec) {
+ if (unlikely(local_id > imsic->global.nr_ids)) {
pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
continue;
}
- generic_handle_irq(vec->irq);
+ generic_handle_irq(lpriv->vectors[local_id].irq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 643c8e459611..7228a33f6c37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -158,11 +158,11 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
tmp_vec.local_id = new_vec->local_id;
/* Point device to the temporary vector */
- imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ imsic_msi_update_msg(d, &tmp_vec);
}
/* Point device to the new vector */
- imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
+ imsic_msi_update_msg(d, new_vec);
/* Update irq descriptors with the new vector */
d->chip_data = new_vec;
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index dc95ad856d80..385368052d5c 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -434,16 +434,6 @@ void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
}
#endif
-struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
-{
- struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);
-
- if (!lpriv || imsic->global.nr_ids < local_id)
- return NULL;
-
- return &lpriv->vectors[local_id];
-}
-
struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
@@ -487,7 +477,6 @@ static void __init imsic_local_cleanup(void)
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
bitmap_free(lpriv->dirty_bitmap);
- kfree(lpriv->vectors);
}
free_percpu(imsic->lpriv);
@@ -501,7 +490,8 @@ static int __init imsic_local_init(void)
int cpu, i;
/* Allocate per-CPU private state */
- imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
+ imsic->lpriv = __alloc_percpu(struct_size(imsic->lpriv, vectors, global->nr_ids + 1),
+ __alignof__(*imsic->lpriv));
if (!imsic->lpriv)
return -ENOMEM;
@@ -521,12 +511,6 @@ static int __init imsic_local_init(void)
timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif
- /* Allocate vector array */
- lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
- GFP_KERNEL);
- if (!lpriv->vectors)
- goto fail_local_cleanup;
-
/* Setup vector array */
for (i = 0; i <= global->nr_ids; i++) {
vec = &lpriv->vectors[i];
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 57f951952b0c..6332501dcbd8 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -40,7 +40,7 @@ struct imsic_local_priv {
#endif
/* Local vector table */
- struct imsic_vector *vectors;
+ struct imsic_vector vectors[];
};
struct imsic_priv {
@@ -95,8 +95,6 @@ static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *ve
void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
-struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-
struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
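The state rework above folds the separately allocated vector array into a
flexible array member at the end of the per-CPU private structure, so each
CPU's state comes from a single allocation. A generic sketch of that
allocation pattern, using hypothetical foo_* names rather than the actual
IMSIC types:
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/percpu.h>
struct foo_vec {
        unsigned int irq;
};
struct foo_pcpu {
        unsigned long flags;
        /* Trailing flexible array member, sized at allocation time */
        struct foo_vec vectors[];
};
static struct foo_pcpu __percpu *foo_lpriv;
static int foo_pcpu_init(unsigned int nr_ids)
{
        /* One per-CPU allocation covering the struct plus nr_ids + 1 vectors */
        foo_lpriv = __alloc_percpu(struct_size(foo_lpriv, vectors, nr_ids + 1),
                                   __alignof__(*foo_lpriv));
        return foo_lpriv ? 0 : -ENOMEM;
}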
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index cbd7697bc148..c5db7d6e3f7c 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -49,6 +49,8 @@
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80
+#define PENDING_BASE 0x1000
+
/*
* Each hart context has a set of control registers associated with it. Right
* now there's only two: a source priority threshold over which the hart will
@@ -63,6 +65,7 @@
#define PLIC_ENABLE_THRESHOLD 0
#define PLIC_QUIRK_EDGE_INTERRUPT 0
+#define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM 1
struct plic_priv {
struct fwnode_handle *fwnode;
@@ -94,15 +97,22 @@ static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
-static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
+static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
+ u32 __iomem *base = handler->enable_base;
u32 hwirq_mask = 1 << (hwirq % 32);
+ int group = hwirq / 32;
+ u32 value;
+
+ value = readl(base + group);
if (enable)
- writel(readl(reg) | hwirq_mask, reg);
+ value |= hwirq_mask;
else
- writel(readl(reg) & ~hwirq_mask, reg);
+ value &= ~hwirq_mask;
+
+ handler->enable_save[group] = value;
+ writel(value, base + group);
}
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
@@ -110,7 +120,7 @@ static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
unsigned long flags;
raw_spin_lock_irqsave(&handler->enable_lock, flags);
- __plic_toggle(handler->enable_base, hwirq, enable);
+ __plic_toggle(handler, hwirq, enable);
raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
@@ -247,33 +257,16 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
static int plic_irq_suspend(void)
{
- unsigned int i, cpu;
- unsigned long flags;
- u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
/* irq ID 0 is reserved */
- for (i = 1; i < priv->nr_irqs; i++) {
+ for (unsigned int i = 1; i < priv->nr_irqs; i++) {
__assign_bit(i, priv->prio_save,
readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
}
- for_each_present_cpu(cpu) {
- struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
-
- if (!handler->present)
- continue;
-
- raw_spin_lock_irqsave(&handler->enable_lock, flags);
- for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
- reg = handler->enable_base + i * sizeof(u32);
- handler->enable_save[i] = readl(reg);
- }
- raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
- }
-
return 0;
}
@@ -398,6 +391,98 @@ static void plic_handle_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *handler)
+{
+ u32 __iomem *pending = handler->priv->regs + PENDING_BASE;
+ u32 __iomem *enable = handler->enable_base;
+ u32 pending_irqs = 0;
+ int i, j;
+
+ /* Look for first pending interrupt */
+ for (i = 0; i < nr_irq_groups; i++) {
+ /* Any pending interrupts would be annihilated, so skip checking them */
+ if (!handler->enable_save[i])
+ continue;
+
+ pending_irqs = handler->enable_save[i] & readl_relaxed(pending + i);
+ if (pending_irqs)
+ break;
+ }
+
+ if (!pending_irqs)
+ return 0;
+
+ /* Isolate lowest set bit */
+ pending_irqs &= -pending_irqs;
+
+ /* Disable all interrupts but the first pending one */
+ for (j = 0; j < nr_irq_groups; j++) {
+ u32 new_mask = j == i ? pending_irqs : 0;
+
+ if (new_mask != handler->enable_save[j])
+ writel_relaxed(new_mask, enable + j);
+ }
+ return pending_irqs;
+}
+
+static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim)
+{
+ int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32);
+ u32 __iomem *enable = handler->enable_base;
+ irq_hw_number_t hwirq = 0;
+ u32 iso_mask;
+ int i;
+
+ guard(raw_spinlock)(&handler->enable_lock);
+
+ /* Existing enable state is already cached in enable_save */
+ iso_mask = cp100_isolate_pending_irq(nr_irq_groups, handler);
+ if (!iso_mask)
+ return 0;
+
+ /*
+ * Interrupts delivered to hardware still become pending, but only
+ * interrupts that are both pending and enabled can be claimed.
+ * Clearing the enable bit for all interrupts but the first pending
+ * one avoids a hardware bug that occurs during read from the claim
+ * register with more than one eligible interrupt.
+ */
+ hwirq = readl(claim);
+
+ /* Restore previous state */
+ for (i = 0; i < nr_irq_groups; i++) {
+ u32 written = i == hwirq / 32 ? iso_mask : 0;
+ u32 stored = handler->enable_save[i];
+
+ if (stored != written)
+ writel_relaxed(stored, enable + i);
+ }
+ return hwirq;
+}
+
+static void plic_handle_irq_cp100(struct irq_desc *desc)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = cp100_get_hwirq(handler, claim))) {
+ int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq);
+
+ if (unlikely(err)) {
+ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+ handler->priv->fwnode, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
@@ -434,6 +519,8 @@ static const struct of_device_id plic_match[] = {
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
{ .compatible = "thead,c900-plic",
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "ultrarisc,cp100-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) },
{}
};
@@ -592,12 +679,11 @@ static int plic_probe(struct fwnode_handle *fwnode)
if (parent_hwirq != RV_IRQ_EXT) {
/* Disable S-mode enable bits if running in M-mode. */
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
- void __iomem *enable_base = priv->regs +
- CONTEXT_ENABLE_BASE +
- i * CONTEXT_ENABLE_SIZE;
+ u32 __iomem *enable_base = priv->regs + CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- __plic_toggle(enable_base, hwirq, 0);
+ for (int j = 0; j <= nr_irqs / 32; j++)
+ writel(0, enable_base + j);
}
continue;
}
@@ -668,12 +754,17 @@ static int plic_probe(struct fwnode_handle *fwnode)
}
if (global_setup) {
+ void (*handler_fn)(struct irq_desc *) = plic_handle_irq;
+
+ if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks))
+ handler_fn = plic_handle_irq_cp100;
+
/* Find parent domain and register chained handler */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
if (domain)
plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+ irq_set_chained_handler(plic_parent_irq, handler_fn);
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
index 2460798ec158..705361b4ebe0 100644
--- a/drivers/irqchip/irq-starfive-jh8100-intc.c
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -114,9 +114,9 @@ static void starfive_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int __init starfive_intc_init(struct device_node *intc,
- struct device_node *parent)
+static int starfive_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *intc = pdev->dev.of_node;
struct starfive_irq_chip *irqc;
struct reset_control *rst;
struct clk *clk;
@@ -199,7 +199,7 @@ static int __init starfive_intc_init(struct device_node *intc,
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc)
-IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init)
+IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(starfive_intc)
MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller");
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 1e236d5b7516..2e4013c6834d 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -165,4 +165,3 @@ module_platform_driver(ts4800_ic_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 0ee7b6b71f5f..689c8e448901 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -36,11 +36,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
- of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+ platform_irq_probe_t irq_probe = of_device_get_match_data(&pdev->dev);
- if (!irq_init_cb) {
+ if (!irq_probe)
return -EINVAL;
- }
if (par_np == np)
par_np = NULL;
@@ -53,10 +52,9 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller. The actual initialization callback of this
* interrupt controller can check for specific domains as necessary.
*/
- if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
return -EPROBE_DEFER;
- }
- return irq_init_cb(np, par_np);
+ return irq_probe(pdev, par_np);
}
EXPORT_SYMBOL_GPL(platform_irqchip_probe);
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 18e696dc7f4d..09819007d08e 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -222,7 +222,7 @@ static int get_registers(struct platform_device *pdev, struct combiner *comb)
return 0;
}
-static int __init combiner_probe(struct platform_device *pdev)
+static int combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
int nregs;
@@ -266,11 +266,11 @@ static const struct acpi_device_id qcom_irq_combiner_ids[] = {
{ }
};
-static struct platform_driver qcom_irq_combiner_probe = {
+static struct platform_driver qcom_irq_combiner_driver = {
.driver = {
.name = "qcom-irq-combiner",
.acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
},
.probe = combiner_probe,
};
-builtin_platform_driver(qcom_irq_combiner_probe);
+builtin_platform_driver(qcom_irq_combiner_driver);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 52d77546aacb..518f7f0f3dab 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -350,9 +350,10 @@ static int pdc_setup_pin_mapping(struct device_node *np)
#define QCOM_PDC_SIZE 0x30000
-static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
+static int qcom_pdc_probe(struct platform_device *pdev, struct device_node *parent)
{
struct irq_domain *parent_domain, *pdc_domain;
+ struct device_node *node = pdev->dev.of_node;
resource_size_t res_size;
struct resource res;
int ret;
@@ -428,7 +429,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
-IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
+IRQCHIP_MATCH("qcom,pdc", qcom_pdc_probe)
IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
MODULE_LICENSE("GPL v2");
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index d5e6024cb2a8..bc4ddacd6ddc 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -17,12 +17,18 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+typedef int (*platform_irq_probe_t)(struct platform_device *, struct device_node *);
+
/* Undefined on purpose */
extern of_irq_init_cb_t typecheck_irq_init_cb;
+extern platform_irq_probe_t typecheck_irq_probe;
#define typecheck_irq_init_cb(fn) \
(__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+#define typecheck_irq_probe(fn) \
+ (__typecheck(typecheck_irq_probe, &fn) ? fn : fn)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
@@ -42,7 +48,7 @@ extern int platform_irqchip_probe(struct platform_device *pdev);
static const struct of_device_id drv_name##_irqchip_match_table[] = {
#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
- .data = typecheck_irq_init_cb(fn), },
+ .data = typecheck_irq_probe(fn), },
#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
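To make the treewide conversion concrete: with the IRQCHIP_MATCH() callbacks
now typed as platform_irq_probe_t, a converted driver receives the platform
device directly instead of looking it up with of_find_device_by_node(). A
minimal sketch follows; the foo names and the compatible string are made up
for illustration and not part of the series:
#include <linux/irqchip.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
static int foo_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
        struct device_node *node = pdev->dev.of_node;
        /*
         * The platform device is handed in directly, so the driver no longer
         * needs the of_find_device_by_node()/put_device() dance required by
         * the old of_irq_init_cb_t based callbacks.
         */
        if (!node)
                return -ENODEV;
        /* A real driver would create its irqdomain here */
        return 0;
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(foo_intc)
IRQCHIP_MATCH("vendor,foo-intc", foo_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(foo_intc)
MODULE_DESCRIPTION("Hypothetical irqchip probe conversion example");
MODULE_LICENSE("GPL v2");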
Linus,
please pull the latest irq/core branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-core-2025-11-30
up to: 3de5e46e50ab: genirq: Remove cpumask availability check on kthread affinity setting
Updates for the interrupt core and treewide cleanups:
- Rework of the Per Processor Interrupt (PPI) management on ARM[64].
PPI support was built under the assumption that the systems are
homogeneous so that the same CPU local device types are connected to
them. That's unfortunately wishful thinking and created horrible
workarounds.
This rework provides affinity management for PPIs so that they can be
individually configured in the firmware tables, and mops up the related
drivers all over the place. A sketch of the resulting driver-side request
pattern follows this list.
- Prevent CPUSET/isolation changes from arbitrarily affining interrupt
threads to random CPUs, which would ignore user or driver settings.
- Plug a harmless race in the interrupt affinity proc interface, which
allowed a half-updated mask to be observed.
- Adjust the priority of secondary interrupt threads on RT, so that the
combination of primary and secondary thread emulates the hardware
interrupt plus thread scenario. Having them at the same priority can
cause starvation issues in some drivers.
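The sketch referenced above: a hypothetical per-CPU driver picking up the
firmware-provided affinity and requesting its interrupt with it.
platform_get_irq_affinity() is taken from the diff below; the argument order
of request_percpu_irq_affinity() is an assumption modeled on the updated
request_percpu_nmi() call in the arm64 hunk, not copied from a converted
driver.
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
struct foo_state { unsigned long count; };
static DEFINE_PER_CPU(struct foo_state, foo_state);
static irqreturn_t foo_handler(int irq, void *dev_id)
{
        /* dev_id points at this CPU's foo_state instance */
        return IRQ_HANDLED;
}
static int foo_probe(struct platform_device *pdev)
{
        const struct cpumask *affinity;
        int irq;
        /* Retrieve the interrupt together with its firmware-described affinity */
        irq = platform_get_irq_affinity(pdev, 0, &affinity);
        if (irq < 0)
                return irq;
        /* Request the per-CPU interrupt only for the CPUs it is wired to
         * (assumed argument order, see note above) */
        return request_percpu_irq_affinity(irq, foo_handler, "foo", affinity,
                                           &foo_state);
}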
Thanks,
tglx
------------------>
Chengkaitao (1):
genirq: Use raw_spinlock_irq() in irq_set_affinity_notifier()
Frederic Weisbecker (3):
genirq: Prevent early spurious wake-ups of interrupt threads
genirq: Fix interrupt threads affinity vs. cpuset isolated partitions
genirq: Remove cpumask availability check on kthread affinity setting
Lukas Wunner (1):
genirq/manage: Reduce priority of forced secondary interrupt handler
Marc Zyngier (26):
irqdomain: Add firmware info reporting interface
ACPI: irq: Add interrupt affinity reporting interface
of/irq: Add interrupt affinity reporting interface
platform: Add firmware-agnostic irq and affinity retrieval interface
irqchip/gic-v3: Add FW info retrieval support
irqchip/apple-aic: Add FW info retrieval support
coresight: trbe: Convert to the new interrupt affinity retrieval API
perf: arm_pmu: Convert to the new interrupt affinity retrieval API
perf: arm_spe_pmu: Convert to new interrupt affinity retrieval API
irqchip/gic-v3: Switch high priority PPIs over to handle_percpu_devid_irq()
genirq: Kill handle_percpu_devid_fasteoi_nmi()
genirq: Merge irqaction::{dev_id,percpu_dev_id}
genirq: Factor-in percpu irqaction creation
genirq: Add affinity to percpu_devid interrupt requests
genirq: Update request_percpu_nmi() to take an affinity
genirq: Allow per-cpu interrupt sharing for non-overlapping affinities
genirq: Add request_percpu_irq_affinity() helper
perf: arm_spe_pmu: Request specific affinities for per CPU interrupts
coresight: trbe: Request specific affinities for per CPU interrupts
irqchip/gic-v3: Drop support for custom PPI partitions
irqchip/apple-aic: Drop support for custom PMU irq partitions
irqchip: Kill irq-partition-percpu
genirq: Kill irq_{g,s}et_percpu_devid_partition()
irqdomain: Kill of_node_to_fwnode() helper
perf: arm_pmu: Kill last use of per-CPU cpu_armpmu pointer
genirq: Fix percpu_devid irq affinity documentation
Muchun Song (1):
genirq/proc: Fix race in show_irq_affinity()
Will Deacon (1):
perf: arm_pmu: Request specific affinities for per CPU NMIs/interrupts
arch/arm64/kernel/smp.c | 2 +-
drivers/acpi/irq.c | 19 +++
drivers/base/platform.c | 71 ++++++--
drivers/hwtracing/coresight/coresight-trbe.c | 9 +-
drivers/irqchip/Kconfig | 4 -
drivers/irqchip/Makefile | 1 -
drivers/irqchip/irq-apple-aic.c | 62 ++++---
drivers/irqchip/irq-gic-v3.c | 224 ++++++-------------------
drivers/irqchip/irq-partition-percpu.c | 241 ---------------------------
drivers/of/irq.c | 20 +++
drivers/perf/arm_pmu.c | 49 +++---
drivers/perf/arm_pmu_acpi.c | 2 +-
drivers/perf/arm_pmu_platform.c | 20 ++-
drivers/perf/arm_pmuv3.c | 2 +-
drivers/perf/arm_spe_pmu.c | 13 +-
include/linux/acpi.h | 7 +
include/linux/interrupt.h | 25 ++-
include/linux/irq.h | 5 -
include/linux/irqchip/irq-partition-percpu.h | 53 ------
include/linux/irqdesc.h | 1 -
include/linux/irqdomain.h | 33 +++-
include/linux/of_irq.h | 7 +
include/linux/perf/arm_pmu.h | 6 +-
include/linux/platform_device.h | 2 +
include/linux/sched.h | 1 +
kernel/irq/chip.c | 33 +---
kernel/irq/handle.c | 10 +-
kernel/irq/irqdesc.c | 24 +--
kernel/irq/irqdomain.c | 32 +++-
kernel/irq/manage.c | 172 ++++++++++++-------
kernel/irq/proc.c | 2 +
kernel/sched/syscalls.c | 13 ++
32 files changed, 478 insertions(+), 687 deletions(-)
delete mode 100644 drivers/irqchip/irq-partition-percpu.c
delete mode 100644 include/linux/irqchip/irq-partition-percpu.h
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 68cea3a4a35c..6fb838eee2e7 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1094,7 +1094,7 @@ static void ipi_setup_sgi(int ipi)
irq = ipi_irq_base + ipi;
if (ipi_should_be_nmi(ipi)) {
- err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+ err = request_percpu_nmi(irq, ipi_handler, "IPI", NULL, &irq_stat);
WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
} else {
err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 76a856c32c4d..d1595156c86a 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -300,6 +300,25 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
EXPORT_SYMBOL_GPL(acpi_irq_get);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ struct irq_fwspec_info info;
+ struct irq_fwspec fwspec;
+ unsigned long flags;
+
+ if (acpi_irq_parse_one(handle, index, &fwspec, &flags))
+ return NULL;
+
+ if (irq_populate_fwspec_info(&fwspec, &info))
+ return NULL;
+
+ if (!(info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID))
+ return NULL;
+
+ return info.affinity;
+}
+
/**
* acpi_set_irq_model - Setup the GSI irqdomain information
* @model: the value assigned to acpi_irq_model
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 09450349cf32..b45d41b018ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -150,25 +150,37 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
+static const struct cpumask *get_irq_affinity(struct platform_device *dev,
+ unsigned int num)
+{
+ const struct cpumask *mask = NULL;
+#ifndef CONFIG_SPARC
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
+
+ if (is_of_node(fwnode))
+ mask = of_irq_get_affinity(to_of_node(fwnode), num);
+ else if (is_acpi_device_node(fwnode))
+ mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
+#endif
+
+ return mask ?: cpu_possible_mask;
+}
+
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
+ * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
*
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * For example::
- *
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. Optional affinity information is provided in the
+ * affinity pointer if available, and NULL otherwise.
*
- * Return: non-zero IRQ number on success, negative error number on failure.
+ * Return: non-zero interrupt number on success, negative error number on failure.
*/
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
+ const struct cpumask **affinity)
{
int ret;
#ifdef CONFIG_SPARC
@@ -236,8 +248,37 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
out:
if (WARN(!ret, "0 is an invalid IRQ number\n"))
return -EINVAL;
+
+ if (ret > 0 && affinity)
+ *affinity = get_irq_affinity(dev, num);
+
return ret;
}
+EXPORT_SYMBOL_GPL(platform_get_irq_affinity);
+
+/**
+ * platform_get_irq_optional - get an optional interrupt for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ *
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. This is the same as platform_get_irq(), except
+ * that it does not print an error message if an interrupt can not be
+ * obtained.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero interrupt number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ return platform_get_irq_affinity(dev, num, NULL);
+}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
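For illustration, a percpu-devid platform driver converted to the new accessor
pair would look roughly like the sketch below; my_irq_handler, my_dev_state and
"my-dev" are placeholder names, not code from this series:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(void *, my_dev_state);

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		/* dev_id is this CPU's slot of the percpu cookie */
		return IRQ_HANDLED;
	}

	static int my_probe(struct platform_device *pdev)
	{
		const struct cpumask *affinity;
		int irq;

		/* Interrupt number and firmware-described affinity in one call */
		irq = platform_get_irq_affinity(pdev, 0, &affinity);
		if (irq < 0)
			return irq;

		/* Restrict the percpu request to exactly those CPUs */
		return request_percpu_irq_affinity(irq, my_irq_handler, "my-dev",
						   affinity, &my_dev_state);
	}
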
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 43643d2c5bdd..9f64f463339d 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -1474,9 +1474,10 @@ static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
static int arm_trbe_probe_irq(struct platform_device *pdev,
struct trbe_drvdata *drvdata)
{
+ const struct cpumask *affinity;
int ret;
- drvdata->irq = platform_get_irq(pdev, 0);
+ drvdata->irq = platform_get_irq_affinity(pdev, 0, &affinity);
if (drvdata->irq < 0) {
pr_err("IRQ not found for the platform device\n");
return drvdata->irq;
@@ -1487,14 +1488,14 @@ static int arm_trbe_probe_irq(struct platform_device *pdev,
return -EINVAL;
}
- if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
- return -EINVAL;
+ cpumask_copy(&drvdata->supported_cpus, affinity);
drvdata->handle = alloc_percpu(struct perf_output_handle *);
if (!drvdata->handle)
return -ENOMEM;
- ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+ ret = request_percpu_irq_affinity(drvdata->irq, arm_trbe_irq_handler, DRVNAME,
+ affinity, drvdata->handle);
if (ret) {
free_percpu(drvdata->handle);
return ret;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index a61c6dc63c29..5dddb4c9442a 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -36,7 +36,6 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN_HIERARCHY
- select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select HAVE_ARM_SMCCC_DISCOVERY
select IRQ_MSI_IOMMU
@@ -451,9 +450,6 @@ config LS_SCFG_MSI
depends on PCI_MSI
select IRQ_MSI_LIB
-config PARTITION_PERCPU
- bool
-
config STM32MP_EXTI
tristate "STM32MP extended interrupts and event controller"
depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 3de083f5484c..6a229443efe0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
-obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \
irq-gic-v5-iwb.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 032d66dceb8e..795b3db4554a 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -578,16 +578,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
}
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
- (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
- int irq;
- if (cpumask_test_cpu(smp_processor_id(),
- &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
- irq = AIC_CPU_PMU_P;
- else
- irq = AIC_CPU_PMU_E;
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT))
generic_handle_domain_irq(aic_irqc->hw_domain,
- AIC_FIQ_HWIRQ(irq));
- }
+ AIC_FIQ_HWIRQ(AIC_CPU_PMU_P));
if (static_branch_likely(&use_fast_ipi) &&
(FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
@@ -632,18 +625,7 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
handle_fasteoi_irq, NULL, NULL);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
} else {
- int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
-
- switch (fiq) {
- case AIC_CPU_PMU_P:
- case AIC_CPU_PMU_E:
- irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
- break;
- default:
- irq_set_percpu_devid(irq);
- break;
- }
-
+ irq_set_percpu_devid(irq);
irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
handle_percpu_devid_irq, NULL, NULL);
}
@@ -651,6 +633,33 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
return 0;
}
+static int aic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ const struct cpumask *mask;
+ u32 intid;
+
+ info->flags = 0;
+ info->affinity = NULL;
+
+ if (fwspec->param[0] != AIC_FIQ)
+ return 0;
+
+ if (fwspec->param_count == 3)
+ intid = fwspec->param[1];
+ else
+ intid = fwspec->param[2];
+
+ if (aic_irqc->fiq_aff[intid])
+ mask = &aic_irqc->fiq_aff[intid]->aff;
+ else
+ mask = cpu_possible_mask;
+
+ info->affinity = mask;
+ info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
+
+ return 0;
+}
+
static int aic_irq_domain_translate(struct irq_domain *id,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -705,6 +714,10 @@ static int aic_irq_domain_translate(struct irq_domain *id,
break;
}
}
+
+ /* Merge the two PMUs on a single interrupt */
+ if (*hwirq == AIC_CPU_PMU_E)
+ *hwirq = AIC_CPU_PMU_P;
break;
default:
return -EINVAL;
@@ -750,9 +763,10 @@ static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops aic_irq_domain_ops = {
- .translate = aic_irq_domain_translate,
- .alloc = aic_irq_domain_alloc,
- .free = aic_irq_domain_free,
+ .translate = aic_irq_domain_translate,
+ .alloc = aic_irq_domain_alloc,
+ .free = aic_irq_domain_free,
+ .get_fwspec_info = aic_irq_get_fwspec_info,
};
/*
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 3de351e66ee8..6607ab58f72e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -26,7 +26,6 @@
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v3-prio.h>
-#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/arm-smccc.h>
@@ -46,8 +45,6 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
-#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
-
static struct cpumask broken_rdists __read_mostly __maybe_unused;
struct redist_region {
@@ -68,7 +65,13 @@ struct gic_chip_data {
u64 flags;
bool has_rss;
unsigned int ppi_nr;
- struct partition_desc **ppi_descs;
+ struct partition_affinity *parts;
+ unsigned int nr_parts;
+};
+
+struct partition_affinity {
+ cpumask_t mask;
+ struct fwnode_handle *partition_id;
};
#define T241_CHIPS_MAX 4
@@ -228,9 +231,6 @@ static void __init gic_prio_init(void)
!cpus_have_group0);
}
-/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
-static refcount_t *rdist_nmi_refs;
-
static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);
@@ -594,36 +594,6 @@ static void gic_irq_set_prio(struct irq_data *d, u8 prio)
writeb_relaxed(prio, base + offset + index);
}
-static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
-{
- switch (__get_intid_range(hwirq)) {
- case PPI_RANGE:
- return hwirq - 16;
- case EPPI_RANGE:
- return hwirq - EPPI_BASE_INTID + 16;
- default:
- unreachable();
- }
-}
-
-static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
-{
- switch (__get_intid_range(hwirq)) {
- case SGI_RANGE:
- case PPI_RANGE:
- return hwirq;
- case EPPI_RANGE:
- return hwirq - EPPI_BASE_INTID + 32;
- default:
- unreachable();
- }
-}
-
-static u32 gic_get_rdist_index(struct irq_data *d)
-{
- return __gic_get_rdist_index(d->hwirq);
-}
-
static int gic_irq_nmi_setup(struct irq_data *d)
{
struct irq_desc *desc = irq_to_desc(d->irq);
@@ -644,20 +614,8 @@ static int gic_irq_nmi_setup(struct irq_data *d)
return -EINVAL;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /*
- * Setting up a percpu interrupt as NMI, only switch handler
- * for first NMI
- */
- if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
- refcount_set(&rdist_nmi_refs[idx], 1);
- desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
- }
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_nmi;
- }
gic_irq_set_prio(d, dist_prio_nmi);
@@ -684,15 +642,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
return;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /* Tearing down NMI, only switch handler for last NMI */
- if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
- desc->handle_irq = handle_percpu_devid_irq;
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_irq;
- }
gic_irq_set_prio(d, dist_prio_irq);
}
@@ -1666,13 +1617,6 @@ static int gic_irq_domain_translate(struct irq_domain *d,
case GIC_IRQ_TYPE_LPI: /* LPI */
*hwirq = fwspec->param[1];
break;
- case GIC_IRQ_TYPE_PARTITION:
- *hwirq = fwspec->param[1];
- if (fwspec->param[1] >= 16)
- *hwirq += EPPI_BASE_INTID - 16;
- else
- *hwirq += 16;
- break;
default:
return -EINVAL;
}
@@ -1681,10 +1625,8 @@ static int gic_irq_domain_translate(struct irq_domain *d,
/*
* Make it clear that broken DTs are... broken.
- * Partitioned PPIs are an unfortunate exception.
*/
- WARN_ON(*type == IRQ_TYPE_NONE &&
- fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
+ WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
@@ -1741,33 +1683,12 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
}
-static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
- irq_hw_number_t hwirq)
-{
- enum gic_intid_range range;
-
- if (!gic_data.ppi_descs)
- return false;
-
- if (!is_of_node(fwspec->fwnode))
- return false;
-
- if (fwspec->param_count < 4 || !fwspec->param[3])
- return false;
-
- range = __get_intid_range(hwirq);
- if (range != PPI_RANGE && range != EPPI_RANGE)
- return false;
-
- return true;
-}
-
static int gic_irq_domain_select(struct irq_domain *d,
struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
- unsigned int type, ppi_idx;
irq_hw_number_t hwirq;
+ unsigned int type;
int ret;
/* Not for us */
@@ -1786,60 +1707,61 @@ static int gic_irq_domain_select(struct irq_domain *d,
if (WARN_ON_ONCE(ret))
return 0;
- if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
- return d == gic_data.domain;
-
- /*
- * If this is a PPI and we have a 4th (non-null) parameter,
- * then we need to match the partition domain.
- */
- ppi_idx = __gic_get_ppi_index(hwirq);
- return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
+ return d == gic_data.domain;
}
-static const struct irq_domain_ops gic_irq_domain_ops = {
- .translate = gic_irq_domain_translate,
- .alloc = gic_irq_domain_alloc,
- .free = gic_irq_domain_free,
- .select = gic_irq_domain_select,
-};
-
-static int partition_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
+static int gic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
{
- unsigned long ppi_intid;
- struct device_node *np;
- unsigned int ppi_idx;
- int ret;
-
- if (!gic_data.ppi_descs)
- return -ENOMEM;
+ const struct cpumask *mask = NULL;
- np = of_find_node_by_phandle(fwspec->param[3]);
- if (WARN_ON(!np))
- return -EINVAL;
+ info->flags = 0;
+ info->affinity = NULL;
- ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
- if (WARN_ON_ONCE(ret))
+ /* ACPI is not capable of describing PPI affinity -- yet */
+ if (!is_of_node(fwspec->fwnode))
return 0;
- ppi_idx = __gic_get_ppi_index(ppi_intid);
- ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
- of_fwnode_handle(np));
- if (ret < 0)
- return ret;
+ /* If the specifier provides an affinity, use it */
+ if (fwspec->param_count == 4 && fwspec->param[3]) {
+ struct fwnode_handle *fw;
+
+ switch (fwspec->param[0]) {
+ case 1: /* PPI */
+ case 3: /* EPPI */
+ break;
+ default:
+ return 0;
+ }
+
+ fw = of_fwnode_handle(of_find_node_by_phandle(fwspec->param[3]));
+ if (!fw)
+ return -ENOENT;
+
+ for (int i = 0; i < gic_data.nr_parts; i++) {
+ if (gic_data.parts[i].partition_id == fw) {
+ mask = &gic_data.parts[i].mask;
+ break;
+ }
+ }
+
+ if (!mask)
+ return -ENOENT;
+ } else {
+ mask = cpu_possible_mask;
+ }
- *hwirq = ret;
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ info->affinity = mask;
+ info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
return 0;
}
-static const struct irq_domain_ops partition_domain_ops = {
- .translate = partition_domain_translate,
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .translate = gic_irq_domain_translate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
.select = gic_irq_domain_select,
+ .get_fwspec_info = gic_irq_get_fwspec_info,
};
static bool gic_enable_quirk_msm8996(void *data)
@@ -2030,19 +1952,9 @@ static const struct gic_quirk gic_quirks[] = {
static void gic_enable_nmi_support(void)
{
- int i;
-
if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
- rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
- sizeof(*rdist_nmi_refs), GFP_KERNEL);
- if (!rdist_nmi_refs)
- return;
-
- for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
- refcount_set(&rdist_nmi_refs[i], 0);
-
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
@@ -2174,12 +2086,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (!parts_node)
return;
- gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
- if (!gic_data.ppi_descs)
- goto out_put_node;
-
nr_parts = of_get_child_count(parts_node);
-
if (!nr_parts)
goto out_put_node;
@@ -2232,29 +2139,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
part_idx++;
}
- for (i = 0; i < gic_data.ppi_nr; i++) {
- unsigned int irq;
- struct partition_desc *desc;
- struct irq_fwspec ppi_fwspec = {
- .fwnode = gic_data.fwnode,
- .param_count = 3,
- .param = {
- [0] = GIC_IRQ_TYPE_PARTITION,
- [1] = i,
- [2] = IRQ_TYPE_NONE,
- },
- };
-
- irq = irq_create_fwspec_mapping(&ppi_fwspec);
- if (WARN_ON(!irq))
- continue;
- desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
- irq, &partition_domain_ops);
- if (WARN_ON(!desc))
- continue;
-
- gic_data.ppi_descs[i] = desc;
- }
+ gic_data.parts = parts;
+ gic_data.nr_parts = nr_parts;
out_put_node:
of_node_put(parts_node);
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
deleted file mode 100644
index 4441ffe149ea..000000000000
--- a/drivers/irqchip/irq-partition-percpu.c
+++ /dev/null
@@ -1,241 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqchip/irq-partition-percpu.h>
-#include <linux/irqdomain.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-struct partition_desc {
- int nr_parts;
- struct partition_affinity *parts;
- struct irq_domain *domain;
- struct irq_desc *chained_desc;
- unsigned long *bitmap;
- struct irq_domain_ops ops;
-};
-
-static bool partition_check_cpu(struct partition_desc *part,
- unsigned int cpu, unsigned int hwirq)
-{
- return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
-}
-
-static void partition_irq_mask(struct irq_data *d)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_mask)
- chip->irq_mask(data);
-}
-
-static void partition_irq_unmask(struct irq_data *d)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_unmask)
- chip->irq_unmask(data);
-}
-
-static int partition_irq_set_irqchip_state(struct irq_data *d,
- enum irqchip_irq_state which,
- bool val)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_set_irqchip_state)
- return chip->irq_set_irqchip_state(data, which, val);
-
- return -EINVAL;
-}
-
-static int partition_irq_get_irqchip_state(struct irq_data *d,
- enum irqchip_irq_state which,
- bool *val)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_get_irqchip_state)
- return chip->irq_get_irqchip_state(data, which, val);
-
- return -EINVAL;
-}
-
-static int partition_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (chip->irq_set_type)
- return chip->irq_set_type(data, type);
-
- return -EINVAL;
-}
-
-static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
-}
-
-static struct irq_chip partition_irq_chip = {
- .irq_mask = partition_irq_mask,
- .irq_unmask = partition_irq_unmask,
- .irq_set_type = partition_irq_set_type,
- .irq_get_irqchip_state = partition_irq_get_irqchip_state,
- .irq_set_irqchip_state = partition_irq_set_irqchip_state,
- .irq_print_chip = partition_irq_print_chip,
-};
-
-static void partition_handle_irq(struct irq_desc *desc)
-{
- struct partition_desc *part = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cpu = smp_processor_id();
- int hwirq;
-
- chained_irq_enter(chip, desc);
-
- for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
- if (partition_check_cpu(part, cpu, hwirq))
- break;
- }
-
- if (unlikely(hwirq == part->nr_parts))
- handle_bad_irq(desc);
- else
- generic_handle_domain_irq(part->domain, hwirq);
-
- chained_irq_exit(chip, desc);
-}
-
-static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *arg)
-{
- int ret;
- irq_hw_number_t hwirq;
- unsigned int type;
- struct irq_fwspec *fwspec = arg;
- struct partition_desc *part;
-
- BUG_ON(nr_irqs != 1);
- ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- part = domain->host_data;
-
- set_bit(hwirq, part->bitmap);
- irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
- partition_handle_irq, part);
- irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
- irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
- handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(virq, IRQ_NOAUTOEN);
-
- return 0;
-}
-
-static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d;
-
- BUG_ON(nr_irqs != 1);
-
- d = irq_domain_get_irq_data(domain, virq);
- irq_set_handler(virq, NULL);
- irq_domain_reset_irq_data(d);
-}
-
-int partition_translate_id(struct partition_desc *desc, void *partition_id)
-{
- struct partition_affinity *part = NULL;
- int i;
-
- for (i = 0; i < desc->nr_parts; i++) {
- if (desc->parts[i].partition_id == partition_id) {
- part = &desc->parts[i];
- break;
- }
- }
-
- if (WARN_ON(!part)) {
- pr_err("Failed to find partition\n");
- return -EINVAL;
- }
-
- return i;
-}
-
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- struct partition_desc *desc;
- struct irq_domain *d;
-
- BUG_ON(!ops->select || !ops->translate);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
-
- desc->ops = *ops;
- desc->ops.free = partition_domain_free;
- desc->ops.alloc = partition_domain_alloc;
-
- d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
- if (!d)
- goto out;
- desc->domain = d;
-
- desc->bitmap = bitmap_zalloc(nr_parts, GFP_KERNEL);
- if (WARN_ON(!desc->bitmap))
- goto out;
-
- desc->chained_desc = irq_to_desc(chained_irq);
- desc->nr_parts = nr_parts;
- desc->parts = parts;
-
- return desc;
-out:
- if (d)
- irq_domain_remove(d);
- kfree(desc);
-
- return NULL;
-}
-
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- if (dsc)
- return dsc->domain;
-
- return NULL;
-}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 65c3c23255b7..168fde921bd2 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -479,6 +479,26 @@ int of_irq_get(struct device_node *dev, int index)
}
EXPORT_SYMBOL_GPL(of_irq_get);
+const struct cpumask *of_irq_get_affinity(struct device_node *dev, int index)
+{
+ struct of_phandle_args oirq;
+ struct irq_fwspec_info info;
+ struct irq_fwspec fwspec;
+ int rc;
+
+ rc = of_irq_parse_one(dev, index, &oirq);
+ if (rc)
+ return NULL;
+
+ of_phandle_args_to_fwspec(oirq.np, oirq.args, oirq.args_count,
+ &fwspec);
+
+ if (irq_populate_fwspec_info(&fwspec, &info))
+ return NULL;
+
+ return info.affinity;
+}
+
/**
* of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
* @dev: pointer to device tree node
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 5c310e803dd7..f7abd1333963 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,7 +26,8 @@
#include <asm/irq_regs.h>
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+ const int irq);
struct pmu_irq_ops {
void (*enable_pmuirq)(unsigned int irq);
@@ -64,7 +65,9 @@ static void armpmu_enable_percpu_pmuirq(unsigned int irq)
static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
void __percpu *devid)
{
- if (armpmu_count_irq_users(irq) == 1)
+ struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+ if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
free_percpu_irq(irq, devid);
}
@@ -89,7 +92,9 @@ static void armpmu_disable_percpu_pmunmi(unsigned int irq)
static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
void __percpu *devid)
{
- if (armpmu_count_irq_users(irq) == 1)
+ struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+ if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
free_percpu_nmi(irq, devid);
}
@@ -99,7 +104,6 @@ static const struct pmu_irq_ops percpu_pmunmi_ops = {
.free_pmuirq = armpmu_free_percpu_pmunmi
};
-DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
@@ -580,11 +584,11 @@ static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
{
int cpu, count = 0;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu, affinity) {
if (per_cpu(cpu_irq, cpu) == irq)
count++;
}
@@ -592,12 +596,13 @@ static int armpmu_count_irq_users(const int irq)
return count;
}
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
{
const struct pmu_irq_ops *ops = NULL;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu, affinity) {
if (per_cpu(cpu_irq, cpu) != irq)
continue;
@@ -609,22 +614,25 @@ static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
return ops;
}
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
return;
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
- per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+ per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
per_cpu(cpu_irq, cpu) = 0;
per_cpu(cpu_irq_ops, cpu) = NULL;
}
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
{
int err = 0;
+ struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+ const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+ cpu_possible_mask; /* ACPI */
const irq_handler_t handler = armpmu_dispatch_irq;
const struct pmu_irq_ops *irq_ops;
@@ -646,25 +654,24 @@ int armpmu_request_irq(int irq, int cpu)
IRQF_NOBALANCING | IRQF_NO_AUTOEN |
IRQF_NO_THREAD;
- err = request_nmi(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&cpu_armpmu, cpu));
+ err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
/* If cannot get an NMI, get a normal interrupt */
if (err) {
err = request_irq(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&cpu_armpmu, cpu));
+ armpmu);
irq_ops = &pmuirq_ops;
} else {
has_nmi = true;
irq_ops = &pmunmi_ops;
}
- } else if (armpmu_count_irq_users(irq) == 0) {
- err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
+ } else if (armpmu_count_irq_users(affinity, irq) == 0) {
+ err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
/* If cannot get an NMI, get a normal interrupt */
if (err) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &cpu_armpmu);
+ err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+ affinity, pcpu_armpmu);
irq_ops = &percpu_pmuirq_ops;
} else {
has_nmi = true;
@@ -672,7 +679,7 @@ int armpmu_request_irq(int irq, int cpu)
}
} else {
/* Per cpudevid irq was already requested by another CPU */
- irq_ops = armpmu_find_irq_ops(irq);
+ irq_ops = armpmu_find_irq_ops(affinity, irq);
if (WARN_ON(!irq_ops))
err = -EINVAL;
@@ -717,8 +724,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (pmu->reset)
pmu->reset(pmu);
- per_cpu(cpu_armpmu, cpu) = pmu;
-
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq)
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
@@ -738,8 +743,6 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
if (irq)
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
- per_cpu(cpu_armpmu, cpu) = NULL;
-
return 0;
}
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 05dda19c5359..e80f76d95e68 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -218,7 +218,7 @@ static int arm_pmu_acpi_parse_irqs(void)
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
- err = armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(&probed_pmus, irq, cpu);
if (err)
goto out_err;
}
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 118170a5cede..1c9e50a13201 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -42,14 +42,13 @@ static int probe_current_pmu(struct arm_pmu *pmu,
return ret;
}
-static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
+static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq,
+ const struct cpumask *affinity)
{
- int cpu, ret;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ int cpu;
- ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
- if (ret)
- return ret;
+ cpumask_copy(&pmu->supported_cpus, affinity);
for_each_cpu(cpu, &pmu->supported_cpus)
per_cpu(hw_events->irq, cpu) = irq;
@@ -115,9 +114,12 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
}
if (num_irqs == 1) {
- int irq = platform_get_irq(pdev, 0);
+ const struct cpumask *affinity;
+ int irq;
+
+ irq = platform_get_irq_affinity(pdev, 0, &affinity);
if ((irq > 0) && irq_is_percpu_devid(irq))
- return pmu_parse_percpu_irq(pmu, irq);
+ return pmu_parse_percpu_irq(pmu, irq, affinity);
}
if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
@@ -163,7 +165,7 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
if (!irq)
continue;
- err = armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
if (err)
break;
}
@@ -179,7 +181,7 @@ static void armpmu_free_irqs(struct arm_pmu *armpmu)
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
- armpmu_free_irq(irq, cpu);
+ armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
}
}
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 69c5cc8f5606..ca8d706d4022 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -1064,7 +1064,7 @@ static int armv8pmu_user_event_idx(struct perf_event *event)
static void armv8pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
struct task_struct *task, bool sched_in)
{
- struct arm_pmu *armpmu = *this_cpu_ptr(&cpu_armpmu);
+ struct arm_pmu *armpmu = to_arm_pmu(pmu_ctx->pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
if (!hw_events->branch_users)
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index fa50645fedda..87908f0712c0 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1259,8 +1259,8 @@ static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
return -ENXIO;
/* Request our PPIs (note that the IRQ is still disabled) */
- ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
- spe_pmu->handle);
+ ret = request_percpu_irq_affinity(spe_pmu->irq, arm_spe_pmu_irq_handler,
+ DRVNAME, mask, spe_pmu->handle);
if (ret)
return ret;
@@ -1287,8 +1287,10 @@ static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
struct platform_device *pdev = spe_pmu->pdev;
- int irq = platform_get_irq(pdev, 0);
+ const struct cpumask *affinity;
+ int irq;
+ irq = platform_get_irq_affinity(pdev, 0, &affinity);
if (irq < 0)
return -ENXIO;
@@ -1297,10 +1299,7 @@ static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
return -EINVAL;
}
- if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
- dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
- return -EINVAL;
- }
+ cpumask_copy(&spe_pmu->supported_cpus, affinity);
spe_pmu->irq = irq;
return 0;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 5ff5d99f6ead..607db773b672 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1509,12 +1509,19 @@ static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index);
#else
static inline
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
{
return -EINVAL;
}
+static inline const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_ACPI_LPIT
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 51b6484c0493..266f2b39213a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -109,6 +109,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @name: name of the device
* @dev_id: cookie to identify the device
* @percpu_dev_id: cookie to identify the device
+ * @affinity: CPUs this irqaction is allowed to run on
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @flags: flags (see IRQF_* above)
@@ -121,8 +122,11 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
*/
struct irqaction {
irq_handler_t handler;
- void *dev_id;
- void __percpu *percpu_dev_id;
+ union {
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ };
+ const struct cpumask *affinity;
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
@@ -179,7 +183,7 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *percpu_dev_id);
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -190,12 +194,21 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
return __request_percpu_irq(irq, handler, 0,
- devname, percpu_dev_id);
+ devname, NULL, percpu_dev_id);
+}
+
+static inline int __must_check
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *percpu_dev_id)
+{
+ return __request_percpu_irq(irq, handler, 0,
+ devname, affinity, percpu_dev_id);
}
extern int __must_check
-request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *devname, void __percpu *dev);
+request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id);
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
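For clarity, request_percpu_irq() keeps its historical behaviour by passing a
NULL affinity, which __request_percpu_irq() treats as cpu_possible_mask, while
the new wrapper narrows the action to the given mask. A rough usage sketch with
placeholder names (my_handler, my_cookie, "my-dev"):

	/* Unchanged behaviour: the action covers every possible CPU */
	err = request_percpu_irq(irq, my_handler, "my-dev", &my_cookie);

	/* New variant: deliveries are limited to the CPUs in 'affinity' */
	err = request_percpu_irq_affinity(irq, my_handler, "my-dev",
					  affinity, &my_cookie);
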
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c67e76fbcc07..4a9f1d7b08c3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -655,7 +655,6 @@ extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern void handle_fasteoi_nmi(struct irq_desc *desc);
-extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
@@ -719,10 +718,6 @@ static inline void irq_set_chip_and_handler(unsigned int irq,
}
extern int irq_set_percpu_devid(unsigned int irq);
-extern int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity);
-extern int irq_get_percpu_devid_partition(unsigned int irq,
- struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
deleted file mode 100644
index b35ee22c278f..000000000000
--- a/include/linux/irqchip/irq-partition-percpu.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-
-#include <linux/fwnode.h>
-#include <linux/cpumask_types.h>
-#include <linux/irqdomain.h>
-
-struct partition_affinity {
- cpumask_t mask;
- void *partition_id;
-};
-
-struct partition_desc;
-
-#ifdef CONFIG_PARTITION_PERCPU
-int partition_translate_id(struct partition_desc *desc, void *partition_id);
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops);
-struct irq_domain *partition_get_domain(struct partition_desc *dsc);
-#else
-static inline int partition_translate_id(struct partition_desc *desc,
- void *partition_id)
-{
- return -EINVAL;
-}
-
-static inline
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- return NULL;
-}
-
-static inline
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- return NULL;
-}
-#endif
-
-#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fd091c35d572..37e0b5b5600a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -82,7 +82,6 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 4a86e6b915dd..952d3c8dd6b7 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -44,6 +44,23 @@ struct irq_fwspec {
u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
+/**
+ * struct irq_fwspec_info - firmware provided IRQ information structure
+ *
+ * @flags: Information validity flags
+ * @affinity: Affinity mask for this interrupt
+ *
+ * This structure reports firmware-specific information about an
+ * interrupt. The only significant information is the affinity of a
+ * per-CPU interrupt, but this is designed to be extended as required.
+ */
+struct irq_fwspec_info {
+ unsigned long flags;
+ const struct cpumask *affinity;
+};
+
+#define IRQ_FWSPEC_INFO_AFFINITY_VALID BIT(0)
+
/* Conversion function from of_phandle_args fields to fwspec */
void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
unsigned int count, struct irq_fwspec *fwspec);
@@ -69,6 +86,9 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
* @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
* linux irq type value (@out_type). This is a generalised @xlate
* (over struct irq_fwspec) and is preferred if provided.
+ * @get_fwspec_info:
+ * Given @fwspec, report additional firmware-provided information in
+ * @info. Optional.
* @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
@@ -96,6 +116,7 @@ struct irq_domain_ops {
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);
+ int (*get_fwspec_info)(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void (*debug_show)(struct seq_file *m, struct irq_domain *d,
@@ -602,6 +623,8 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_bas
int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
+
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
@@ -685,6 +708,10 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
return false;
}
+static inline int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#ifdef CONFIG_GENERIC_MSI_IRQ
@@ -703,12 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
-/* Deprecated functions. Will be removed in the merge window */
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
-
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
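For a new irqchip, a minimal .get_fwspec_info callback follows the same shape
as the AIC/GIC conversions above. A rough sketch, with all foo_* symbols and
FOO_PERCPU_IRQ standing in for real driver code:

	static int foo_irq_get_fwspec_info(struct irq_fwspec *fwspec,
					   struct irq_fwspec_info *info)
	{
		info->flags = 0;
		info->affinity = NULL;

		/* Only per-CPU interrupts carry a meaningful affinity */
		if (fwspec->param[0] != FOO_PERCPU_IRQ)
			return 0;

		/* Hand back the firmware-described mask for this interrupt */
		info->affinity = &foo_percpu_mask;
		info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
		return 0;
	}

	static const struct irq_domain_ops foo_irq_domain_ops = {
		.translate		= foo_irq_domain_translate,
		.alloc			= foo_irq_domain_alloc,
		.free			= foo_irq_domain_free,
		.get_fwspec_info	= foo_irq_get_fwspec_info,
	};
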
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1db8543dfc8a..1c2bc0281807 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -43,6 +43,8 @@ extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
+extern const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
@@ -76,6 +78,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index)
+{
+ return NULL;
+}
static inline int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 93c9a26492fc..bab26a7d79f4 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -132,8 +132,6 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-DECLARE_PER_CPU(struct arm_pmu *, cpu_armpmu);
-
u64 armpmu_event_update(struct perf_event *event);
int armpmu_event_set_period(struct perf_event *event);
@@ -190,8 +188,8 @@ bool arm_pmu_irq_is_nmi(void);
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 074754c23d33..ad66333ce85c 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -102,6 +102,8 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
+extern int platform_get_irq_affinity(struct platform_device *, unsigned int,
+ const struct cpumask **);
extern int platform_irq_count(struct platform_device *);
extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
struct irq_affinity *affd,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cbb7340c5866..cd6be74d87b8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1901,6 +1901,7 @@ extern int sched_setscheduler(struct task_struct *, int, const struct sched_para
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_fifo_secondary(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3ffa0d80ddd1..19e0a87a2663 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -897,8 +897,9 @@ void handle_percpu_irq(struct irq_desc *desc)
void handle_percpu_devid_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int cpu = smp_processor_id();
+ struct irqaction *action;
irqreturn_t res;
/*
@@ -910,12 +911,15 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
+ for (action = desc->action; action; action = action->next)
+ if (cpumask_test_cpu(cpu, action->affinity))
+ break;
+
if (likely(action)) {
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
} else {
- unsigned int cpu = smp_processor_id();
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
if (enabled)
@@ -929,31 +933,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-/**
- * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
- * dev ids
- * @desc: the interrupt description structure for this irq
- *
- * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
- * as a percpu pointer.
- */
-void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irqaction *action = desc->action;
- unsigned int irq = irq_desc_get_irq(desc);
- irqreturn_t res;
-
- __kstat_incr_irqs_this_cpu(desc);
-
- trace_irq_handler_entry(irq, action);
- res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
- trace_irq_handler_exit(irq, action, res);
-
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
-}
-
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
int is_chained, const char *name)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e103451243a0..786f5570a640 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,7 +133,15 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
*/
atomic_inc(&desc->threads_active);
- wake_up_process(action->thread);
+ /*
+ * This might be a premature wakeup before the thread reached the
+ * thread function and set the IRQTF_READY bit. It's waiting in
+ * kthread code with state UNINTERRUPTIBLE. Once it reaches the
+ * thread function it waits with INTERRUPTIBLE. The wakeup is not
+ * lost in that case because the thread is guaranteed to observe
+ * the RUN flag before it goes to sleep in wait_for_interrupt().
+ */
+ wake_up_state(action->thread, TASK_INTERRUPTIBLE);
}
static DEFINE_STATIC_KEY_FALSE(irqhandler_duration_check_enabled);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index db714d3014b5..6acf268f005b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -879,8 +879,7 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
chip_bus_sync_unlock(desc);
}
-int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity)
+int irq_set_percpu_devid(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -892,31 +891,10 @@ int irq_set_percpu_devid_partition(unsigned int irq,
if (!desc->percpu_enabled)
return -ENOMEM;
- desc->percpu_affinity = affinity ? : cpu_possible_mask;
-
irq_set_percpu_devid_flags(irq);
return 0;
}
-int irq_set_percpu_devid(unsigned int irq)
-{
- return irq_set_percpu_devid_partition(irq, NULL);
-}
-
-int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (!desc || !desc->percpu_enabled)
- return -EINVAL;
-
- if (affinity)
- cpumask_copy(affinity, desc->percpu_affinity);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
-
void kstat_incr_irq_this_cpu(unsigned int irq)
{
kstat_incr_irqs_this_cpu(irq_to_desc(irq));
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dc473faadcc8..2652c4cfd877 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -867,13 +867,9 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
}
EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec);
-unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+static struct irq_domain *fwspec_to_domain(struct irq_fwspec *fwspec)
{
struct irq_domain *domain;
- struct irq_data *irq_data;
- irq_hw_number_t hwirq;
- unsigned int type = IRQ_TYPE_NONE;
- int virq;
if (fwspec->fwnode) {
domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
@@ -883,6 +879,32 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
domain = irq_default_domain;
}
+ return domain;
+}
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ struct irq_domain *domain = fwspec_to_domain(fwspec);
+
+ memset(info, 0, sizeof(*info));
+
+ if (!domain || !domain->ops->get_fwspec_info)
+ return 0;
+
+ return domain->ops->get_fwspec_info(fwspec, info);
+}
+#endif
+
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+{
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_domain *domain;
+ struct irq_data *irq_data;
+ irq_hw_number_t hwirq;
+ int virq;
+
+ domain = fwspec_to_domain(fwspec);
if (!domain) {
pr_warn("no irq domain found for %s !\n",
of_node_full_name(to_of_node(fwspec->fwnode)));
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..1615b6476210 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -547,7 +547,7 @@ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *noti
INIT_WORK(&notify->work, irq_affinity_notify);
}
- scoped_guard(raw_spinlock_irqsave, &desc->lock) {
+ scoped_guard(raw_spinlock_irq, &desc->lock) {
old_notify = desc->affinity_notify;
desc->affinity_notify = notify;
}
@@ -1001,7 +1001,6 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
cpumask_var_t mask;
- bool valid = false;
if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
return;
@@ -1018,21 +1017,13 @@ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *a
}
scoped_guard(raw_spinlock_irq, &desc->lock) {
- /*
- * This code is triggered unconditionally. Check the affinity
- * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
- */
- if (cpumask_available(desc->irq_common_data.affinity)) {
- const struct cpumask *m;
+ const struct cpumask *m;
- m = irq_data_get_effective_affinity_mask(&desc->irq_data);
- cpumask_copy(mask, m);
- valid = true;
- }
+ m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+ cpumask_copy(mask, m);
}
- if (valid)
- set_cpus_allowed_ptr(current, mask);
+ set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
}
#else
@@ -1239,7 +1230,10 @@ static int irq_thread(void *data)
irq_thread_set_ready(desc, action);
- sched_set_fifo(current);
+ if (action->handler == irq_forced_secondary_handler)
+ sched_set_fifo_secondary(current);
+ else
+ sched_set_fifo(current);
if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
&action->thread_flags))
@@ -1405,19 +1399,39 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
* references an already freed task_struct.
*/
new->thread = get_task_struct(t);
+
/*
- * Tell the thread to set its affinity. This is
- * important for shared interrupt handlers as we do
- * not invoke setup_affinity() for the secondary
- * handlers as everything is already set up. Even for
- * interrupts marked with IRQF_NO_BALANCE this is
- * correct as we want the thread to move to the cpu(s)
- * on which the requesting code placed the interrupt.
+ * The affinity can not be established yet, but it will be once the
+ * interrupt is enabled. Delay and defer the actual setting to the
+ * thread itself once it is ready to run. In the meantime, prevent
+ * it from ever being re-affined directly by cpuset or
+ * housekeeping. The proper way to do it is to re-affine the whole
+ * vector.
*/
- set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ kthread_bind_mask(t, cpu_possible_mask);
+
+ /*
+ * Ensure the thread adjusts the affinity once it reaches the
+ * thread function.
+ */
+ new->thread_flags = BIT(IRQTF_AFFINITY);
+
return 0;
}
+static bool valid_percpu_irqaction(struct irqaction *old, struct irqaction *new)
+{
+ do {
+ if (cpumask_intersects(old->affinity, new->affinity) ||
+ old->percpu_dev_id == new->percpu_dev_id)
+ return false;
+
+ old = old->next;
+ } while (old);
+
+ return true;
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -1438,6 +1452,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
struct irqaction *old, **old_ptr;
unsigned long flags, thread_mask = 0;
int ret, nested, shared = 0;
+ bool per_cpu_devid;
if (!desc)
return -EINVAL;
@@ -1447,6 +1462,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!try_module_get(desc->owner))
return -ENODEV;
+ per_cpu_devid = irq_settings_is_per_cpu_devid(desc);
+
new->irq = irq;
/*
@@ -1554,13 +1571,20 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
unsigned int oldtype;
- if (irq_is_nmi(desc)) {
+ if (irq_is_nmi(desc) && !per_cpu_devid) {
pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
new->name, irq, desc->irq_data.chip->name);
ret = -EINVAL;
goto out_unlock;
}
+ if (per_cpu_devid && !valid_percpu_irqaction(old, new)) {
+ pr_err("Overlapping affinities for %s (irq %d) on irqchip %s.\n",
+ new->name, irq, desc->irq_data.chip->name);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* If nobody did set the configuration before, inherit
* the one provided by the requester.
@@ -1711,7 +1735,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!(new->flags & IRQF_NO_AUTOEN) &&
irq_settings_can_autoenable(desc)) {
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
- } else {
+ } else if (!per_cpu_devid) {
/*
* Shared interrupts do not go well with disabling
* auto enable. The sharing interrupt might request
@@ -2346,7 +2370,7 @@ void disable_percpu_nmi(unsigned int irq)
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction *action;
+ struct irqaction *action, **action_ptr;
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -2354,21 +2378,33 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
return NULL;
scoped_guard(raw_spinlock_irqsave, &desc->lock) {
- action = desc->action;
- if (!action || action->percpu_dev_id != dev_id) {
- WARN(1, "Trying to free already-free IRQ %d\n", irq);
- return NULL;
+ action_ptr = &desc->action;
+ for (;;) {
+ action = *action_ptr;
+
+ if (!action) {
+ WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ return NULL;
+ }
+
+ if (action->percpu_dev_id == dev_id)
+ break;
+
+ action_ptr = &action->next;
}
- if (!cpumask_empty(desc->percpu_enabled)) {
- WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
- irq, cpumask_first(desc->percpu_enabled));
+ if (cpumask_intersects(desc->percpu_enabled, action->affinity)) {
+ WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq,
+ cpumask_first_and(desc->percpu_enabled, action->affinity));
return NULL;
}
/* Found it - now remove it from the list of entries: */
- desc->action = NULL;
- desc->istate &= ~IRQS_NMI;
+ *action_ptr = action->next;
+
+ /* Demote from NMI if we killed the last action */
+ if (!desc->action)
+ desc->istate &= ~IRQS_NMI;
}
unregister_handler_proc(irq, action);
@@ -2442,17 +2478,49 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
return retval;
}
+static
+struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long flags,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *dev_id)
+{
+ struct irqaction *action;
+
+ if (!affinity)
+ affinity = cpu_possible_mask;
+
+ action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ action->handler = handler;
+ action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
+ action->name = devname;
+ action->percpu_dev_id = dev_id;
+ action->affinity = affinity;
+
+ /*
+ * We allow some form of sharing for non-overlapping affinity
+ * masks. Obviously, covering all CPUs prevents any sharing in
+ * the first place.
+ */
+ if (!cpumask_equal(affinity, cpu_possible_mask))
+ action->flags |= IRQF_SHARED;
+
+ return action;
+}
+
/**
* __request_percpu_irq - allocate a percpu interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @flags: Interrupt type flags (IRQF_TIMER only)
* @devname: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
* @dev_id: A percpu cookie passed back to the handler function
*
- * This call allocates interrupt resources and enables the interrupt on the
- * local CPU. If the interrupt is supposed to be enabled on other CPUs, it
- * has to be done on each CPU using enable_percpu_irq().
+ * This call allocates interrupt resources, but doesn't enable the interrupt
+ * on any CPU, as all percpu-devid interrupts are flagged with IRQ_NOAUTOEN.
+ * It has to be done on each CPU using enable_percpu_irq().
*
* @dev_id must be globally unique. It is a per-cpu variable, and
* the handler gets called with the interrupted CPU's instance of
@@ -2460,7 +2528,7 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
*/
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *dev_id)
+ const cpumask_t *affinity, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -2477,15 +2545,10 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
if (flags && flags != IRQF_TIMER)
return -EINVAL;
- action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ action = create_percpu_irqaction(handler, flags, devname, affinity, dev_id);
if (!action)
return -ENOMEM;
- action->handler = handler;
- action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
- action->name = devname;
- action->percpu_dev_id = dev_id;
-
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0) {
kfree(action);
@@ -2508,6 +2571,7 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* @name: An ascii name for the claiming device
+ * @affinity: A cpumask describing the target CPUs for this interrupt
* @dev_id: A percpu cookie passed back to the handler function
*
* This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
@@ -2524,8 +2588,8 @@ EXPORT_SYMBOL_GPL(__request_percpu_irq);
* If the interrupt line cannot be used to deliver NMIs, function
* will fail returning a negative value.
*/
-int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *name, void __percpu *dev_id)
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -2542,20 +2606,16 @@ int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
!irq_supports_nmi(desc))
return -EINVAL;
- /* The line cannot already be NMI */
- if (irq_is_nmi(desc))
+ /* The line cannot be NMI already if the new request covers all CPUs */
+ if (irq_is_nmi(desc) &&
+ (!affinity || cpumask_equal(affinity, cpu_possible_mask)))
return -EINVAL;
- action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+ action = create_percpu_irqaction(handler, IRQF_NO_THREAD | IRQF_NOBALANCING,
+ name, affinity, dev_id);
if (!action)
return -ENOMEM;
- action->handler = handler;
- action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
- | IRQF_NOBALANCING;
- action->name = name;
- action->percpu_dev_id = dev_id;
-
retval = irq_chip_pm_get(&desc->irq_data);
if (retval < 0)
goto err_out;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 29c2404e743b..77258eafbf63 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -48,6 +48,8 @@ static int show_irq_affinity(int type, struct seq_file *m)
struct irq_desc *desc = irq_to_desc((long)m->private);
const struct cpumask *mask;
+ guard(raw_spinlock_irq)(&desc->lock);
+
switch (type) {
case AFFINITY:
case AFFINITY_LIST:
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 77ae87f36e84..48347950ac48 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -856,6 +856,19 @@ void sched_set_fifo_low(struct task_struct *p)
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);
+/*
+ * Used when the primary interrupt handler is forced into a thread, in addition
+ * to the (always threaded) secondary handler. The secondary handler gets a
+ * slightly lower priority so that the primary handler can preempt it, thereby
+ * emulating the behavior of a non-PREEMPT_RT system where the primary handler
+ * runs in hard interrupt context.
+ */
+void sched_set_fifo_secondary(struct task_struct *p)
+{
+ struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 - 1 };
+ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+
void sched_set_normal(struct task_struct *p, int nice)
{
struct sched_attr attr = {
Linus,
please pull the latest timers/clocksource branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-clocksource-2025-11-30
up to: 2437f798809d: Merge tag 'timers-v6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/daniel.lezcano/linux into timers/clocksource
Updates for clocksource and clockevent drivers:
- A new driver for the Realtek system timer
- Prevent unbinding of timer drivers which do not support it.
- Expand the timer counter readout for the SPRD driver to 64 bit to allow
IoT device suspend times of more than 36 hours, which is the limit of the
current 32-bit readout (see the read-out sketch after this list)
- The usual small cleanups, fixes and enhancements all over the place.
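For reference, such a 64-bit readout of two 32-bit counter halves follows
the usual hi/lo/hi pattern. The sketch below is only an illustration with
placeholder register offsets (REG_HI/REG_LO), not the actual SPRD code:

static u64 read_counter64(void __iomem *base)
{
	u32 lo, hi;

	/*
	 * Re-read the high word until it is stable, so a low-word
	 * rollover between the two reads cannot produce a torn value.
	 */
	do {
		hi = readl_relaxed(base + REG_HI);
		lo = readl_relaxed(base + REG_LO);
	} while (hi != readl_relaxed(base + REG_HI));

	return ((u64)hi << 32) | lo;
}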
Thanks,
tglx
------------------>
Enlin Mu (2):
clocksource/drivers/sprd: Enable register for timer counter from 32 bit to 64 bit
clocksource/drivers/rda: Add sched_clock_register for RDA8810PL SoC
Hao-Wen Ting (2):
dt-bindings: timer: Add Realtek SYSTIMER
clocksource/drivers: Add Realtek system timer driver
Haotian Zhang (1):
clocksource/drivers/ralink: Fix resource leaks in init error path
Johan Hovold (6):
clocksource/drivers/stm: Fix double deregistration on probe failure
clocksource/drivers/nxp-stm: Fix section mismatches
clocksource/drivers/arm_arch_timer_mmio: Prevent driver unbind
clocksource/drivers/nxp-pit: Prevent driver unbind
clocksource/drivers/nxp-stm: Prevent driver unbind
clocksource/drivers/stm32-lp: Drop unused module alias
Niklas Söderlund (1):
clocksource/drivers/sh_cmt: Always leave device running after probe
Stephen Eta Zhou (1):
clocksource/drivers/timer-sp804: Fix read_current_timer() issue when clock source is not registered
.../bindings/timer/realtek,rtd1625-systimer.yaml | 47 +++++++
MAINTAINERS | 5 +
drivers/clocksource/Kconfig | 11 ++
drivers/clocksource/Makefile | 1 +
drivers/clocksource/arm_arch_timer_mmio.c | 2 +
drivers/clocksource/sh_cmt.c | 36 +----
drivers/clocksource/timer-nxp-pit.c | 3 +-
drivers/clocksource/timer-nxp-stm.c | 23 ++--
drivers/clocksource/timer-ralink.c | 11 +-
drivers/clocksource/timer-rda.c | 9 +-
drivers/clocksource/timer-realtek.c | 150 +++++++++++++++++++++
drivers/clocksource/timer-sp804.c | 24 ++++
drivers/clocksource/timer-sprd.c | 24 +++-
drivers/clocksource/timer-stm32-lp.c | 1 -
14 files changed, 291 insertions(+), 56 deletions(-)
create mode 100644 Documentation/devicetree/bindings/timer/realtek,rtd1625-systimer.yaml
create mode 100644 drivers/clocksource/timer-realtek.c
diff --git a/Documentation/devicetree/bindings/timer/realtek,rtd1625-systimer.yaml b/Documentation/devicetree/bindings/timer/realtek,rtd1625-systimer.yaml
new file mode 100644
index 000000000000..e08d3d2d306b
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/realtek,rtd1625-systimer.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/realtek,rtd1625-systimer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek System Timer
+
+maintainers:
+ - Hao-Wen Ting <haowen.ting@realtek.com>
+
+description:
+ The Realtek SYSTIMER (System Timer) is a 64-bit global hardware counter operating
+ at a fixed 1MHz frequency. Thanks to its compare match interrupt capability,
+ the timer natively supports oneshot mode for tick broadcast functionality.
+
+properties:
+ compatible:
+ oneOf:
+ - const: realtek,rtd1625-systimer
+ - items:
+ - const: realtek,rtd1635-systimer
+ - const: realtek,rtd1625-systimer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@89420 {
+ compatible = "realtek,rtd1635-systimer",
+ "realtek,rtd1625-systimer";
+ reg = <0x89420 0x18>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 3da2c26a796b..b72b9873b90e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -21669,6 +21669,11 @@ S: Maintained
F: Documentation/devicetree/bindings/spi/realtek,rtl9301-snand.yaml
F: drivers/spi/spi-realtek-rtl-snand.c
+REALTEK SYSTIMER DRIVER
+M: Hao-Wen Ting <haowen.ting@realtek.com>
+S: Maintained
+F: drivers/clocksource/timer-realtek.c
+
REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index ffcd23668763..aa59e5b13351 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -782,4 +782,15 @@ config NXP_STM_TIMER
Enables the support for NXP System Timer Module found in the
s32g NXP platform series.
+config RTK_SYSTIMER
+ bool "Realtek SYSTIMER support"
+ depends on ARM || ARM64
+ depends on ARCH_REALTEK || COMPILE_TEST
+ select TIMER_OF
+ help
+ This option enables the driver that registers the global 1 MHz hardware
+ counter as a clock event device on Realtek SoCs. Make sure to enable
+ this option only when building for a Realtek platform or for compilation
+ testing.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index ec4452ee958f..b46376af6b49 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -95,3 +95,4 @@ obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
+obj-$(CONFIG_RTK_SYSTIMER) += timer-realtek.o
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
index ebe1987d651e..d10362692fdd 100644
--- a/drivers/clocksource/arm_arch_timer_mmio.c
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -426,6 +426,7 @@ static struct platform_driver arch_timer_mmio_drv = {
.driver = {
.name = "arch-timer-mmio",
.of_match_table = arch_timer_mmio_of_table,
+ .suppress_bind_attrs = true,
},
.probe = arch_timer_mmio_probe,
};
@@ -434,6 +435,7 @@ builtin_platform_driver(arch_timer_mmio_drv);
static struct platform_driver arch_timer_mmio_acpi_drv = {
.driver = {
.name = "gtdt-arm-mmio-timer",
+ .suppress_bind_attrs = true,
},
.probe = arch_timer_mmio_probe,
};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 385eb94bbe7c..791b298c995b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -355,14 +355,6 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
- /* enable clock */
- ret = clk_enable(ch->cmt->clk);
- if (ret) {
- dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
- ch->index);
- goto err0;
- }
-
/* make sure channel is disabled */
sh_cmt_start_stop_ch(ch, 0);
@@ -384,19 +376,12 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
if (ret || sh_cmt_read_cmcnt(ch)) {
dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
ch->index);
- ret = -ETIMEDOUT;
- goto err1;
+ return -ETIMEDOUT;
}
/* enable channel */
sh_cmt_start_stop_ch(ch, 1);
return 0;
- err1:
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
- err0:
- return ret;
}
static void sh_cmt_disable(struct sh_cmt_channel *ch)
@@ -407,9 +392,6 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
/* disable interrupts in CMT block */
sh_cmt_write_cmcsr(ch, 0);
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
@@ -583,8 +565,6 @@ static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
int ret = 0;
unsigned long flags;
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
-
raw_spin_lock_irqsave(&ch->lock, flags);
if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
@@ -619,8 +599,6 @@ static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
sh_cmt_disable(ch);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
@@ -630,10 +608,8 @@ static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
@@ -656,10 +632,8 @@ static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
ch->flags &= ~FLAG_CLOCKEVENT;
- if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
sh_cmt_disable(ch);
- pm_runtime_put(&ch->cmt->pdev->dev);
- }
/* adjust the timeout to maximum if only clocksource left */
if (ch->flags & FLAG_CLOCKSOURCE)
@@ -1134,8 +1108,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
- clk_disable(cmt->clk);
-
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1183,8 +1155,6 @@ static int sh_cmt_probe(struct platform_device *pdev)
out:
if (cmt->has_clockevent || cmt->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
- else
- pm_runtime_idle(&pdev->dev);
return 0;
}
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
index 2d0a3554b6bf..d1740f18f718 100644
--- a/drivers/clocksource/timer-nxp-pit.c
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -374,9 +374,10 @@ static struct platform_driver nxp_pit_driver = {
.driver = {
.name = "nxp-pit",
.of_match_table = pit_timer_of_match,
+ .suppress_bind_attrs = true,
},
.probe = pit_timer_probe,
};
-module_platform_driver(nxp_pit_driver);
+builtin_platform_driver(nxp_pit_driver);
TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
index bbc40623728f..1ab907233f48 100644
--- a/drivers/clocksource/timer-nxp-stm.c
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -177,15 +177,15 @@ static void nxp_stm_clocksource_resume(struct clocksource *cs)
nxp_stm_clocksource_enable(cs);
}
-static void __init devm_clocksource_unregister(void *data)
+static void devm_clocksource_unregister(void *data)
{
struct stm_timer *stm_timer = data;
clocksource_unregister(&stm_timer->cs);
}
-static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, struct clk *clk)
+static int nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
{
int ret;
@@ -208,10 +208,8 @@ static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer
return ret;
ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
- if (ret) {
- clocksource_unregister(&stm_timer->cs);
+ if (ret)
return ret;
- }
stm_sched_clock = stm_timer;
@@ -298,9 +296,9 @@ static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
nxp_stm_module_get(stm_timer);
}
-static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, int irq,
- struct clk *clk, int cpu)
+static int nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
{
stm_timer->base = base;
stm_timer->rate = clk_get_rate(clk);
@@ -388,7 +386,7 @@ static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+static int nxp_stm_timer_probe(struct platform_device *pdev)
{
struct stm_timer *stm_timer;
struct device *dev = &pdev->dev;
@@ -484,14 +482,15 @@ static const struct of_device_id nxp_stm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
-static struct platform_driver nxp_stm_probe = {
+static struct platform_driver nxp_stm_driver = {
.probe = nxp_stm_timer_probe,
.driver = {
.name = "nxp-stm",
.of_match_table = nxp_stm_of_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(nxp_stm_probe);
+builtin_platform_driver(nxp_stm_driver);
MODULE_DESCRIPTION("NXP System Timer Module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
index 6ecdb4228f76..68434d9ed910 100644
--- a/drivers/clocksource/timer-ralink.c
+++ b/drivers/clocksource/timer-ralink.c
@@ -130,14 +130,15 @@ static int __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%pOFn: request_irq failed", np);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16,
clocksource_mmio_readl_up);
if (ret)
- return ret;
+ goto err_free_irq;
clockevents_register_device(&systick.dev);
@@ -145,6 +146,12 @@ static int __init ralink_systick_init(struct device_node *np)
np, systick.dev.mult, systick.dev.shift);
return 0;
+
+err_free_irq:
+ irq_dispose_mapping(systick.dev.irq);
+err_iounmap:
+ iounmap(systick.membase);
+ return ret;
}
TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-rda.c b/drivers/clocksource/timer-rda.c
index fd1199c189bf..0be8e05970e2 100644
--- a/drivers/clocksource/timer-rda.c
+++ b/drivers/clocksource/timer-rda.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
#include "timer-of.h"
@@ -153,7 +154,7 @@ static struct timer_of rda_ostimer_of = {
},
};
-static u64 rda_hwtimer_read(struct clocksource *cs)
+static u64 rda_hwtimer_clocksource_read(void)
{
void __iomem *base = timer_of_base(&rda_ostimer_of);
u32 lo, hi;
@@ -167,6 +168,11 @@ static u64 rda_hwtimer_read(struct clocksource *cs)
return ((u64)hi << 32) | lo;
}
+static u64 rda_hwtimer_read(struct clocksource *cs)
+{
+ return rda_hwtimer_clocksource_read();
+}
+
static struct clocksource rda_hwtimer_clocksource = {
.name = "rda-timer",
.rating = 400,
@@ -185,6 +191,7 @@ static int __init rda_timer_init(struct device_node *np)
return ret;
clocksource_register_hz(&rda_hwtimer_clocksource, rate);
+ sched_clock_register(rda_hwtimer_clocksource_read, 64, rate);
clockevents_config_and_register(&rda_ostimer_of.clkevt, rate,
0x2, UINT_MAX);
diff --git a/drivers/clocksource/timer-realtek.c b/drivers/clocksource/timer-realtek.c
new file mode 100644
index 000000000000..4f0439de9939
--- /dev/null
+++ b/drivers/clocksource/timer-realtek.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Realtek Semiconductor Corp.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqflags.h>
+#include <linux/interrupt.h>
+#include "timer-of.h"
+
+#define ENBL 1
+#define DSBL 0
+
+#define SYSTIMER_RATE 1000000
+#define SYSTIMER_MIN_DELTA 0x64
+#define SYSTIMER_MAX_DELTA ULONG_MAX
+
+/* SYSTIMER Register Offset (RTK Internal Use) */
+#define TS_LW_OFST 0x0
+#define TS_HW_OFST 0x4
+#define TS_CMP_VAL_LW_OFST 0x8
+#define TS_CMP_VAL_HW_OFST 0xC
+#define TS_CMP_CTRL_OFST 0x10
+#define TS_CMP_STAT_OFST 0x14
+
+/* SYSTIMER CMP CTRL REG Mask */
+#define TS_CMP_EN_MASK 0x1
+#define TS_WR_EN0_MASK 0x2
+
+static void __iomem *systimer_base;
+
+static u64 rtk_ts64_read(void)
+{
+ u32 low, high;
+ u64 ts;
+
+ /* Caution: Read LSB word (TS_LW_OFST) first then MSB (TS_HW_OFST) */
+ low = readl(systimer_base + TS_LW_OFST);
+ high = readl(systimer_base + TS_HW_OFST);
+ ts = ((u64)high << 32) | low;
+
+ return ts;
+}
+
+static void rtk_cmp_value_write(u64 value)
+{
+ u32 high, low;
+
+ low = value & 0xFFFFFFFF;
+ high = value >> 32;
+
+ writel(high, systimer_base + TS_CMP_VAL_HW_OFST);
+ writel(low, systimer_base + TS_CMP_VAL_LW_OFST);
+}
+
+static inline void rtk_cmp_en_write(bool cmp_en)
+{
+ u32 val;
+
+ val = TS_WR_EN0_MASK;
+ if (cmp_en == ENBL)
+ val |= TS_CMP_EN_MASK;
+
+ writel(val, systimer_base + TS_CMP_CTRL_OFST);
+}
+
+static int rtk_syst_clkevt_next_event(unsigned long cycles, struct clock_event_device *clkevt)
+{
+ u64 cmp_val;
+
+ rtk_cmp_en_write(DSBL);
+ cmp_val = rtk_ts64_read();
+
+ /* Set CMP value to current timestamp plus delta_us */
+ rtk_cmp_value_write(cmp_val + cycles);
+ rtk_cmp_en_write(ENBL);
+ return 0;
+}
+
+static irqreturn_t rtk_ts_match_intr_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ void __iomem *reg_base;
+ u32 val;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ val = readl(reg_base) & TS_CMP_EN_MASK;
+ writel(val | TS_CMP_EN_MASK, reg_base);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int rtk_syst_shutdown(struct clock_event_device *clkevt)
+{
+ void __iomem *reg_base;
+ u64 cmp_val = 0;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+ /* Set compare value to 0 */
+ rtk_cmp_value_write(cmp_val);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ writel(TS_CMP_EN_MASK, reg_base);
+ return 0;
+}
+
+static struct timer_of rtk_timer_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "rtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ .features = CLOCK_EVT_FEAT_DYNIRQ |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = rtk_syst_clkevt_next_event,
+ .set_state_oneshot = rtk_syst_shutdown,
+ .set_state_shutdown = rtk_syst_shutdown,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = rtk_ts_match_intr_handler,
+ },
+};
+
+static int __init rtk_systimer_init(struct device_node *node)
+{
+ int ret;
+
+ ret = timer_of_init(node, &rtk_timer_to);
+ if (ret)
+ return ret;
+
+ systimer_base = timer_of_base(&rtk_timer_to);
+ clockevents_config_and_register(&rtk_timer_to.clkevt, SYSTIMER_RATE,
+ SYSTIMER_MIN_DELTA, SYSTIMER_MAX_DELTA);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(rtk_systimer, "realtek,rtd1625-systimer", rtk_systimer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index cd1916c05325..e82a95ea4724 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -21,6 +21,10 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
#include "timer-sp.h"
/* Hisilicon 64-bit timer(a variant of ARM SP804) */
@@ -102,6 +106,23 @@ static u64 notrace sp804_read(void)
return ~readl_relaxed(sched_clkevt->value);
}
+#ifdef CONFIG_ARM
+static struct delay_timer delay;
+static unsigned long sp804_read_delay_timer_read(void)
+{
+ return sp804_read();
+}
+
+static void sp804_register_delay_timer(int freq)
+{
+ delay.freq = freq;
+ delay.read_current_timer = sp804_read_delay_timer_read;
+ register_current_timer_delay(&delay);
+}
+#else
+static inline void sp804_register_delay_timer(int freq) {}
+#endif
+
static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
@@ -114,6 +135,8 @@ static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (rate < 0)
return -EINVAL;
+ sp804_register_delay_timer(rate);
+
clkevt = sp804_clkevt_get(base);
writel(0, clkevt->ctrl);
@@ -318,6 +341,7 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
if (ret)
goto err;
}
+
initialized = true;
return 0;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index 430cb99d8d79..2c07dd2af760 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -30,6 +30,7 @@
#define TIMER_VALUE_SHDW_HI 0x1c
#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+#define TIMER_VALUE_HI_MASK GENMASK(31, 0)
static void sprd_timer_enable(void __iomem *base, u32 flag)
{
@@ -162,15 +163,26 @@ static struct timer_of suspend_to = {
static u64 sprd_suspend_timer_read(struct clocksource *cs)
{
- return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
- TIMER_VALUE_SHDW_LO) & cs->mask;
+ u32 lo, hi;
+
+ do {
+ hi = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_HI);
+ lo = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO);
+ } while (hi != readl_relaxed(timer_of_base(&suspend_to) + TIMER_VALUE_SHDW_HI));
+
+ return ~(((u64)hi << 32) | lo);
}
static int sprd_suspend_timer_enable(struct clocksource *cs)
{
- sprd_timer_update_counter(timer_of_base(&suspend_to),
- TIMER_VALUE_LO_MASK);
- sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+ writel_relaxed(TIMER_VALUE_LO_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_LO);
+ writel_relaxed(TIMER_VALUE_HI_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_HI);
+ sprd_timer_enable(timer_of_base(&suspend_to),
+ TIMER_CTL_PERIOD_MODE|TIMER_CTL_64BIT_WIDTH);
return 0;
}
@@ -186,7 +198,7 @@ static struct clocksource suspend_clocksource = {
.read = sprd_suspend_timer_read,
.enable = sprd_suspend_timer_enable,
.disable = sprd_suspend_timer_disable,
- .mask = CLOCKSOURCE_MASK(32),
+ .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index c2a699f5c1dd..3d804128c765 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -289,5 +289,4 @@ static struct platform_driver stm32_clkevent_lp_driver = {
};
module_platform_driver(stm32_clkevent_lp_driver);
-MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
Linus,
please pull the latest timers/core branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-core-2025-11-30
up to: 7dec062cfcf2: timers/migration: Exclude isolated cpus from hierarchy
Updates for the time/timers core:
- Prevent a thundering herd problem when the timekeeper CPU is delayed
and a large number of CPUs compete to acquire jiffies_lock to do the
update. Limit the update to one CPU at a time with a separate
"uncontended" atomic variable (see the sketch after this list).
- A set of improvements for the timer migration mechanism:
- Support imbalanced NUMA trees correctly
- Support dynamic exclusion of CPUs from the migrator duty to allow the
cpuset/isolation mechanism to exclude them from handling timers of
remote idle CPUs.
- The usual small updates, cleanups and enhancements
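As an illustration of that "uncontended" gate (implemented in
tick_limited_update_jiffies64() below), here is a minimal sketch which
omits the stalled-jiffies bookkeeping; the function name is made up for
the example:

static bool update_jiffies_once(void)
{
	static atomic_t in_progress;
	int inp = atomic_read(&in_progress);

	/* Back off if another CPU already owns the update */
	if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1))
		return false;

	/* ... perform the jiffies update here ... */

	atomic_set(&in_progress, 0);
	return true;
}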
Thanks,
tglx
------------------>
Frederic Weisbecker (6):
timers/migration: Convert "while" loops to use "for"
timers/migration: Remove locking on group connection
timers/migration: Fix imbalanced NUMA trees
timers/migration: Assert that hotplug preparing CPU is part of stable active hierarchy
timers/migration: Remove unused "cpu" parameter from tmigr_get_group()
timers/migration: Remove dead code handling idle CPU checking for remote timers
Gabriele Monaco (6):
timers/migration: Rename 'online' bit to 'available'
timers/migration: Add mask for CPUs available in the hierarchy
timers/migration: Use scoped_guard on available flag set/clear
cgroup/cpuset: Rename update_unbound_workqueue_cpumask() to update_isolation_cpumasks()
sched/isolation: Force housekeeping if isolcpus and nohz_full don't leave any
timers/migration: Exclude isolated cpus from hierarchy
Jianyun Gao (1):
time: Fix a few typos in time[r] related code comments
Steve Wahl (1):
tick/sched: Limit non-timekeeper CPUs calling jiffies update
Sunday Adelodun (1):
time: tick-oneshot: Add missing Return and parameter descriptions to kernel-doc
Thomas Weißschuh (2):
hrtimer: Store time as ktime_t in restart block
selftests/timers/nanosleep: Add tests for return of remaining time
Wake Liu (1):
selftests/timers: Clean up kernel version check in posix_timers
Yury Norov (1):
cpumask: Add initialiser to use cleanup helpers
include/linux/cpumask.h | 2 +
include/linux/delay.h | 8 +-
include/linux/restart_block.h | 2 +-
include/linux/timer.h | 9 +
include/trace/events/timer_migration.h | 4 +-
kernel/cgroup/cpuset.c | 15 +-
kernel/sched/isolation.c | 23 ++
kernel/time/hrtimer.c | 4 +-
kernel/time/posix-cpu-timers.c | 4 +-
kernel/time/posix-timers.c | 2 +-
kernel/time/tick-oneshot.c | 20 +-
kernel/time/tick-sched.c | 30 +-
kernel/time/timer_migration.c | 487 +++++++++++++++++---------
kernel/time/timer_migration.h | 2 +-
tools/testing/selftests/timers/nanosleep.c | 55 +++
tools/testing/selftests/timers/posix_timers.c | 32 +-
16 files changed, 503 insertions(+), 196 deletions(-)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index ff8f41ab7ce6..68be522449ec 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1005,6 +1005,7 @@ static __always_inline unsigned int cpumask_size(void)
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly
+#define CPUMASK_VAR_NULL NULL
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -1051,6 +1052,7 @@ static __always_inline bool cpumask_available(cpumask_var_t mask)
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
+#define CPUMASK_VAR_NULL {}
static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 89866bab100d..46412c00033a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -68,7 +68,7 @@ void usleep_range_state(unsigned long min, unsigned long max,
* @min: Minimum time in microseconds to sleep
* @max: Maximum time in microseconds to sleep
*
- * For basic information please refere to usleep_range_state().
+ * For basic information please refer to usleep_range_state().
*
* The task will be in the state TASK_UNINTERRUPTIBLE during the sleep.
*/
@@ -82,10 +82,10 @@ static inline void usleep_range(unsigned long min, unsigned long max)
* @min: Minimum time in microseconds to sleep
* @max: Maximum time in microseconds to sleep
*
- * For basic information please refere to usleep_range_state().
+ * For basic information please refer to usleep_range_state().
*
* The sleeping task has the state TASK_IDLE during the sleep to prevent
- * contribution to the load avarage.
+ * contribution to the load average.
*/
static inline void usleep_range_idle(unsigned long min, unsigned long max)
{
@@ -96,7 +96,7 @@ static inline void usleep_range_idle(unsigned long min, unsigned long max)
* ssleep - wrapper for seconds around msleep
* @seconds: Requested sleep duration in seconds
*
- * Please refere to msleep() for detailed information.
+ * Please refer to msleep() for detailed information.
*/
static inline void ssleep(unsigned int seconds)
{
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 7e50bbc94e47..36ddfa1ec301 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -43,7 +43,7 @@ struct restart_block {
struct __kernel_timespec __user *rmtp;
struct old_timespec32 __user *compat_rmtp;
};
- u64 expires;
+ ktime_t expires;
} nanosleep;
/* For poll */
struct {
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 0414d9e6b4fc..62e1cea71125 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -188,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask);
+#else
+static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
index 47db5eaf2f9a..61171b13c687 100644
--- a/include/trace/events/timer_migration.h
+++ b/include/trace/events/timer_migration.h
@@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
TP_PROTO(struct tmigr_cpu *tmc),
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
TP_PROTO(struct tmigr_cpu *tmc),
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 27adb04df675..bfc3b319e1c0 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1339,7 +1339,7 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+static void update_isolation_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1350,6 +1350,9 @@ static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+ ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
+ WARN_ON_ONCE(ret < 0);
}
/**
@@ -1470,7 +1473,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
list_add(&cs->remote_sibling, &remote_children);
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_isolation_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
cs->prs_err = 0;
@@ -1511,7 +1514,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_effective_exclusive_cpumask(cs, NULL, NULL);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_isolation_cpumasks(isolcpus_updated);
cpuset_force_rebuild();
/*
@@ -1580,7 +1583,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_isolation_cpumasks(isolcpus_updated);
if (adding || deleting)
cpuset_force_rebuild();
@@ -1943,7 +1946,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
WARN_ON_ONCE(parent->nr_subparts < 0);
}
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_isolation_cpumasks(isolcpus_updated);
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2968,7 +2971,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_unbound_workqueue_cpumask(isolcpus_updated);
+ update_isolation_cpumasks(isolcpus_updated);
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index a4cf17b1fab0..3ad0d6df6a0a 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -167,6 +167,29 @@ static int __init housekeeping_setup(char *str, unsigned long flags)
}
}
+ /*
+ * Check the combination of nohz_full and isolcpus=domain,
+ * necessary to avoid problems with the timer migration
+ * hierarchy. managed_irq is ignored by this check since it
+ * isn't considered in the timer migration logic.
+ */
+ iter_flags = housekeeping.flags & (HK_FLAG_KERNEL_NOISE | HK_FLAG_DOMAIN);
+ type = find_first_bit(&iter_flags, HK_TYPE_MAX);
+ /*
+ * Pass the check if none of these flags were previously set or
+ * are not in the current selection.
+ */
+ iter_flags = flags & (HK_FLAG_KERNEL_NOISE | HK_FLAG_DOMAIN);
+ first_cpu = (type == HK_TYPE_MAX || !iter_flags) ? 0 :
+ cpumask_first_and_and(cpu_present_mask,
+ housekeeping_staging, housekeeping.cpumasks[type]);
+ if (first_cpu >= min(nr_cpu_ids, setup_max_cpus)) {
+ pr_warn("Housekeeping: must include one present CPU "
+ "neither in nohz_full= nor in isolcpus=domain, "
+ "ignoring setting %s\n", str);
+ goto free_housekeeping_staging;
+ }
+
iter_flags = flags & ~housekeeping.flags;
for_each_set_bit(type, &iter_flags, HK_TYPE_MAX)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 7e7b2b471bae..9c77e5c72556 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2145,7 +2145,7 @@ static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
int ret;
hrtimer_setup_sleeper_on_stack(&t, restart->nanosleep.clockid, HRTIMER_MODE_ABS);
- hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+ hrtimer_set_expires(&t.timer, restart->nanosleep.expires);
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
@@ -2172,7 +2172,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
restart = &current->restart_block;
restart->nanosleep.clockid = t.timer.base->clockid;
- restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
+ restart->nanosleep.expires = hrtimer_get_expires(&t.timer);
set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
destroy_hrtimer_on_stack(&t.timer);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2e5b89d7d866..0de2bb7cbec0 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1557,7 +1557,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
* Report back to the user the time still remaining.
*/
restart = &current->restart_block;
- restart->nanosleep.expires = expires;
+ restart->nanosleep.expires = ns_to_ktime(expires);
if (restart->nanosleep.type != TT_NONE)
error = nanosleep_copyout(restart, &it.it_value);
}
@@ -1599,7 +1599,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
clockid_t which_clock = restart_block->nanosleep.clockid;
struct timespec64 t;
- t = ns_to_timespec64(restart_block->nanosleep.expires);
+ t = ktime_to_timespec64(restart_block->nanosleep.expires);
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index aa3120104a51..36dbb8146517 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1242,7 +1242,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
* sys_clock_settime(). The kernel internal timekeeping is always using
* nanoseconds precision independent of the clocksource device which is
* used to read the time from. The resolution of that device only
- * affects the presicion of the time returned by sys_clock_gettime().
+ * affects the precision of the time returned by sys_clock_gettime().
*
* Returns:
* 0 Success. @tp contains the resolution
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 5e2c2c26b3cc..ffee943d796d 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -19,6 +19,10 @@
/**
* tick_program_event - program the CPU local timer device for the next event
+ * @expires: the time at which the next timer event should occur
+ * @force: flag to force reprogramming even if the event time hasn't changed
+ *
+ * Return: 0 on success, negative error code on failure
*/
int tick_program_event(ktime_t expires, int force)
{
@@ -57,6 +61,13 @@ void tick_resume_oneshot(void)
/**
* tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz)
+ * @newdev: Pointer to the clock event device to configure
+ * @handler: Function to be called when the event device triggers an interrupt
+ * @next_event: Initial expiry time for the next event (in ktime)
+ *
+ * Configures the specified clock event device for oneshot mode,
+ * assigns the given handler as its event callback, and programs
+ * the device to trigger at the specified next event time.
*/
void tick_setup_oneshot(struct clock_event_device *newdev,
void (*handler)(struct clock_event_device *),
@@ -69,6 +80,10 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
/**
* tick_switch_to_oneshot - switch to oneshot mode
+ * @handler: function to call when an event occurs on the tick device
+ *
+ * Return: 0 on success, -EINVAL if the tick device is not present,
+ * not functional, or does not support oneshot mode.
*/
int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
{
@@ -101,7 +116,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
/**
* tick_oneshot_mode_active - check whether the system is in oneshot mode
*
- * returns 1 when either nohz or highres are enabled. otherwise 0.
+ * Return: 1 when either nohz or highres are enabled, otherwise 0.
*/
int tick_oneshot_mode_active(void)
{
@@ -120,6 +135,9 @@ int tick_oneshot_mode_active(void)
* tick_init_highres - switch to high resolution mode
*
* Called with interrupts disabled.
+ *
+ * Return: 0 on success, -EINVAL if the tick device cannot switch
+ * to oneshot/high-resolution mode.
*/
int tick_init_highres(void)
{
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c527b421c865..3ff3eb1f90d0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -201,6 +201,27 @@ static inline void tick_sched_flag_clear(struct tick_sched *ts,
ts->flags &= ~flag;
}
+/*
+ * Allow only one non-timekeeper CPU at a time update jiffies from
+ * the timer tick.
+ *
+ * Returns true if update was run.
+ */
+static bool tick_limited_update_jiffies64(struct tick_sched *ts, ktime_t now)
+{
+ static atomic_t in_progress;
+ int inp;
+
+ inp = atomic_read(&in_progress);
+ if (inp || !atomic_try_cmpxchg(&in_progress, &inp, 1))
+ return false;
+
+ if (ts->last_tick_jiffies == jiffies)
+ tick_do_update_jiffies64(now);
+ atomic_set(&in_progress, 0);
+ return true;
+}
+
#define MAX_STALLED_JIFFIES 5
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
@@ -239,10 +260,11 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
ts->stalled_jiffies = 0;
ts->last_tick_jiffies = READ_ONCE(jiffies);
} else {
- if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
- tick_do_update_jiffies64(now);
- ts->stalled_jiffies = 0;
- ts->last_tick_jiffies = READ_ONCE(jiffies);
+ if (++ts->stalled_jiffies >= MAX_STALLED_JIFFIES) {
+ if (tick_limited_update_jiffies64(ts, now)) {
+ ts->stalled_jiffies = 0;
+ ts->last_tick_jiffies = READ_ONCE(jiffies);
+ }
}
}
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index c0c54dc5314c..18dda1aa782d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -420,14 +421,54 @@ static struct list_head *tmigr_level_list __read_mostly;
static unsigned int tmigr_hierarchy_levels __read_mostly;
static unsigned int tmigr_crossnode_level __read_mostly;
+static struct tmigr_group *tmigr_root;
+
static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
+/*
+ * CPUs available for timer migration.
+ * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
+ * Additionally tmigr_available_mutex serializes set/clear operations with each other.
+ */
+static cpumask_var_t tmigr_available_cpumask;
+static DEFINE_MUTEX(tmigr_available_mutex);
+
+/* Enabled during late initcall */
+static DEFINE_STATIC_KEY_FALSE(tmigr_exclude_isolated);
+
#define TMIGR_NONE 0xFF
#define BIT_CNT 8
static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
{
- return !(tmc->tmgroup && tmc->online);
+ return !(tmc->tmgroup && tmc->available);
+}
+
+/*
+ * Returns true if @cpu should be excluded from the hierarchy as isolated.
+ * Domain isolated CPUs don't participate in timer migration, nohz_full CPUs
+ * are still part of the hierarchy but become idle (from a tick and timer
+ * migration perspective) when they stop their tick. This lets the timekeeping
+ * CPU handle their global timers. Marking also isolated CPUs as idle would be
+ * too costly, hence they are completely excluded from the hierarchy.
+ * This check is necessary, for instance, to prevent offline isolated CPUs from
+ * being incorrectly marked as available once getting back online.
+ *
+ * This function returns false during early boot and the isolation logic is
+ * enabled only after isolated CPUs are marked as unavailable at late boot.
+ * The tick CPU can be isolated at boot, however we cannot mark it as
+ * unavailable to avoid having no global migrator for the nohz_full CPUs. This
+ * should be ensured by the callers of this function: implicitly from hotplug
+ * callbacks and explicitly in tmigr_init_isolation() and
+ * tmigr_isolated_exclude_cpumask().
+ */
+static inline bool tmigr_is_isolated(int cpu)
+{
+ if (!static_branch_unlikely(&tmigr_exclude_isolated))
+ return false;
+ return (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN) ||
+ cpuset_cpu_is_isolated(cpu)) &&
+ housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE);
}
/*
@@ -502,11 +543,6 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
* @now: timer base monotonic
* @check: is set if there is the need to handle remote timers;
* required in tmigr_requires_handle_remote() only
- * @tmc_active: this flag indicates, whether the CPU which triggers
- * the hierarchy walk is !idle in the timer migration
- * hierarchy. When the CPU is idle and the whole hierarchy is
- * idle, only the first event of the top level has to be
- * considered.
*/
struct tmigr_walk {
u64 nextexp;
@@ -517,16 +553,13 @@ struct tmigr_walk {
unsigned long basej;
u64 now;
bool check;
- bool tmc_active;
};
typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
-static void __walk_groups(up_f up, struct tmigr_walk *data,
- struct tmigr_cpu *tmc)
+static void __walk_groups_from(up_f up, struct tmigr_walk *data,
+ struct tmigr_group *child, struct tmigr_group *group)
{
- struct tmigr_group *child = NULL, *group = tmc->tmgroup;
-
do {
WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
@@ -544,6 +577,12 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
} while (group);
}
+static void __walk_groups(up_f up, struct tmigr_walk *data,
+ struct tmigr_cpu *tmc)
+{
+ __walk_groups_from(up, data, NULL, tmc->tmgroup);
+}
+
static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
@@ -708,7 +747,7 @@ void tmigr_cpu_activate(void)
/*
* Returns true, if there is nothing to be propagated to the next level
*
- * @data->firstexp is set to expiry of first gobal event of the (top level of
+ * @data->firstexp is set to expiry of first global event of the (top level of
* the) hierarchy, but only when hierarchy is completely idle.
*
* The child and group states need to be read under the lock, to prevent a race
@@ -926,7 +965,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* updated the event takes care when hierarchy is completely
* idle. Otherwise the migrator does it as the event is enqueued.
*/
- if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
+ if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
now < tmc->cpuevt.nextevt.expires) {
raw_spin_unlock_irq(&tmc->lock);
return;
@@ -973,7 +1012,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
* (See also section "Required event and timerqueue update after a
* remote expiry" in the documentation at the top)
*/
- if (!tmc->online || !tmc->idle) {
+ if (!tmc->available || !tmc->idle) {
timer_unlock_remote_bases(cpu);
goto unlock;
}
@@ -1113,15 +1152,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
*/
if (!tmigr_check_migrator(group, childmask))
return true;
-
- /*
- * When there is a parent group and the CPU which triggered the
- * hierarchy walk is not active, proceed the walk to reach the top level
- * group before reading the next_expiry value.
- */
- if (group->parent && !data->tmc_active)
- return false;
-
/*
* The lock is required on 32bit architectures to read the variable
* consistently with a concurrent writer. On 64bit the lock is not
@@ -1166,7 +1196,6 @@ bool tmigr_requires_handle_remote(void)
data.now = get_jiffies_update(&jif);
data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX;
- data.tmc_active = !tmc->idle;
data.check = false;
/*
@@ -1432,38 +1461,43 @@ static long tmigr_trigger_active(void *unused)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
- WARN_ON_ONCE(!tmc->online || tmc->idle);
+ WARN_ON_ONCE(!tmc->available || tmc->idle);
return 0;
}
-static int tmigr_cpu_offline(unsigned int cpu)
+static int tmigr_clear_cpu_available(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
int migrator;
u64 firstexp;
- raw_spin_lock_irq(&tmc->lock);
- tmc->online = false;
- WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+ guard(mutex)(&tmigr_available_mutex);
- /*
- * CPU has to handle the local events on his own, when on the way to
- * offline; Therefore nextevt value is set to KTIME_MAX
- */
- firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
- trace_tmigr_cpu_offline(tmc);
- raw_spin_unlock_irq(&tmc->lock);
+ cpumask_clear_cpu(cpu, tmigr_available_cpumask);
+ scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (!tmc->available)
+ return 0;
+ tmc->available = false;
+ WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+ /*
+ * CPU has to handle the local events on his own, when on the way to
+ * offline; Therefore nextevt value is set to KTIME_MAX
+ */
+ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
+ trace_tmigr_cpu_unavailable(tmc);
+ }
if (firstexp != KTIME_MAX) {
- migrator = cpumask_any_but(cpu_online_mask, cpu);
+ migrator = cpumask_any(tmigr_available_cpumask);
work_on_cpu(migrator, tmigr_trigger_active, NULL);
}
return 0;
}
-static int tmigr_cpu_online(unsigned int cpu)
+static int tmigr_set_cpu_available(unsigned int cpu)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
@@ -1471,16 +1505,123 @@ static int tmigr_cpu_online(unsigned int cpu)
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
- raw_spin_lock_irq(&tmc->lock);
- trace_tmigr_cpu_online(tmc);
- tmc->idle = timer_base_is_idle();
- if (!tmc->idle)
- __tmigr_cpu_activate(tmc);
- tmc->online = true;
- raw_spin_unlock_irq(&tmc->lock);
+ if (tmigr_is_isolated(cpu))
+ return 0;
+
+ guard(mutex)(&tmigr_available_mutex);
+
+ cpumask_set_cpu(cpu, tmigr_available_cpumask);
+ scoped_guard(raw_spinlock_irq, &tmc->lock) {
+ if (tmc->available)
+ return 0;
+ trace_tmigr_cpu_available(tmc);
+ tmc->idle = timer_base_is_idle();
+ if (!tmc->idle)
+ __tmigr_cpu_activate(tmc);
+ tmc->available = true;
+ }
return 0;
}
+static void tmigr_cpu_isolate(struct work_struct *ignored)
+{
+ tmigr_clear_cpu_available(smp_processor_id());
+}
+
+static void tmigr_cpu_unisolate(struct work_struct *ignored)
+{
+ tmigr_set_cpu_available(smp_processor_id());
+}
+
+/**
+ * tmigr_isolated_exclude_cpumask - Exclude given CPUs from hierarchy
+ * @exclude_cpumask: the cpumask to be excluded from timer migration hierarchy
+ *
+ * This function can be called from cpuset code to provide the new set of
+ * isolated CPUs that should be excluded from the hierarchy.
+ * Online CPUs not present in exclude_cpumask but already excluded are brought
+ * back to the hierarchy.
+ * Functions to isolate/unisolate need to be called locally and can sleep.
+ */
+int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ struct work_struct __percpu *works __free(free_percpu) =
+ alloc_percpu(struct work_struct);
+ cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ if (!works)
+ return -ENOMEM;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * First set previously isolated CPUs as available (unisolate).
+ * This cpumask contains only CPUs that switched to available now.
+ */
+ cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
+ cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
+
+ for_each_cpu(cpu, cpumask) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, tmigr_cpu_unisolate);
+ schedule_work_on(cpu, work);
+ }
+ for_each_cpu(cpu, cpumask)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ /*
+ * Then clear previously available CPUs (isolate).
+ * This cpumask contains only CPUs that switched to not available now.
+ * There cannot be overlap with the newly available ones.
+ */
+ cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
+ cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
+ /*
+ * Handle this here and not in the cpuset code because exclude_cpumask
+ * might include also the tick CPU if included in isolcpus.
+ */
+ for_each_cpu(cpu, cpumask) {
+ if (!tick_nohz_cpu_hotpluggable(cpu)) {
+ cpumask_clear_cpu(cpu, cpumask);
+ break;
+ }
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, tmigr_cpu_isolate);
+ schedule_work_on(cpu, work);
+ }
+ for_each_cpu(cpu, cpumask)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ return 0;
+}
+
+static int __init tmigr_init_isolation(void)
+{
+ cpumask_var_t cpumask __free(free_cpumask_var) = CPUMASK_VAR_NULL;
+
+ static_branch_enable(&tmigr_exclude_isolated);
+
+ if (!housekeeping_enabled(HK_TYPE_DOMAIN))
+ return 0;
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_andnot(cpumask, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+
+ /* Protect against RCU torture hotplug testing */
+ guard(cpus_read_lock)();
+ return tmigr_isolated_exclude_cpumask(cpumask);
+}
+late_initcall(tmigr_init_isolation);
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
@@ -1498,21 +1639,6 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
s.seq = 0;
atomic_set(&group->migr_state, s.state);
- /*
- * If this is a new top-level, prepare its groupmask in advance.
- * This avoids accidents where yet another new top-level is
- * created in the future and made visible before the current groupmask.
- */
- if (list_empty(&tmigr_level_list[lvl])) {
- group->groupmask = BIT(0);
- /*
- * The previous top level has prepared its groupmask already,
- * simply account it as the first child.
- */
- if (lvl > 0)
- group->num_children = 1;
- }
-
timerqueue_init_head(&group->events);
timerqueue_init(&group->groupevt.nextevt);
group->groupevt.nextevt.expires = KTIME_MAX;
@@ -1520,8 +1646,7 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
group->groupevt.ignore = true;
}
-static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
- unsigned int lvl)
+static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
{
struct tmigr_group *tmp, *group = NULL;
@@ -1567,25 +1692,51 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
return group;
}
+static bool tmigr_init_root(struct tmigr_group *group, bool activate)
+{
+ if (!group->parent && group != tmigr_root) {
+ /*
+ * This is the new top-level, prepare its groupmask in advance
+ * to avoid accidents where yet another new top-level is
+ * created in the future and made visible before this groupmask.
+ */
+ group->groupmask = BIT(0);
+ WARN_ON_ONCE(activate);
+
+ return true;
+ }
+
+ return false;
+
+}
+
static void tmigr_connect_child_parent(struct tmigr_group *child,
struct tmigr_group *parent,
bool activate)
{
- struct tmigr_walk data;
-
- raw_spin_lock_irq(&child->lock);
- raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+ if (tmigr_init_root(parent, activate)) {
+ /*
+ * The previous top level had prepared its groupmask already,
+ * simply account it in advance as the first child. If some groups
+ * have been created between the old and new root due to node
+ * mismatch, the new root's child will be initialized accordingly.
+ */
+ parent->num_children = 1;
+ }
- if (activate) {
+ /* Connecting old root to new root ? */
+ if (!parent->parent && activate) {
/*
- * @child is the old top and @parent the new one. In this
- * case groupmask is pre-initialized and @child already
- * accounted, along with its new sibling corresponding to the
- * CPU going up.
+ * @child is the old top, or in case of node mismatch, some
+ * intermediate group between the old top and the new one in
+ * @parent. In this case the @child must be pre-accounted above
+ * as the first child. Its new inactive sibling corresponding
+ * to the CPU going up has been accounted as the second child.
*/
- WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2);
+ WARN_ON_ONCE(parent->num_children != 2);
+ child->groupmask = BIT(0);
} else {
- /* Adding @child for the CPU going up to @parent. */
+ /* Common case: adding @child for the CPU going up to @parent. */
child->groupmask = BIT(parent->num_children++);
}
@@ -1596,87 +1747,61 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
*/
smp_store_release(&child->parent, parent);
- raw_spin_unlock(&parent->lock);
- raw_spin_unlock_irq(&child->lock);
-
trace_tmigr_connect_child_parent(child);
-
- if (!activate)
- return;
-
- /*
- * To prevent inconsistent states, active children need to be active in
- * the new parent as well. Inactive children are already marked inactive
- * in the parent group:
- *
- * * When new groups were created by tmigr_setup_groups() starting from
- * the lowest level (and not higher then one level below the current
- * top level), then they are not active. They will be set active when
- * the new online CPU comes active.
- *
- * * But if a new group above the current top level is required, it is
- * mandatory to propagate the active state of the already existing
- * child to the new parent. So tmigr_connect_child_parent() is
- * executed with the formerly top level group (child) and the newly
- * created group (parent).
- *
- * * It is ensured that the child is active, as this setup path is
- * executed in hotplug prepare callback. This is exectued by an
- * already connected and !idle CPU. Even if all other CPUs go idle,
- * the CPU executing the setup will be responsible up to current top
- * level group. And the next time it goes inactive, it will release
- * the new childmask and parent to subsequent walkers through this
- * @child. Therefore propagate active state unconditionally.
- */
- data.childmask = child->groupmask;
-
- /*
- * There is only one new level per time (which is protected by
- * tmigr_mutex). When connecting the child and the parent and set the
- * child active when the parent is inactive, the parent needs to be the
- * uppermost level. Otherwise there went something wrong!
- */
- WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
}
-static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
+ struct tmigr_group *start, bool activate)
{
struct tmigr_group *group, *child, **stack;
- int top = 0, err = 0, i = 0;
- struct list_head *lvllist;
+ int i, top = 0, err = 0, start_lvl = 0;
+ bool root_mismatch = false;
stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
if (!stack)
return -ENOMEM;
- do {
- group = tmigr_get_group(cpu, node, i);
+ if (start) {
+ stack[start->level] = start;
+ start_lvl = start->level + 1;
+ }
+
+ if (tmigr_root)
+ root_mismatch = tmigr_root->numa_node != node;
+
+ for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
+ group = tmigr_get_group(node, i);
if (IS_ERR(group)) {
err = PTR_ERR(group);
+ i--;
break;
}
top = i;
- stack[i++] = group;
+ stack[i] = group;
/*
* When booting only less CPUs of a system than CPUs are
- * available, not all calculated hierarchy levels are required.
+ * available, not all calculated hierarchy levels are required,
+ * unless a node mismatch is detected.
*
* The loop is aborted as soon as the highest level, which might
* be different from tmigr_hierarchy_levels, contains only a
- * single group.
+ * single group, unless the nodes mismatch below tmigr_crossnode_level.
*/
- if (group->parent || list_is_singular(&tmigr_level_list[i - 1]))
+ if (group->parent)
break;
+ if ((!root_mismatch || i >= tmigr_crossnode_level) &&
+ list_is_singular(&tmigr_level_list[i]))
+ break;
+ }
- } while (i < tmigr_hierarchy_levels);
-
- /* Assert single root */
- WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
+ /* Assert single root without parent */
+ if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels))
+ return -EINVAL;
- while (i > 0) {
- group = stack[--i];
+ for (; i >= start_lvl; i--) {
+ group = stack[i];
if (err < 0) {
list_del(&group->list);
@@ -1692,12 +1817,10 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
if (i == 0) {
struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
- raw_spin_lock_irq(&group->lock);
-
tmc->tmgroup = group;
tmc->groupmask = BIT(group->num_children++);
- raw_spin_unlock_irq(&group->lock);
+ tmigr_init_root(group, activate);
trace_tmigr_connect_cpu_parent(tmc);
@@ -1705,42 +1828,58 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
continue;
} else {
child = stack[i - 1];
- /* Will be activated at online time */
- tmigr_connect_child_parent(child, group, false);
+ tmigr_connect_child_parent(child, group, activate);
}
+ }
- /* check if uppermost level was newly created */
- if (top != i)
- continue;
-
- WARN_ON_ONCE(top == 0);
+ if (err < 0)
+ goto out;
- lvllist = &tmigr_level_list[top];
+ if (activate) {
+ struct tmigr_walk data;
+ union tmigr_state state;
/*
- * Newly created root level should have accounted the upcoming
- * CPU's child group and pre-accounted the old root.
+ * To prevent inconsistent states, active children need to be active in
+ * the new parent as well. Inactive children are already marked inactive
+ * in the parent group:
+ *
+ * * When new groups were created by tmigr_setup_groups() starting from
+ * the lowest level, then they are not active. They will be set active
+ * when the new online CPU comes active.
+ *
+ * * But if new groups above the current top level are required, it is
+ * mandatory to propagate the active state of the already existing
+ * child to the new parents. So tmigr_active_up() activates the
+ * new parents while walking up from the old root to the new.
+ *
+ * * It is ensured that @start is active, as this setup path is
+ * executed in hotplug prepare callback. This is executed by an
+ * already connected and !idle CPU. Even if all other CPUs go idle,
+ * the CPU executing the setup will be responsible up to current top
+ * level group. And the next time it goes inactive, it will release
+ * the new childmask and parent to subsequent walkers through this
+ * @child. Therefore propagate active state unconditionally.
*/
- if (group->num_children == 2 && list_is_singular(lvllist)) {
- /*
- * The target CPU must never do the prepare work, except
- * on early boot when the boot CPU is the target. Otherwise
- * it may spuriously activate the old top level group inside
- * the new one (nevertheless whether old top level group is
- * active or not) and/or release an uninitialized childmask.
- */
- WARN_ON_ONCE(cpu == raw_smp_processor_id());
-
- lvllist = &tmigr_level_list[top - 1];
- list_for_each_entry(child, lvllist, list) {
- if (child->parent)
- continue;
+ state.state = atomic_read(&start->migr_state);
+ WARN_ON_ONCE(!state.active);
+ WARN_ON_ONCE(!start->parent);
+ data.childmask = start->groupmask;
+ __walk_groups_from(tmigr_active_up, &data, start, start->parent);
+ }
- tmigr_connect_child_parent(child, group, true);
- }
+ /* Root update */
+ if (list_is_singular(&tmigr_level_list[top])) {
+ group = list_first_entry(&tmigr_level_list[top],
+ typeof(*group), list);
+ WARN_ON_ONCE(group->parent);
+ if (tmigr_root) {
+ /* Old root should be the same or below */
+ WARN_ON_ONCE(tmigr_root->level > top);
}
+ tmigr_root = group;
}
-
+out:
kfree(stack);
return err;
@@ -1748,12 +1887,31 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
static int tmigr_add_cpu(unsigned int cpu)
{
+ struct tmigr_group *old_root = tmigr_root;
int node = cpu_to_node(cpu);
int ret;
- mutex_lock(&tmigr_mutex);
- ret = tmigr_setup_groups(cpu, node);
- mutex_unlock(&tmigr_mutex);
+ guard(mutex)(&tmigr_mutex);
+
+ ret = tmigr_setup_groups(cpu, node, NULL, false);
+
+ /* Root has changed? Connect the old one to the new */
+ if (ret >= 0 && old_root && old_root != tmigr_root) {
+ /*
+ * The target CPU must never do the prepare work, except
+ * on early boot when the boot CPU is the target. Otherwise
+ * it may spuriously activate the old top level group inside
+ * the new one (regardless of whether the old top level group is
+ * active or not) and/or release an uninitialized childmask.
+ */
+ WARN_ON_ONCE(cpu == raw_smp_processor_id());
+ /*
+ * The (likely) current CPU is expected to be online in the hierarchy,
+ * otherwise the old root may not be active as expected.
+ */
+ WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
+ ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
+ }
return ret;
}
@@ -1798,6 +1956,11 @@ static int __init tmigr_init(void)
if (ncpus == 1)
return 0;
+ if (!zalloc_cpumask_var(&tmigr_available_cpumask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* Calculate the required hierarchy levels. Unfortunately there is no
* reliable information available, unless all possible CPUs have been
@@ -1847,7 +2010,7 @@ static int __init tmigr_init(void)
goto err;
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
- tmigr_cpu_online, tmigr_cpu_offline);
+ tmigr_set_cpu_available, tmigr_clear_cpu_available);
if (ret)
goto err;
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
index ae19f70f8170..70879cde6fdd 100644
--- a/kernel/time/timer_migration.h
+++ b/kernel/time/timer_migration.h
@@ -97,7 +97,7 @@ struct tmigr_group {
*/
struct tmigr_cpu {
raw_spinlock_t lock;
- bool online;
+ bool available;
bool idle;
bool remote;
struct tmigr_group *tmgroup;
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index 252c6308c569..10badae13ebe 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -116,6 +116,56 @@ int nanosleep_test(int clockid, long long ns)
return 0;
}
+static void dummy_event_handler(int val)
+{
+ /* No action needed */
+}
+
+static int nanosleep_test_remaining(int clockid)
+{
+ struct timespec rqtp = {}, rmtp = {};
+ struct itimerspec itimer = {};
+ struct sigaction sa = {};
+ timer_t timer;
+ int ret;
+
+ sa.sa_handler = dummy_event_handler;
+ ret = sigaction(SIGALRM, &sa, NULL);
+ if (ret)
+ return -1;
+
+ ret = timer_create(clockid, NULL, &timer);
+ if (ret)
+ return -1;
+
+ itimer.it_value.tv_nsec = NSEC_PER_SEC / 4;
+ ret = timer_settime(timer, 0, &itimer, NULL);
+ if (ret)
+ return -1;
+
+ rqtp.tv_nsec = NSEC_PER_SEC / 2;
+ ret = clock_nanosleep(clockid, 0, &rqtp, &rmtp);
+ if (ret != EINTR)
+ return -1;
+
+ ret = timer_delete(timer);
+ if (ret)
+ return -1;
+
+ sa.sa_handler = SIG_DFL;
+ ret = sigaction(SIGALRM, &sa, NULL);
+ if (ret)
+ return -1;
+
+ if (!in_order((struct timespec) {}, rmtp))
+ return -1;
+
+ if (!in_order(rmtp, rqtp))
+ return -1;
+
+ return 0;
+}
+
int main(int argc, char **argv)
{
long long length;
@@ -150,6 +200,11 @@ int main(int argc, char **argv)
}
length *= 100;
}
+ ret = nanosleep_test_remaining(clockid);
+ if (ret < 0) {
+ ksft_test_result_fail("%-31s\n", clockstring(clockid));
+ ksft_exit_fail();
+ }
ksft_test_result_pass("%-31s\n", clockstring(clockid));
next:
ret = 0;
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index f0eceb0faf34..a563c438ac79 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -18,6 +18,7 @@
#include <time.h>
#include <include/vdso/time64.h>
#include <pthread.h>
+#include <stdbool.h>
#include "../kselftest.h"
@@ -670,8 +671,14 @@ static void check_timer_create_exact(void)
int main(int argc, char **argv)
{
+ bool run_sig_ign_tests = ksft_min_kernel_version(6, 13);
+
ksft_print_header();
- ksft_set_plan(19);
+ if (run_sig_ign_tests) {
+ ksft_set_plan(19);
+ } else {
+ ksft_set_plan(10);
+ }
ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
ksft_print_msg("based timers if other threads run on the CPU...\n");
@@ -695,15 +702,20 @@ int main(int argc, char **argv)
check_timer_create(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_timer_distribution();
- check_sig_ign(0);
- check_sig_ign(1);
- check_rearm();
- check_delete();
- check_sigev_none(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
- check_sigev_none(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
- check_gettime(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
- check_gettime(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
- check_gettime(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");
+ if (run_sig_ign_tests) {
+ check_sig_ign(0);
+ check_sig_ign(1);
+ check_rearm();
+ check_delete();
+ check_sigev_none(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
+ check_sigev_none(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
+ check_gettime(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
+ check_gettime(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
+ check_gettime(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");
+ } else {
+ ksft_print_msg("Skipping SIG_IGN tests on kernel < 6.13\n");
+ }
+
check_overrun(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
check_overrun(CLOCK_PROCESS_CPUTIME_ID, "CLOCK_PROCESS_CPUTIME_ID");
check_overrun(CLOCK_THREAD_CPUTIME_ID, "CLOCK_THREAD_CPUTIME_ID");