Enabling the build with the generic entry/exit framework on the powerpc
architecture requires a few preparatory steps.

Introduce minor infrastructure updates to prepare for the upcoming
conversion to the generic framework:
- Add syscall_work field to struct thread_info for SYSCALL_WORK_* flags.
- Provide arch_syscall_is_vdso_sigreturn() stub, returning false.
- Add on_thread_stack() helper to test whether the current stack pointer
lies within the task’s kernel stack.
No functional change is intended with this patch.
Signed-off-by: Mukesh Kumar Chaurasiya <mchauras@linux.ibm.com>
---
arch/powerpc/include/asm/entry-common.h | 11 +++++++++++
arch/powerpc/include/asm/stacktrace.h | 8 ++++++++
arch/powerpc/include/asm/syscall.h | 5 +++++
arch/powerpc/include/asm/thread_info.h | 1 +
4 files changed, 25 insertions(+)
create mode 100644 arch/powerpc/include/asm/entry-common.h
diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
new file mode 100644
index 0000000000000..3af16d821d07e
--- /dev/null
+++ b/arch/powerpc/include/asm/entry-common.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_PPC_ENTRY_COMMON_H
+#define _ASM_PPC_ENTRY_COMMON_H
+
+#ifdef CONFIG_GENERIC_IRQ_ENTRY
+
+#include <asm/stacktrace.h>
+
+#endif /* CONFIG_GENERIC_IRQ_ENTRY */
+#endif /* _ASM_PPC_ENTRY_COMMON_H */
diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
index 6149b53b3bc8e..3f0a242468813 100644
--- a/arch/powerpc/include/asm/stacktrace.h
+++ b/arch/powerpc/include/asm/stacktrace.h
@@ -8,6 +8,14 @@
#ifndef _ASM_POWERPC_STACKTRACE_H
#define _ASM_POWERPC_STACKTRACE_H
+#include <linux/sched.h>
+
void show_user_instructions(struct pt_regs *regs);
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer)
+ & ~(THREAD_SIZE -1));
+}
+
#endif /* _ASM_POWERPC_STACKTRACE_H */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 4b3c52ed6e9d2..834fcc4f7b543 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -139,4 +139,9 @@ static inline int syscall_get_arch(struct task_struct *task)
else
return AUDIT_ARCH_PPC64;
}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 2785c7462ebf7..d0e87c9bae0b0 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -54,6 +54,7 @@
struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
#ifdef CONFIG_SMP
unsigned int cpu;
#endif
--
2.51.0
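For context on the on_thread_stack() helper added above: the kernel stack is a
THREAD_SIZE-sized, THREAD_SIZE-aligned block, so two addresses lie on the same
stack exactly when they agree in every bit above log2(THREAD_SIZE). Below is a
minimal user-space sketch of the same masking idea; THREAD_SIZE and the
addresses are made-up values purely for illustration, assuming THREAD_SIZE is a
power of two as in the kernel:

#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)	/* illustrative value only */

/* True when both addresses fall inside the same THREAD_SIZE-aligned block. */
static bool same_stack(unsigned long stack_base, unsigned long sp)
{
	/*
	 * XOR keeps only the differing bits; masking off the low
	 * log2(THREAD_SIZE) bits ignores the offset within the block,
	 * so the result is zero iff the block-selecting high bits match.
	 */
	return !((stack_base ^ sp) & ~(THREAD_SIZE - 1));
}

int main(void)
{
	unsigned long base = 0x12340000UL;	/* pretend THREAD_SIZE-aligned stack base */

	printf("%d\n", same_stack(base, base + 0x1f00));	/* 1: same stack */
	printf("%d\n", same_stack(base, base + THREAD_SIZE));	/* 0: next block */
	return 0;
}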
On 9/9/25 2:32 AM, Mukesh Kumar Chaurasiya wrote:
> Enabling the build with the generic entry/exit framework on the powerpc
> architecture requires a few preparatory steps.
>
> Introduce minor infrastructure updates to prepare for the upcoming
> conversion to the generic framework:
>
> - Add syscall_work field to struct thread_info for SYSCALL_WORK_* flags.
> - Provide arch_syscall_is_vdso_sigreturn() stub, returning false.
> - Add on_thread_stack() helper to test whether the current stack pointer
> lies within the task’s kernel stack.
>
> No functional change is intended with this patch.
>
> Signed-off-by: Mukesh Kumar Chaurasiya <mchauras@linux.ibm.com>
> ---
> arch/powerpc/include/asm/entry-common.h | 11 +++++++++++
> arch/powerpc/include/asm/stacktrace.h | 8 ++++++++
> arch/powerpc/include/asm/syscall.h | 5 +++++
> arch/powerpc/include/asm/thread_info.h | 1 +
> 4 files changed, 25 insertions(+)
> create mode 100644 arch/powerpc/include/asm/entry-common.h
>
> diff --git a/arch/powerpc/include/asm/entry-common.h b/arch/powerpc/include/asm/entry-common.h
> new file mode 100644
> index 0000000000000..3af16d821d07e
> --- /dev/null
> +++ b/arch/powerpc/include/asm/entry-common.h
> @@ -0,0 +1,11 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef _ASM_PPC_ENTRY_COMMON_H
> +#define _ASM_PPC_ENTRY_COMMON_H
> +
> +#ifdef CONFIG_GENERIC_IRQ_ENTRY
> +
> +#include <asm/stacktrace.h>
> +
> +#endif /* CONFIG_GENERIC_IRQ_ENTRY */
> +#endif /* _ASM_PPC_ENTRY_COMMON_H */
> diff --git a/arch/powerpc/include/asm/stacktrace.h b/arch/powerpc/include/asm/stacktrace.h
> index 6149b53b3bc8e..3f0a242468813 100644
> --- a/arch/powerpc/include/asm/stacktrace.h
> +++ b/arch/powerpc/include/asm/stacktrace.h
> @@ -8,6 +8,14 @@
> #ifndef _ASM_POWERPC_STACKTRACE_H
> #define _ASM_POWERPC_STACKTRACE_H
>
> +#include <linux/sched.h>
nit:
Is sched.h needed? I don't see any reference here.
It compiled for me without it.
> +
> void show_user_instructions(struct pt_regs *regs);
>
> +static inline bool on_thread_stack(void)
> +{
> + return !(((unsigned long)(current->stack) ^ current_stack_pointer)
> + & ~(THREAD_SIZE -1));
> +}
> +
> #endif /* _ASM_POWERPC_STACKTRACE_H */
> diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
> index 4b3c52ed6e9d2..834fcc4f7b543 100644
> --- a/arch/powerpc/include/asm/syscall.h
> +++ b/arch/powerpc/include/asm/syscall.h
> @@ -139,4 +139,9 @@ static inline int syscall_get_arch(struct task_struct *task)
> else
> return AUDIT_ARCH_PPC64;
> }
> +
> +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
> +{
> + return false;
> +}
> #endif /* _ASM_SYSCALL_H */
> diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
> index 2785c7462ebf7..d0e87c9bae0b0 100644
> --- a/arch/powerpc/include/asm/thread_info.h
> +++ b/arch/powerpc/include/asm/thread_info.h
> @@ -54,6 +54,7 @@
> struct thread_info {
> int preempt_count; /* 0 => preemptable,
> <0 => BUG */
> + unsigned long syscall_work; /* SYSCALL_WORK_ flags */
Can this go after cpu? It would be 8-byte aligned then. Since it is in
the fast path, it might help.
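For illustration, roughly what that placement gives on a 64-bit build with
CONFIG_SMP=y (offsets and field set are illustrative, remaining fields elided):

/* Illustrative sketch only; the real struct thread_info has more fields. */
struct thread_info_sketch {
	int		preempt_count;	/* offset 0, 4 bytes */
	unsigned int	cpu;		/* offset 4, 4 bytes */
	unsigned long	syscall_work;	/* offset 8: naturally 8-byte aligned,
					 * no padding hole before it */
	/* ... */
};

With syscall_work placed directly after preempt_count instead, the compiler
would have to insert a 4-byte hole before it on 64-bit.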
> #ifdef CONFIG_SMP
> unsigned int cpu;
> #endif
On 9/13/25 6:19 PM, Shrikanth Hegde wrote:
>
>
> On 9/9/25 2:32 AM, Mukesh Kumar Chaurasiya wrote:
>> Enabling the build with the generic entry/exit framework on the powerpc
>> architecture requires a few preparatory steps.
>>
>> Introduce minor infrastructure updates to prepare for the upcoming
>> conversion to the generic framework:
>>
>> - Add syscall_work field to struct thread_info for SYSCALL_WORK_* flags.
>> - Provide arch_syscall_is_vdso_sigreturn() stub, returning false.
>> - Add on_thread_stack() helper to test whether the current stack pointer
>> lies within the task’s kernel stack.
>>
>> No functional change is intended with this patch.
>>
>> Signed-off-by: Mukesh Kumar Chaurasiya <mchauras@linux.ibm.com>
>> ---
>> arch/powerpc/include/asm/entry-common.h | 11 +++++++++++
>> arch/powerpc/include/asm/stacktrace.h | 8 ++++++++
>> arch/powerpc/include/asm/syscall.h | 5 +++++
>> arch/powerpc/include/asm/thread_info.h | 1 +
>> 4 files changed, 25 insertions(+)
>> create mode 100644 arch/powerpc/include/asm/entry-common.h
>>
>> diff --git a/arch/powerpc/include/asm/entry-common.h
>> b/arch/powerpc/include/asm/entry-common.h
>> new file mode 100644
>> index 0000000000000..3af16d821d07e
>> --- /dev/null
>> +++ b/arch/powerpc/include/asm/entry-common.h
>> @@ -0,0 +1,11 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +
>> +#ifndef _ASM_PPC_ENTRY_COMMON_H
>> +#define _ASM_PPC_ENTRY_COMMON_H
>> +
>> +#ifdef CONFIG_GENERIC_IRQ_ENTRY
>> +
>> +#include <asm/stacktrace.h>
>> +
>> +#endif /* CONFIG_GENERIC_IRQ_ENTRY */
>> +#endif /* _ASM_PPC_ENTRY_COMMON_H */
>> diff --git a/arch/powerpc/include/asm/stacktrace.h
>> b/arch/powerpc/include/asm/stacktrace.h
>> index 6149b53b3bc8e..3f0a242468813 100644
>> --- a/arch/powerpc/include/asm/stacktrace.h
>> +++ b/arch/powerpc/include/asm/stacktrace.h
>> @@ -8,6 +8,14 @@
>> #ifndef _ASM_POWERPC_STACKTRACE_H
>> #define _ASM_POWERPC_STACKTRACE_H
>> +#include <linux/sched.h>
>
> nit:
>
> Is sched.h needed? I don't see any reference here.
> It compiled for me without it.
>
Will remove this in the next revision.
>> +
>> void show_user_instructions(struct pt_regs *regs);
>> +static inline bool on_thread_stack(void)
>> +{
>> + return !(((unsigned long)(current->stack) ^ current_stack_pointer)
>> + & ~(THREAD_SIZE -1));
>> +}
>> +
>> #endif /* _ASM_POWERPC_STACKTRACE_H */
>> diff --git a/arch/powerpc/include/asm/syscall.h
>> b/arch/powerpc/include/asm/syscall.h
>> index 4b3c52ed6e9d2..834fcc4f7b543 100644
>> --- a/arch/powerpc/include/asm/syscall.h
>> +++ b/arch/powerpc/include/asm/syscall.h
>> @@ -139,4 +139,9 @@ static inline int syscall_get_arch(struct
>> task_struct *task)
>> else
>> return AUDIT_ARCH_PPC64;
>> }
>> +
>> +static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
>> +{
>> + return false;
>> +}
>> #endif /* _ASM_SYSCALL_H */
>> diff --git a/arch/powerpc/include/asm/thread_info.h
>> b/arch/powerpc/include/asm/thread_info.h
>> index 2785c7462ebf7..d0e87c9bae0b0 100644
>> --- a/arch/powerpc/include/asm/thread_info.h
>> +++ b/arch/powerpc/include/asm/thread_info.h
>> @@ -54,6 +54,7 @@
>> struct thread_info {
>> int preempt_count; /* 0 => preemptable,
>> <0 => BUG */
>> + unsigned long syscall_work; /* SYSCALL_WORK_ flags */
>
> Can this go after cpu? It would be 8-byte aligned then. Since it is
> in the fast path, it might help.
>
Oh yeah, will move this in the next revision.
Thanks,
Mukesh
>> #ifdef CONFIG_SMP
>> unsigned int cpu;
>> #endif
>