On Wed, Jun 8, 2022 at 4:47 PM Peter Zijlstra <peterz@infradead.org> wrote:
>
> vmlinux.o: warning: objtool: io_idle+0xc: call to __inb.isra.0() leaves .noinstr.text section
> vmlinux.o: warning: objtool: acpi_idle_enter+0xfe: call to num_online_cpus() leaves .noinstr.text section
> vmlinux.o: warning: objtool: acpi_idle_enter+0x115: call to acpi_idle_fallback_to_c1.isra.0() leaves .noinstr.text section
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
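
A side note for anyone following along: the warnings above are objtool
enforcing the noinstr rules. Code in .noinstr.text must not call out into
plain .text, and "static inline" is only a hint, so the compiler is free to
keep an out-of-line copy of such helpers (that is where the ".isra.0" clones
in the warnings come from); __always_inline takes that freedom away. A
minimal standalone sketch of the rule, using stand-in macro names rather
than the kernel's real noinstr/__always_inline definitions:

/* Sketch only: stand-ins for the kernel's macros. */
#define noinstr_demo	__attribute__((__noinline__, __section__(".noinstr.text")))
#define force_inline	inline __attribute__((__always_inline__))

static inline int maybe_outlined(int x)		/* may be emitted out of line in .text */
{
        return x + 1;
}

static force_inline int folded(int x)		/* guaranteed to be folded into the caller */
{
        return x + 1;
}

noinstr_demo int idle_entry(int x)
{
        /*
         * A call to maybe_outlined() could become "call maybe_outlined"
         * into .text, which is exactly what objtool flags; folded() cannot.
         */
        return folded(x);
}
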
> ---
> arch/x86/include/asm/shared/io.h | 4 ++--
> drivers/acpi/processor_idle.c | 2 +-
> include/linux/cpumask.h | 4 ++--
> 3 files changed, 5 insertions(+), 5 deletions(-)
>
> --- a/arch/x86/include/asm/shared/io.h
> +++ b/arch/x86/include/asm/shared/io.h
> @@ -5,13 +5,13 @@
> #include <linux/types.h>
>
> #define BUILDIO(bwl, bw, type) \
> -static inline void __out##bwl(type value, u16 port) \
> +static __always_inline void __out##bwl(type value, u16 port) \
> { \
> asm volatile("out" #bwl " %" #bw "0, %w1" \
> : : "a"(value), "Nd"(port)); \
> } \
> \
> -static inline type __in##bwl(u16 port) \
> +static __always_inline type __in##bwl(u16 port) \
> { \
> type value; \
> asm volatile("in" #bwl " %w1, %" #bw "0" \
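
FWIW, __inb() from the first warning is generated by this macro; with the
change, the byte variant (assuming the usual BUILDIO(b, b, u8) instantiation,
written out roughly from memory rather than verbatim) comes out as:

static __always_inline u8 __inb(u16 port)
{
        u8 value;
        asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
        return value;
}

so io_idle(), which sits in .noinstr.text, gets the IN instruction folded in
directly instead of an out-of-line __inb.isra.0 call into .text.
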
> --- a/drivers/acpi/processor_idle.c
> +++ b/drivers/acpi/processor_idle.c
> @@ -593,7 +593,7 @@ static int acpi_idle_play_dead(struct cp
> return 0;
> }
>
> -static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
> +static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
> {
> return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
> !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
> --- a/include/linux/cpumask.h
> +++ b/include/linux/cpumask.h
> @@ -908,9 +908,9 @@ static inline const struct cpumask *get_
> * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
> * region.
> */
> -static inline unsigned int num_online_cpus(void)
> +static __always_inline unsigned int num_online_cpus(void)
> {
> - return atomic_read(&__num_online_cpus);
> + return arch_atomic_read(&__num_online_cpus);
> }
> #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
> #define num_present_cpus() cpumask_weight(cpu_present_mask)
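
The atomic_read() -> arch_atomic_read() switch is the same story one level
down: the generic atomic_read() goes through the instrumented wrappers, which
add KASAN/KCSAN checks and hence calls that objtool rightly rejects in
noinstr code, while arch_atomic_read() is the raw, uninstrumented arch
implementation. Roughly (a simplified shape of the wrapper, not a literal
copy of the kernel's code):

static __always_inline int atomic_read(const atomic_t *v)
{
        instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hooks: calls into .text */
        return arch_atomic_read(v);		/* raw read of v->counter */
}

So with num_online_cpus() made __always_inline and switched to the arch
variant, the whole thing reduces to a plain load inside the noinstr caller.
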
>
>