From: Liao Chang
Subject: [PATCH v5 2/2] arm64: Deprecate the old daifflags helpers
Date: Thu, 22 May 2025 10:56:58 +0000
Message-ID: <20250522105658.1338331-3-liaochang1@huawei.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20250522105658.1338331-1-liaochang1@huawei.com>
References: <20250522105658.1338331-1-liaochang1@huawei.com>

Replace all of the daifflags helpers used in the arm64 architecture code
with their counterparts defined in exception_mask.h.

Signed-off-by: Liao Chang
CC: Mark Brown
CC: Mark Rutland
CC: Marc Zyngier
CC: Jonathan Cameron
---
 arch/arm64/include/asm/daifflags.h | 144 ---------------------------
 arch/arm64/include/asm/kvm_host.h | 2 +-
 arch/arm64/include/asm/mmu_context.h | 2 +-
 arch/arm64/kernel/acpi.c | 10 +-
 arch/arm64/kernel/debug-monitors.c | 6 +-
 arch/arm64/kernel/entry-common.c | 2 +-
 arch/arm64/kernel/hibernate.c | 6 +-
 arch/arm64/kernel/irq.c | 4 +-
 arch/arm64/kernel/machine_kexec.c | 4 +-
 arch/arm64/kernel/probes/kprobes.c | 2 +-
 arch/arm64/kernel/setup.c | 4 +-
 arch/arm64/kernel/signal.c | 2 +-
 arch/arm64/kernel/smp.c | 10 +-
 arch/arm64/kernel/suspend.c | 6 +-
 arch/arm64/kernel/traps.c | 2 +-
 arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 +-
 arch/arm64/kvm/hyp/vhe/switch.c | 4 +-
 arch/arm64/mm/fault.c | 2 +-
 arch/arm64/mm/mmu.c | 6 +-
 19 files changed, 39 insertions(+), 183 deletions(-)
 delete mode 100644 arch/arm64/include/asm/daifflags.h

diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
deleted file mode 100644
index fbb5c99eb2f9..000000000000
--- a/arch/arm64/include/asm/daifflags.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 ARM Ltd.
- */
-#ifndef __ASM_DAIFFLAGS_H
-#define __ASM_DAIFFLAGS_H
-
-#include 
-
-#include 
-#include 
-#include 
-#include 
-
-#define DAIF_PROCCTX		0
-#define DAIF_PROCCTX_NOIRQ	(PSR_I_BIT | PSR_F_BIT)
-#define DAIF_ERRCTX		(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
-#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
-
-
-/* mask/save/unmask/restore all exceptions, including interrupts. */
-static inline void local_daif_mask(void)
-{
-	WARN_ON(system_has_prio_mask_debugging() &&
-		(read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
-						    GIC_PRIO_PSR_I_SET)));
-
-	asm volatile(
-		"msr	daifset, #0xf		// local_daif_mask\n"
-		:
-		:
-		: "memory");
-
-	/* Don't really care for a dsb here, we don't intend to enable IRQs */
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
-	trace_hardirqs_off();
-}
-
-static inline unsigned long local_daif_save_flags(void)
-{
-	unsigned long flags;
-
-	flags = read_sysreg(daif);
-
-	if (system_uses_irq_prio_masking()) {
-		/* If IRQs are masked with PMR, reflect it in the flags */
-		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
-			flags |= PSR_I_BIT | PSR_F_BIT;
-	}
-
-	return flags;
-}
-
-static inline unsigned long local_daif_save(void)
-{
-	unsigned long flags;
-
-	flags = local_daif_save_flags();
-
-	local_daif_mask();
-
-	return flags;
-}
-
-static inline void local_daif_restore(unsigned long flags)
-{
-	bool irq_disabled = flags & PSR_I_BIT;
-
-	WARN_ON(system_has_prio_mask_debugging() &&
-		(read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
-
-	if (!irq_disabled) {
-		trace_hardirqs_on();
-
-		if (system_uses_irq_prio_masking()) {
-			gic_write_pmr(GIC_PRIO_IRQON);
-			pmr_sync();
-		}
-	} else if (system_uses_irq_prio_masking()) {
-		u64 pmr;
-
-		if (!(flags & PSR_A_BIT)) {
-			/*
-			 * If interrupts are disabled but we can take
-			 * asynchronous errors, we can take NMIs
-			 */
-			flags &= ~(PSR_I_BIT | PSR_F_BIT);
-			pmr = GIC_PRIO_IRQOFF;
-		} else {
-			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
-		}
-
-		/*
-		 * There has been concern that the write to daif
-		 * might be reordered before this write to PMR.
-		 * From the ARM ARM DDI 0487D.a, section D1.7.1
-		 * "Accessing PSTATE fields":
-		 *   Writes to the PSTATE fields have side-effects on
-		 *   various aspects of the PE operation. All of these
-		 *   side-effects are guaranteed:
-		 *   - Not to be visible to earlier instructions in
-		 *     the execution stream
-		 *   - To be visible to later instructions in the
-		 *     execution stream
-		 *
-		 * Also, writes to PMR are self-synchronizing, so no
-		 * interrupts with a lower priority than PMR is signaled
-		 * to the PE after the write.
-		 *
-		 * So we don't need additional synchronization here.
-		 */
-		gic_write_pmr(pmr);
-	}
-
-	write_sysreg(flags, daif);
-
-	if (irq_disabled)
-		trace_hardirqs_off();
-}
-
-/*
- * Called by synchronous exception handlers to restore the DAIF bits that were
- * modified by taking an exception.
- */
-static inline void local_daif_inherit(struct pt_regs *regs)
-{
-	unsigned long flags = regs->pstate & DAIF_MASK;
-
-	if (interrupts_enabled(regs))
-		trace_hardirqs_on();
-
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(regs->pmr);
-
-	/*
-	 * We can't use local_daif_restore(regs->pstate) here as
-	 * system_has_prio_mask_debugging() won't restore the I bit if it can
-	 * use the pmr instead.
-	 */
-	write_sysreg(flags, daif);
-}
-#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 08ba91e6fb03..0af825f11efc 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -23,7 +23,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0dbe3b29049b..6284ce3fd3b4 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -19,7 +19,7 @@
 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index b9a66fc146c9..3f3a4ff95d5d 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -33,7 +33,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 
 int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
@@ -397,7 +397,7 @@ int apei_claim_sea(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
 		return err;
 
-	current_flags = local_daif_save_flags();
+	current_flags = local_exception_save_flags();
 
 	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
 	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
@@ -409,7 +409,7 @@ int apei_claim_sea(struct pt_regs *regs)
 	 * SEA can interrupt SError, mask it and describe this as an NMI so
 	 * that APEI defers the handling.
 	 */
-	local_daif_restore(DAIF_ERRCTX);
+	serror_entry_exception_mask();
 	nmi_enter();
 	err = ghes_notify_sea();
 	nmi_exit();
@@ -420,7 +420,7 @@
 	 */
 	if (!err) {
 		if (return_to_irqs_enabled) {
-			local_daif_restore(DAIF_PROCCTX_NOIRQ);
+			local_exception_restore(procctx_noirq.flags);
 			__irq_enter();
 			irq_work_run();
 			__irq_exit();
@@ -430,7 +430,7 @@
 		}
 	}
 
-	local_daif_restore(current_flags);
+	local_exception_restore(current_flags);
 
 	return err;
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 58f047de3e1c..97dc7cafccd6 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -19,7 +19,7 @@
 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -37,9 +37,9 @@ u8 debug_monitors_arch(void)
 static void mdscr_write(u32 mdscr)
 {
 	unsigned long flags;
-	flags = local_daif_save();
+	flags = local_exception_save();
 	write_sysreg(mdscr, mdscr_el1);
-	local_daif_restore(flags);
+	local_exception_restore(flags);
 }
 NOKPROBE_SYMBOL(mdscr_write);
 
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 528d1bfbb5ed..221df3937da6 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -16,7 +16,7 @@
 #include 
 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 18749e9a6c2d..cb1c9d00d840 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -20,7 +20,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -341,7 +341,7 @@ int swsusp_arch_suspend(void)
 		return -EBUSY;
 	}
 
-	flags = local_daif_save();
+	flags = local_exception_save();
 
 	if (__cpu_suspend_enter(&state)) {
 		/* make the crash dump kernel image visible/saveable */
@@ -391,7 +391,7 @@ int swsusp_arch_suspend(void)
 		spectre_v4_enable_mitigation(NULL);
 	}
 
-	local_daif_restore(flags);
+	local_exception_restore(flags);
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 85087e2df564..f92497e9849d 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -20,7 +20,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -132,6 +132,6 @@ void __init init_IRQ(void)
 		 * the PMR/PSR pair to a consistent state.
 		 */
 		WARN_ON(read_sysreg(daif) & PSR_A_BIT);
-		local_daif_restore(DAIF_PROCCTX_NOIRQ);
+		local_exception_restore(procctx_noirq.flags);
 	}
 }
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 6f121a0164a4..e415d7a74269 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -17,7 +17,7 @@
 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -176,7 +176,7 @@ void machine_kexec(struct kimage *kimage)
 
 	pr_info("Bye!\n");
 
-	local_daif_mask();
+	local_exception_mask();
 
 	/*
 	 * Both restart and kernel_reloc will shutdown the MMU, disable data
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d9e462eafb95..9405d8110525 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -23,7 +23,7 @@
 #include 
 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 85104587f849..9e52c4649a43 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -37,7 +37,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -311,7 +311,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 * IRQ and FIQ will be unmasked after the root irqchip has been
 	 * detected and initialized.
 	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+	local_exception_restore(procctx_noirq.flags);
 
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a7c37afb4ebe..07c2950c3298 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -21,7 +21,7 @@
 #include 
 #include 
 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3b3f6b56e733..86594601fe37 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -42,7 +42,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -271,7 +271,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	 * as the root irqchip has already been detected and initialized we can
 	 * unmask IRQ and FIQ at the same time.
 	 */
-	local_daif_restore(DAIF_PROCCTX);
+	local_exception_restore(procctx.flags);
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -378,7 +378,7 @@ void __noreturn cpu_die(void)
 
 	idle_task_exit();
 
-	local_daif_mask();
+	local_exception_mask();
 
 	/* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */
 	cpuhp_ap_report_dead();
@@ -873,7 +873,7 @@ static void __noreturn local_cpu_stop(unsigned int cpu)
 {
 	set_cpu_online(cpu, false);
 
-	local_daif_mask();
+	local_exception_mask();
 	sdei_mask_local_cpu();
 	cpu_park_loop();
 }
@@ -899,7 +899,7 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
 	 * interrupt us. It's better to prevent the NMI and let the IRQ
 	 * finish since the pt_regs will be better.
 	 */
-	local_daif_mask();
+	local_exception_mask();
 
 	crash_save_cpu(regs, cpu);
 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index eaaff94329cd..754a56f08e66 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -9,7 +9,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -122,7 +122,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * hardirqs should be firmly off by now. This really ought to use
 	 * something like raw_local_daif_save().
 	 */
-	flags = local_daif_save();
+	flags = local_exception_save();
 
 	/*
 	 * Function graph tracer state gets inconsistent when the kernel
@@ -168,7 +168,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 * restored, so from this point onwards, debugging is fully
 	 * reenabled if it was enabled when core started shutdown.
 	 */
-	local_daif_restore(flags);
+	local_exception_restore(flags);
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 529cff825531..62562e9fb4ba 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,7 +33,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index ed363aa3027e..2e60f61a094c 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -437,7 +437,7 @@ u64 __vgic_v3_get_gic_config(void)
 	 * EL2.
 	 */
 	if (has_vhe())
-		flags = local_daif_save();
+		flags = local_exception_save();
 
 	/*
	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
@@ -457,7 +457,7 @@ u64 __vgic_v3_get_gic_config(void)
 	isb();
 
 	if (has_vhe())
-		local_daif_restore(flags);
+		local_exception_restore(flags);
 
 	val  = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
 	val |= read_gicreg(ICH_VTR_EL2);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 731a0378ed13..ea938b0d6920 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -644,7 +644,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
-	local_daif_mask();
+	local_exception_mask();
 
 	/*
 	 * Having IRQs masked via PMR when entering the guest means the GIC
@@ -663,7 +663,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
 	 * and the GIC PMR if the host is using IRQ priorities.
 	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+	local_exception_restore(procctx_noirq.flags);
 
 	/*
 	 * When we exit from the guest we change a number of CPU configuration
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ec0a337891dd..34441e023b58 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -33,7 +33,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ea6695d53fb9..a712f9b02d58 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1533,7 +1533,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
-	unsigned long daif;
+	unsigned long flags;
 
 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -1549,9 +1549,9 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 	 * We really don't want to take *any* exceptions while TTBR1 is
 	 * in the process of being replaced so mask everything.
 	 */
-	daif = local_daif_save();
+	flags = local_exception_save();
 	replace_phys(ttbr1);
-	local_daif_restore(daif);
+	local_exception_restore(flags);
 
 	cpu_uninstall_idmap();
 }
-- 
2.34.1
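
For readers following the conversion, the sketch below contrasts the old
daifflags calling convention with the new one used throughout this patch.
It is illustrative only: the local_exception_*() helpers, the header path,
and the procctx/procctx_noirq flag objects are assumed from how this patch
uses them (their definitions come from exception_mask.h, added by patch
1/2 and not visible in this mail).

/* Hypothetical callers, kernel-style C; not part of the patch. */
#include <asm/exception_mask.h>		/* assumed header path */

static void critical_section_example(void)
{
	unsigned long flags;

	/* was: flags = local_daif_save(); */
	flags = local_exception_save();		/* mask all exceptions, return prior state */

	/* ... work that must not be interrupted by any exception ... */

	/* was: local_daif_restore(flags); */
	local_exception_restore(flags);		/* put the saved state back */
}

static void process_context_example(void)
{
	/* was: local_daif_restore(DAIF_PROCCTX_NOIRQ); */
	local_exception_restore(procctx_noirq.flags);	/* mirrors DAIF_PROCCTX_NOIRQ */
}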