Date: Tue, 24 Oct 2023 13:21:00 -0000
From: "tip-bot2 for Thomas Gleixner"
Sender: tip-bot2@linutronix.de
Reply-to: linux-kernel@vger.kernel.org
To: linux-tip-commits@vger.kernel.org
Cc: Thomas Gleixner, "Borislav Petkov (AMD)", x86@kernel.org,
    linux-kernel@vger.kernel.org
Subject: [tip: x86/microcode] x86/microcode/amd: Use cached microcode for AP load
In-Reply-To: <20231017211723.243426023@linutronix.de>
References: <20231017211723.243426023@linutronix.de>
Message-ID: <169815366020.3135.1361653882661471432.tip-bot2@tip-bot2>

The following commit has been merged into the x86/microcode branch of tip:

Commit-ID:     5af05b8d51a8e3ff5905663655c0f46d1aaae44a
Gitweb:        https://git.kernel.org/tip/5af05b8d51a8e3ff5905663655c0f46d1aaae44a
Author:        Thomas Gleixner
AuthorDate:    Tue, 17 Oct 2023 23:23:55 +02:00
Committer:     Borislav Petkov (AMD)
CommitterDate: Tue, 24 Oct 2023 15:05:54 +02:00

x86/microcode/amd: Use cached microcode for AP load

Now that the microcode cache is initialized before the APs are brought up,
there is no point in scanning builtin/initrd microcode during AP loading.
Convert the AP loader to use the cache. That in turn makes the CPU hotplug
callback which applied the microcode once initrd/builtin are gone obsolete,
because early loading during late hotplug operations, including the resume
path, now depends only on the cache.

Signed-off-by: Thomas Gleixner
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20231017211723.243426023@linutronix.de
---
 arch/x86/kernel/cpu/microcode/amd.c      | 20 +++++++++++---------
 arch/x86/kernel/cpu/microcode/core.c     | 15 ++-------------
 arch/x86/kernel/cpu/microcode/internal.h |  2 --
 3 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6717f0e..99aa5a8 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -496,7 +496,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 	return false;
 }
 
-static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 	struct cpio_data cp;
 
@@ -506,12 +506,12 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data
 	*ret = cp;
 }
 
-static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
 	struct cpio_data cp = { };
 
 	/* Needed in load_microcode_amd() */
-	ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax;
+	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
 
 	find_blobs_in_containers(cpuid_1_eax, &cp);
 	if (!(cp.data && cp.size))
@@ -520,11 +520,6 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
 	early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
 }
 
-void load_ucode_amd_early(unsigned int cpuid_1_eax)
-{
-	return apply_ucode_from_containers(cpuid_1_eax);
-}
-
 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 static int __init save_microcode_in_initrd(void)
@@ -608,7 +603,6 @@ static struct ucode_patch *find_patch(unsigned int cpu)
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	u16 equiv_id;
 
-
 	equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
 	if (!equiv_id)
 		return NULL;
@@ -710,6 +704,14 @@ out:
 	return ret;
 }
 
+void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+{
+	unsigned int cpu = smp_processor_id();
+
+	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
+	apply_microcode_amd(cpu);
+}
+
 static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
 {
 	u32 equiv_tbl_len;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 3d769ff..15c5042 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -154,7 +154,7 @@ void __init load_ucode_bsp(void)
 	if (intel)
 		load_ucode_intel_bsp();
 	else
-		load_ucode_amd_early(cpuid_1_eax);
+		load_ucode_amd_bsp(cpuid_1_eax);
 }
 
 void load_ucode_ap(void)
@@ -173,7 +173,7 @@ void load_ucode_ap(void)
 		break;
 	case X86_VENDOR_AMD:
 		if (x86_family(cpuid_1_eax) >= 0x10)
-			load_ucode_amd_early(cpuid_1_eax);
+			load_ucode_amd_ap(cpuid_1_eax);
 		break;
 	default:
 		break;
@@ -494,15 +494,6 @@ static struct syscore_ops mc_syscore_ops = {
 	.resume			= microcode_bsp_resume,
 };
 
-static int mc_cpu_starting(unsigned int cpu)
-{
-	enum ucode_state err = microcode_ops->apply_microcode(cpu);
-
-	pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err);
-
-	return err == UCODE_ERROR;
-}
-
 static int mc_cpu_online(unsigned int cpu)
 {
 	struct device *dev = get_cpu_device(cpu);
@@ -590,8 +581,6 @@ static int __init microcode_init(void)
 	schedule_on_each_cpu(setup_online_cpu);
 
 	register_syscore_ops(&mc_syscore_ops);
-	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
-				  mc_cpu_starting, NULL);
 	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
 				  mc_cpu_online, mc_cpu_down_prep);
 
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index 32f6ad5..89fbf74 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -91,7 +91,6 @@ extern bool initrd_gone;
 #ifdef CONFIG_CPU_SUP_AMD
 void load_ucode_amd_bsp(unsigned int family);
 void load_ucode_amd_ap(unsigned int family);
-void load_ucode_amd_early(unsigned int cpuid_1_eax);
 int save_microcode_in_initrd_amd(unsigned int family);
 void reload_ucode_amd(unsigned int cpu);
 struct microcode_ops *init_amd_microcode(void);
@@ -99,7 +98,6 @@ void exit_amd_microcode(void);
 #else /* CONFIG_CPU_SUP_AMD */
 static inline void load_ucode_amd_bsp(unsigned int family) { }
 static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline void load_ucode_amd_early(unsigned int family) { }
 static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 static inline void reload_ucode_amd(unsigned int cpu) { }
 static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
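
For readers who want the shape of the change without digging through the
kernel sources, below is a minimal, standalone C sketch of the flow the
patch establishes: the boot CPU scans the builtin/initrd containers once
and fills a cache, and every AP (and the resume path) applies microcode
purely from that cache. Everything in it is hypothetical illustration, not
kernel code: the names ucode_cache, bsp_load_ucode and ap_load_ucode and
the signature/revision values are made up for this sketch.

/*
 * Toy model of "scan once on the BSP, APs consume only the cache".
 * Illustrative only; the real implementation is the kernel code in the
 * patch above.
 */
#include <stdbool.h>
#include <stdio.h>

struct cached_patch {
	unsigned int sig;	/* CPUID(1).EAX the patch matches */
	unsigned int rev;	/* microcode revision */
	bool valid;
};

/* Filled exactly once, before any AP is brought up. */
static struct cached_patch ucode_cache;

/* Stand-in for the BSP path: scan containers, populate the cache. */
static void bsp_load_ucode(unsigned int cpuid_1_eax)
{
	ucode_cache.sig   = cpuid_1_eax;
	ucode_cache.rev   = 0x0a201205;	/* pretend revision found in a blob */
	ucode_cache.valid = true;
	printf("BSP: scanned containers, cached rev 0x%x\n", ucode_cache.rev);
}

/*
 * Stand-in for the AP path after this patch: no container scanning and no
 * extra hotplug callback, just apply what the cache holds for this
 * signature.
 */
static bool ap_load_ucode(unsigned int cpu, unsigned int cpuid_1_eax)
{
	if (!ucode_cache.valid || ucode_cache.sig != cpuid_1_eax)
		return false;

	printf("CPU%u: applied rev 0x%x from cache\n", cpu, ucode_cache.rev);
	return true;
}

int main(void)
{
	unsigned int sig = 0x00a20f12;	/* made-up CPUID(1).EAX value */
	unsigned int cpu;

	bsp_load_ucode(sig);		/* boot CPU: scan once, fill cache */
	for (cpu = 1; cpu < 4; cpu++)	/* APs coming up, or resuming */
		ap_load_ucode(cpu, sig);
	return 0;
}

The toy makes the rationale visible: once the cache exists, the per-CPU
path is a pure lookup, which is why the "x86/microcode:starting" hotplug
callback could be removed.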