From: Sean Christopherson
To: Sean Christopherson, Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Mingwei Zhang, David Matlack, Jim Mattson
Subject: [PATCH v3 06/12] KVM: x86/mmu: Rename MMU_WARN_ON() to KVM_MMU_WARN_ON()
Date: Fri, 28 Jul 2023 17:47:16 -0700
Message-ID: <20230729004722.1056172-7-seanjc@google.com>
In-Reply-To: <20230729004722.1056172-1-seanjc@google.com>
References: <20230729004722.1056172-1-seanjc@google.com>
Reply-To: Sean Christopherson
X-Mailer: git-send-email 2.41.0.487.g6d72f3e995-goog
X-Mailing-List: linux-kernel@vger.kernel.org

Rename MMU_WARN_ON() to make it super obvious that the assertions are all
about KVM's MMU, not the primary MMU.

Signed-off-by: Sean Christopherson
Reviewed-by: Philippe Mathieu-Daudé
---
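As background (not part of the patch itself): a minimal userspace sketch of
what the renamed assertion does, assuming only the macro definitions visible
in the mmu_internal.h hunk below. The WARN_ON() stub and every demo_*/DEMO_*
name are hypothetical stand-ins for kernel code, used solely so the sketch
compiles as a plain C program.

/*
 * Standalone demo of the KVM_MMU_WARN_ON() semantics shown in the
 * mmu_internal.h hunk below.  Everything prefixed with demo_/DEMO_ is a
 * made-up stand-in for kernel infrastructure.
 *
 *   cc demo.c              # assertion compiles away
 *   cc -DMMU_DEBUG demo.c  # assertion is active
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Crude userspace stand-in for the kernel's WARN_ON(). */
#define WARN_ON(x)						\
	do {							\
		if (x)						\
			fprintf(stderr, "WARN_ON: %s\n", #x);	\
	} while (0)

/* Same shape as the definitions this patch adds to mmu_internal.h. */
#ifdef MMU_DEBUG
#define KVM_MMU_WARN_ON(x) WARN_ON(x)
#else
#define KVM_MMU_WARN_ON(x) do { } while (0)
#endif

/* Hypothetical "present" bit, for illustration only. */
#define DEMO_PRESENT_BIT (1ull << 11)

static bool demo_spte_is_present(uint64_t spte)
{
	return spte & DEMO_PRESENT_BIT;
}

static void demo_check_spte(uint64_t spte)
{
	/* Fires only in MMU_DEBUG builds; a no-op otherwise. */
	KVM_MMU_WARN_ON(!demo_spte_is_present(spte));
}

int main(void)
{
	demo_check_spte(0);			/* warns under -DMMU_DEBUG */
	demo_check_spte(DEMO_PRESENT_BIT);	/* never warns */
	return 0;
}

Built normally, the check disappears entirely; built with -DMMU_DEBUG it
evaluates its argument and warns, which is all the renamed macro does.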
 arch/x86/kvm/mmu/mmu.c          | 4 ++--
 arch/x86/kvm/mmu/mmu_internal.h | 4 ++--
 arch/x86/kvm/mmu/spte.h         | 8 ++++----
 arch/x86/kvm/mmu/tdp_mmu.c      | 8 ++++----
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8a21b06a9646..80daaa84a8eb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1255,7 +1255,7 @@ static bool spte_clear_dirty(u64 *sptep)
 {
 	u64 spte = *sptep;
 
-	MMU_WARN_ON(!spte_ad_enabled(spte));
+	KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
 	spte &= ~shadow_dirty_mask;
 	return mmu_spte_update(sptep, spte);
 }
@@ -1699,7 +1699,7 @@ static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
 	int i;
 
 	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		if (MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
+		if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
 			pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
 					   sp->spt[i], &sp->spt[i],
 					   kvm_mmu_page_get_gfn(sp, i));
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 9ea80e4d463c..bb1649669bc9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -9,9 +9,9 @@
 #undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
-#define MMU_WARN_ON(x) WARN_ON(x)
+#define KVM_MMU_WARN_ON(x) WARN_ON(x)
 #else
-#define MMU_WARN_ON(x) do { } while (0)
+#define KVM_MMU_WARN_ON(x) do { } while (0)
 #endif
 
 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1279db2eab44..83e6614f3720 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -265,13 +265,13 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
 
 static inline bool spte_ad_enabled(u64 spte)
 {
-	MMU_WARN_ON(!is_shadow_present_pte(spte));
+	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
 	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED;
 }
 
 static inline bool spte_ad_need_write_protect(u64 spte)
 {
-	MMU_WARN_ON(!is_shadow_present_pte(spte));
+	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
 	/*
 	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0',
 	 * and non-TDP SPTEs will never set these bits.  Optimize for 64-bit
@@ -282,13 +282,13 @@ static inline bool spte_ad_need_write_protect(u64 spte)
 
 static inline u64 spte_shadow_accessed_mask(u64 spte)
 {
-	MMU_WARN_ON(!is_shadow_present_pte(spte));
+	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
 	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
 }
 
 static inline u64 spte_shadow_dirty_mask(u64 spte)
 {
-	MMU_WARN_ON(!is_shadow_present_pte(spte));
+	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
 	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
 }
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194..f881de40f9ef 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1548,8 +1548,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
 
-		MMU_WARN_ON(kvm_ad_enabled() &&
-			    spte_ad_need_write_protect(iter.old_spte));
+		KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+				spte_ad_need_write_protect(iter.old_spte));
 
 		if (!(iter.old_spte & dbit))
 			continue;
@@ -1607,8 +1607,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 		if (!mask)
 			break;
 
-		MMU_WARN_ON(kvm_ad_enabled() &&
-			    spte_ad_need_write_protect(iter.old_spte));
+		KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+				spte_ad_need_write_protect(iter.old_spte));
 
 		if (iter.level > PG_LEVEL_4K ||
 		    !(mask & (1UL << (iter.gfn - gfn))))
-- 
2.41.0.487.g6d72f3e995-goog