From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: dmatlack@google.com, seanjc@google.com
Subject: [PATCH v2 03/25] KVM: x86/mmu: constify uses of struct kvm_mmu_role_regs
Date: Mon, 21 Feb 2022 11:22:21 -0500
Message-Id: <20220221162243.683208-4-pbonzini@redhat.com>
In-Reply-To: <20220221162243.683208-1-pbonzini@redhat.com>
References: <20220221162243.683208-1-pbonzini@redhat.com>

struct kvm_mmu_role_regs is computed just once and then accessed.  Use
const to make this clearer, even though the const fields of struct
kvm_mmu_role_regs already prevent modifications to the contents of the
struct, or rather make them harder.

Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4f9bbd02fb8b..97566ac539e3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -197,7 +197,7 @@ struct kvm_mmu_role_regs {
  * the single source of truth for the MMU's state.
  */
 #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
-static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+static inline bool __maybe_unused ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)\
 {									\
 	return !!(regs->reg & flag);					\
 }
@@ -244,7 +244,7 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
 	return regs;
 }
 
-static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
+static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
 {
 	if (!____is_cr0_pg(regs))
 		return 0;
@@ -4681,7 +4681,7 @@ static void paging32_init_context(struct kvm_mmu *context)
 }
 
 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
-							  struct kvm_mmu_role_regs *regs)
+							  const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_extended_role ext = {0};
 
@@ -4704,7 +4704,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
 }
 
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
-						   struct kvm_mmu_role_regs *regs,
+						   const struct kvm_mmu_role_regs *regs,
 						   bool base_only)
 {
 	union kvm_mmu_role role = {0};
@@ -4740,7 +4740,8 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 
 static union kvm_mmu_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				struct kvm_mmu_role_regs *regs, bool base_only)
+				const struct kvm_mmu_role_regs *regs,
+				bool base_only)
 {
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
@@ -4786,7 +4787,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
 static union kvm_mmu_role
 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
-				      struct kvm_mmu_role_regs *regs, bool base_only)
+				      const struct kvm_mmu_role_regs *regs,
+				      bool base_only)
 {
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
 
@@ -4799,7 +4801,8 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   struct kvm_mmu_role_regs *regs, bool base_only)
+				   const struct kvm_mmu_role_regs *regs,
+				   bool base_only)
 {
 	union kvm_mmu_role role =
 		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
@@ -4817,7 +4820,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    struct kvm_mmu_role_regs *regs,
+				    const struct kvm_mmu_role_regs *regs,
 				    union kvm_mmu_role new_role)
 {
 	if (new_role.as_u64 == context->mmu_role.as_u64)
@@ -4840,7 +4843,7 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 }
 
 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
-				struct kvm_mmu_role_regs *regs)
+				const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
@@ -4851,7 +4854,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-				   struct kvm_mmu_role_regs *regs)
+				   const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role =
 		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
@@ -4951,7 +4954,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role
-kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role;
 
-- 
2.31.1
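
[Editor's note: for readers following along, below is a minimal standalone
C sketch of the distinction the commit message draws. It is not the kernel
code; struct regs, CR0_PG, and is_paging() are simplified stand-ins invented
for illustration. The point it demonstrates: const *fields* already reject
direct stores through any pointer, while const-qualifying the *parameter*
additionally documents at each helper's boundary that the regs are only read.]

#include <stdbool.h>
#include <stdio.h>

struct regs {
	const unsigned long cr0;	/* const field, as in kvm_mmu_role_regs */
};

#define CR0_PG (1ul << 31)

/* Reader takes a pointer-to-const: the compiler enforces read-only use. */
static bool is_paging(const struct regs *r)
{
	/* r->cr0 = 0; would fail to compile twice over: cr0 is a const
	 * field, and r is a pointer-to-const. */
	return !!(r->cr0 & CR0_PG);
}

int main(void)
{
	struct regs r = { .cr0 = CR0_PG };	/* const field set once at init */

	printf("paging: %d\n", is_paging(&r));	/* prints "paging: 1" */
	return 0;
}

[The commit message's "or rather make them harder" hedge reflects that const
fields can still be bypassed (e.g. via memcpy() or casts), so the parameter
qualifier is self-documentation plus a cheap compiler check, not a hard
guarantee.]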