From nobody Sat Oct 4 11:11:59 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 51DB226A1BE for ; Mon, 18 Aug 2025 04:58:19 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493101; cv=none; b=uEoTrfFQjTgEqBBvlKJf2cf9AtX7ptSlC3eMsj0HYpNI8Nf/tvnNyeyT9qDo9TulbKG9fWIPXMyCP9TF6xAtA/FQN7AlP2hUgwnaADTDj3p2Z6gd9DyaC/Ceo4YrAqzYIwyMPs3Rv+eqfBGsOJmVrgCBCpFgAKajLVZeQyGAaSc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493101; c=relaxed/simple; bh=2OwGZeMLcO256otAUuyepHUmQcwRbEUAG21vRccL/C8=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=cAtdtrPdcKVwdL9Kr8QKxBUuYoEOJ+/MYOBCsVr6NcHTVB5Cwp8ls2HyL4ldMvGCgjjJ/ACYjrBS4gY5I5/hypncHispEmoJRf925bJeR7V+ReWFtFKCgtJkxLDCReur/BOl6/7n1DLlsUbS75BdMEbOmzHvR7aXjWoP4hy1ZbI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id ECA812A2A; Sun, 17 Aug 2025 21:58:04 -0700 (PDT) Received: from a076716.blr.arm.com (a076716.blr.arm.com [10.164.21.47]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 4BBC73F58B; Sun, 17 Aug 2025 21:58:10 -0700 (PDT) From: Anshuman Khandual To: linux-arm-kernel@lists.infradead.org Cc: Anshuman Khandual , Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Mark Brown , Ryan Roberts , kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org Subject: [PATCH 1/4] arm64/sysreg: Update TCR_EL1 register Date: Mon, 18 
Aug 2025 10:27:56 +0530 Message-Id: <20250818045759.672408-2-anshuman.khandual@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20250818045759.672408-1-anshuman.khandual@arm.com> References: <20250818045759.672408-1-anshuman.khandual@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Update TCR_EL1 register fields as per latest ARM ARM DDI 0487 7.B and while here drop an explicit sysreg definition SYS_TCR_EL1 from sysreg.h, which is now redundant. Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual --- arch/arm64/include/asm/sysreg.h | 2 -- arch/arm64/tools/sysreg | 52 ++++++++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysre= g.h index d5b5f2ae1afa..ad5c901af229 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -281,8 +281,6 @@ #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) =20 -#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) - #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) #define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1) #define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 696ab1f32a67..4bdae8bb11dc 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -4756,17 +4756,53 @@ Field 37 TBI0 Field 36 AS Res0 35 Field 34:32 IPS -Field 31:30 TG1 -Field 29:28 SH1 -Field 27:26 ORGN1 -Field 25:24 IRGN1 +UnsignedEnum 31:30 TG1 + 0b01 16K + 0b10 4K + 0b11 64K +EndEnum +UnsignedEnum 29:28 SH1 + 0b00 NONE + 0b10 OUTER + 0b11 INNER +EndEnum +UnsignedEnum 27:26 ORGN1 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +UnsignedEnum 
25:24 IRGN1 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum Field 23 EPD1 Field 22 A1 Field 21:16 T1SZ -Field 15:14 TG0 -Field 13:12 SH0 -Field 11:10 ORGN0 -Field 9:8 IRGN0 +UnsignedEnum 15:14 TG0 + 0b00 4K + 0b01 64K + 0b10 16K +EndEnum +UnsignedEnum 13:12 SH0 + 0b00 NONE + 0b10 OUTER + 0b11 INNER +EndEnum +UnsignedEnum 11:10 ORGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +UnsignedEnum 9:8 IRGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum Field 7 EPD0 Res0 6 Field 5:0 T0SZ --=20 2.25.1 From nobody Sat Oct 4 11:11:59 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 5A7CE2690DB for ; Mon, 18 Aug 2025 04:58:17 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493099; cv=none; b=bETSla7pVYADpn5Rxhp3hBUSaL6ILMJcb88691ATA8SO4/8fTiM8TJm6RdgkiziceDVP09iJQVjKepRElbYMj4dnOWm3JosV2uFBZqsoFjW15VIAReqA+POlLK4NV5Qtxk3+N0Vt9A0Lsh6pdg/DLP0rfNhq/ce2c7xWbcEiNsg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493099; c=relaxed/simple; bh=1V7aL4xFBt8bLOvuUCEQeF7XLkvfy5uksgcQrPF3kiQ=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=eCwQga7JV4Z/vsT3iIyX9Uj0Jd6zHhylUEen/nfnO1qU32Qo3g6XgrTYOM+PtFAyFk8FVOqW4VZU2HNXx1rfg5XlqQdhIpZkF3LAvLNnYGX1gFMNwhT010HeyhC0xhQ+KUEDeyKhShwpuH8dhMInqawL8rlAwOH2cVgS7NpNVks= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 8E2952D95; 
Sun, 17 Aug 2025 21:58:08 -0700 (PDT) Received: from a076716.blr.arm.com (a076716.blr.arm.com [10.164.21.47]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id A580A3F58B; Sun, 17 Aug 2025 21:58:13 -0700 (PDT) From: Anshuman Khandual To: linux-arm-kernel@lists.infradead.org Cc: Anshuman Khandual , Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Mark Brown , Ryan Roberts , kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org Subject: [PATCH 2/4] arm64/sysreg: Replace TCR_EL1 field macros Date: Mon, 18 Aug 2025 10:27:57 +0530 Message-Id: <20250818045759.672408-3-anshuman.khandual@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20250818045759.672408-1-anshuman.khandual@arm.com> References: <20250818045759.672408-1-anshuman.khandual@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" This just replaces all used TCR_EL1 field macros with tools sysreg variant based fields and subsequently drops them from the header (pgtable-hwdef.h). While here, also drop all the unused TCR_XXX macros from the header. 
Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Mark Brown Cc: kvmarm@lists.linux.dev Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual --- arch/arm64/include/asm/assembler.h | 6 +- arch/arm64/include/asm/cputype.h | 2 +- arch/arm64/include/asm/kvm_arm.h | 28 +++--- arch/arm64/include/asm/kvm_nested.h | 6 +- arch/arm64/include/asm/mmu_context.h | 4 +- arch/arm64/include/asm/pgtable-hwdef.h | 107 +++------------------ arch/arm64/include/asm/pgtable-prot.h | 2 +- arch/arm64/kernel/cpufeature.c | 4 +- arch/arm64/kernel/pi/map_kernel.c | 8 +- arch/arm64/kernel/vmcore_info.c | 2 +- arch/arm64/kvm/arm.c | 6 +- arch/arm64/kvm/at.c | 48 ++++----- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- arch/arm64/kvm/hyp/nvhe/tlb.c | 2 +- arch/arm64/kvm/hyp/vhe/tlb.c | 2 +- arch/arm64/kvm/nested.c | 8 +- arch/arm64/kvm/pauth.c | 12 +-- arch/arm64/mm/proc.S | 29 +++--- tools/arch/arm64/include/asm/cputype.h | 2 +- 21 files changed, 101 insertions(+), 183 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/as= sembler.h index 23be85d93348..1392860a3c97 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -325,14 +325,14 @@ alternative_cb_end * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map */ .macro tcr_set_t0sz, valreg, t0sz - bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH + bfi \valreg, \t0sz, #TCR_EL1_T0SZ_SHIFT, #TCR_EL1_T0SZ_WIDTH .endm =20 /* * tcr_set_t1sz - update TCR.T1SZ */ .macro tcr_set_t1sz, valreg, t1sz - bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH + bfi \valreg, \t1sz, #TCR_EL1_T1SZ_SHIFT, #TCR_EL1_T1SZ_WIDTH .endm =20 /* @@ -589,7 +589,7 @@ alternative_endif .macro offset_ttbr1, ttbr, tmp #if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2) mrs \tmp, tcr_el1 - and \tmp, \tmp, #TCR_T1SZ_MASK + and 
\tmp, \tmp, #TCR_EL1_T1SZ_MASK cmp \tmp, #TCR_T1SZ(VA_BITS_MIN) orr \tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET csel \ttbr, \tmp, \ttbr, eq diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cput= ype.h index 661735616787..5d80710ca85f 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -243,7 +243,7 @@ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX #define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0)) -#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) +#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_EL1_NFD1 | TCR_EL1_NFD0) =20 #ifndef __ASSEMBLY__ =20 diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_= arm.h index 1da290aeedce..560d9cb63413 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -115,10 +115,10 @@ #define TCR_EL2_PS_SHIFT 16 #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) #define TCR_EL2_PS_40B (2 << TCR_EL2_PS_SHIFT) -#define TCR_EL2_TG0_MASK TCR_TG0_MASK -#define TCR_EL2_SH0_MASK TCR_SH0_MASK -#define TCR_EL2_ORGN0_MASK TCR_ORGN0_MASK -#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK +#define TCR_EL2_TG0_MASK TCR_EL1_TG0_MASK +#define TCR_EL2_SH0_MASK TCR_EL1_SH0_MASK +#define TCR_EL2_ORGN0_MASK TCR_EL1_ORGN0_MASK +#define TCR_EL2_IRGN0_MASK TCR_EL1_IRGN0_MASK #define TCR_EL2_T0SZ_MASK 0x3f #define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK) @@ -130,16 +130,16 @@ #define VTCR_EL2_HA (1 << 21) #define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK -#define VTCR_EL2_TG0_MASK TCR_TG0_MASK -#define VTCR_EL2_TG0_4K TCR_TG0_4K -#define VTCR_EL2_TG0_16K TCR_TG0_16K -#define VTCR_EL2_TG0_64K TCR_TG0_64K -#define VTCR_EL2_SH0_MASK TCR_SH0_MASK -#define VTCR_EL2_SH0_INNER TCR_SH0_INNER -#define VTCR_EL2_ORGN0_MASK TCR_ORGN0_MASK -#define VTCR_EL2_ORGN0_WBWA TCR_ORGN0_WBWA 
-#define VTCR_EL2_IRGN0_MASK TCR_IRGN0_MASK -#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA +#define VTCR_EL2_TG0_MASK TCR_EL1_TG0_MASK +#define VTCR_EL2_TG0_4K (TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT) +#define VTCR_EL2_TG0_16K (TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT) +#define VTCR_EL2_TG0_64K (TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT) +#define VTCR_EL2_SH0_MASK TCR_EL1_SH0_MASK +#define VTCR_EL2_SH0_INNER (TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT) +#define VTCR_EL2_ORGN0_MASK TCR_EL1_ORGN0_MASK +#define VTCR_EL2_ORGN0_WBWA (TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT) +#define VTCR_EL2_IRGN0_MASK TCR_EL1_IRGN0_MASK +#define VTCR_EL2_IRGN0_WBWA (TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT) #define VTCR_EL2_SL0_SHIFT 6 #define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_T0SZ_MASK 0x3f diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/k= vm_nested.h index 7fd76f41c296..f018c0036baa 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -17,13 +17,13 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *v= cpu) /* Translation helpers from non-VHE EL2 to EL1 */ static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2) { - return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT; + return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_EL1_IPS_SHIFT; } =20 static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr) { - return TCR_EPD1_MASK | /* disable TTBR1_EL1 */ - ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) | + return TCR_EL1_EPD1_MASK | /* disable TTBR1_EL1 */ + ((tcr & TCR_EL2_TBI) ? 
TCR_EL1_TBI0 : 0) | tcr_el2_ps_to_tcr_el1_ips(tcr) | (tcr & TCR_EL2_TG0_MASK) | (tcr & TCR_EL2_ORGN0_MASK) | diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/= mmu_context.h index 0dbe3b29049b..1b4ac7b23e18 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -73,10 +73,10 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0s= z) { unsigned long tcr =3D read_sysreg(tcr_el1); =20 - if ((tcr & TCR_T0SZ_MASK) =3D=3D t0sz) + if ((tcr & TCR_EL1_T0SZ_MASK) =3D=3D t0sz) return; =20 - tcr &=3D ~TCR_T0SZ_MASK; + tcr &=3D ~TCR_EL1_T0SZ_MASK; tcr |=3D t0sz; write_sysreg(tcr, tcr_el1); isb(); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/as= m/pgtable-hwdef.h index f3b77deedfa2..c74d1811f1fc 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -228,102 +228,17 @@ /* * TCR flags. */ -#define TCR_T0SZ_OFFSET 0 -#define TCR_T1SZ_OFFSET 16 -#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) -#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) -#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) -#define TCR_TxSZ_WIDTH 6 -#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET) -#define TCR_T1SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET) - -#define TCR_EPD0_SHIFT 7 -#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT) -#define TCR_IRGN0_SHIFT 8 -#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT) - -#define TCR_EPD1_SHIFT 23 -#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT) -#define TCR_IRGN1_SHIFT 24 -#define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT) -#define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT) -#define TCR_IRGN1_WBWA (UL(1) << TCR_IRGN1_SHIFT) -#define TCR_IRGN1_WT (UL(2) << TCR_IRGN1_SHIFT) -#define 
TCR_IRGN1_WBnWA (UL(3) << TCR_IRGN1_SHIFT) - -#define TCR_IRGN_NC (TCR_IRGN0_NC | TCR_IRGN1_NC) -#define TCR_IRGN_WBWA (TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) -#define TCR_IRGN_WT (TCR_IRGN0_WT | TCR_IRGN1_WT) -#define TCR_IRGN_WBnWA (TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA) -#define TCR_IRGN_MASK (TCR_IRGN0_MASK | TCR_IRGN1_MASK) - - -#define TCR_ORGN0_SHIFT 10 -#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT) - -#define TCR_ORGN1_SHIFT 26 -#define TCR_ORGN1_MASK (UL(3) << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_NC (UL(0) << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WBWA (UL(1) << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WT (UL(2) << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WBnWA (UL(3) << TCR_ORGN1_SHIFT) - -#define TCR_ORGN_NC (TCR_ORGN0_NC | TCR_ORGN1_NC) -#define TCR_ORGN_WBWA (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA) -#define TCR_ORGN_WT (TCR_ORGN0_WT | TCR_ORGN1_WT) -#define TCR_ORGN_WBnWA (TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA) -#define TCR_ORGN_MASK (TCR_ORGN0_MASK | TCR_ORGN1_MASK) - -#define TCR_SH0_SHIFT 12 -#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT) -#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT) - -#define TCR_SH1_SHIFT 28 -#define TCR_SH1_MASK (UL(3) << TCR_SH1_SHIFT) -#define TCR_SH1_INNER (UL(3) << TCR_SH1_SHIFT) -#define TCR_SHARED (TCR_SH0_INNER | TCR_SH1_INNER) - -#define TCR_TG0_SHIFT 14 -#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT) -#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT) -#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT) -#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT) - -#define TCR_TG1_SHIFT 30 -#define TCR_TG1_MASK (UL(3) << TCR_TG1_SHIFT) -#define TCR_TG1_16K (UL(1) << TCR_TG1_SHIFT) -#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT) -#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT) - -#define TCR_IPS_SHIFT 32 -#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT) -#define TCR_A1 (UL(1) << 22) 
-#define TCR_ASID16 (UL(1) << 36) -#define TCR_TBI0 (UL(1) << 37) -#define TCR_TBI1 (UL(1) << 38) -#define TCR_HA (UL(1) << 39) -#define TCR_HD (UL(1) << 40) -#define TCR_HPD0_SHIFT 41 -#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT) -#define TCR_HPD1_SHIFT 42 -#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT) -#define TCR_TBID0 (UL(1) << 51) -#define TCR_TBID1 (UL(1) << 52) -#define TCR_NFD0 (UL(1) << 53) -#define TCR_NFD1 (UL(1) << 54) -#define TCR_E0PD0 (UL(1) << 55) -#define TCR_E0PD1 (UL(1) << 56) -#define TCR_TCMA0 (UL(1) << 57) -#define TCR_TCMA1 (UL(1) << 58) -#define TCR_DS (UL(1) << 59) +#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_EL1_T0SZ_SHIFT) +#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_EL1_T1SZ_SHIFT) + +#define TCR_IRGN_WBWA ((TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT) |\ + (TCR_EL1_IRGN1_WBWA << TCR_EL1_IRGN1_SHIFT)) + +#define TCR_ORGN_WBWA ((TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT) |\ + (TCR_EL1_ORGN1_WBWA << TCR_EL1_ORGN1_SHIFT)) + +#define TCR_SHARED ((TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT) |\ + (TCR_EL1_SH1_INNER << TCR_EL1_SH1_SHIFT)) =20 /* * TTBR. diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm= /pgtable-prot.h index 85dceb1c66f4..21a3d3342283 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -84,7 +84,7 @@ extern unsigned long prot_ns_shared; #else static inline bool __pure lpa2_is_enabled(void) { - return read_tcr() & TCR_DS; + return read_tcr() & TCR_EL1_DS; } =20 #define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 
0 : PTE_SHARED) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9ad065f15f1d..bb995d594a88 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2064,7 +2064,7 @@ static struct cpumask dbm_cpus __read_mostly; =20 static inline void __cpu_enable_hw_dbm(void) { - u64 tcr =3D read_sysreg(tcr_el1) | TCR_HD; + u64 tcr =3D read_sysreg(tcr_el1) | TCR_EL1_HD; =20 write_sysreg(tcr, tcr_el1); isb(); @@ -2323,7 +2323,7 @@ static bool has_generic_auth(const struct arm64_cpu_c= apabilities *entry, static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) { if (this_cpu_has_cap(ARM64_HAS_E0PD)) - sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); + sysreg_clear_set(tcr_el1, 0, TCR_EL1_E0PD1); } #endif /* CONFIG_ARM64_E0PD */ =20 diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_k= ernel.c index 0f4bd7771859..3f81e0610577 100644 --- a/arch/arm64/kernel/pi/map_kernel.c +++ b/arch/arm64/kernel/pi/map_kernel.c @@ -135,13 +135,13 @@ static void __init map_kernel(u64 kaslr_offset, u64 v= a_offset, int root_level) static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr) { u64 sctlr =3D read_sysreg(sctlr_el1); - u64 tcr =3D read_sysreg(tcr_el1) | TCR_DS; + u64 tcr =3D read_sysreg(tcr_el1) | TCR_EL1_DS; u64 mmfr0 =3D read_sysreg(id_aa64mmfr0_el1); u64 parange =3D cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); =20 - tcr &=3D ~TCR_IPS_MASK; - tcr |=3D parange << TCR_IPS_SHIFT; + tcr &=3D ~TCR_EL1_IPS_MASK; + tcr |=3D parange << TCR_EL1_IPS_SHIFT; =20 asm(" msr sctlr_el1, %0 ;" " isb ;" @@ -256,7 +256,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status= , void *fdt) } =20 if (va_bits > VA_BITS_MIN) - sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits)); + sysreg_clear_set(tcr_el1, TCR_EL1_T1SZ_MASK, TCR_T1SZ(va_bits)); =20 /* * The virtual KASLR displacement modulo 2MiB is decided by the diff --git a/arch/arm64/kernel/vmcore_info.c 
b/arch/arm64/kernel/vmcore_inf= o.c index b19d5d6cb8b3..9619ece66b79 100644 --- a/arch/arm64/kernel/vmcore_info.c +++ b/arch/arm64/kernel/vmcore_info.c @@ -14,7 +14,7 @@ static inline u64 get_tcr_el1_t1sz(void); =20 static inline u64 get_tcr_el1_t1sz(void) { - return (read_sysreg(tcr_el1) & TCR_T1SZ_MASK) >> TCR_T1SZ_OFFSET; + return (read_sysreg(tcr_el1) & TCR_EL1_T1SZ_MASK) >> TCR_EL1_T1SZ_SHIFT; } =20 void arch_crash_save_vmcoreinfo(void) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 888f7c7abf54..b47d6d530e57 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2000,10 +2000,10 @@ static void __init cpu_prepare_hyp_mode(int cpu, u3= 2 hyp_va_bits) =20 tcr =3D read_sysreg(tcr_el1); if (cpus_have_final_cap(ARM64_KVM_HVHE)) { - tcr &=3D ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK); - tcr |=3D TCR_EPD1_MASK; + tcr &=3D ~(TCR_EL1_HD | TCR_EL1_HA | TCR_EL1_A1 | TCR_EL1_T0SZ_MASK); + tcr |=3D TCR_EL1_EPD1_MASK; } else { - unsigned long ips =3D FIELD_GET(TCR_IPS_MASK, tcr); + unsigned long ips =3D FIELD_GET(TCR_EL1_IPS_MASK, tcr); =20 tcr &=3D TCR_EL2_MASK; tcr |=3D TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips); diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c index 0e5610533949..5f0f10ef38f0 100644 --- a/arch/arm64/kvm/at.c +++ b/arch/arm64/kvm/at.c @@ -134,8 +134,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct = s1_walk_info *wi, tbi =3D (wi->regime =3D=3D TR_EL2 ? FIELD_GET(TCR_EL2_TBI, tcr) : (va55 ? - FIELD_GET(TCR_TBI1, tcr) : - FIELD_GET(TCR_TBI0, tcr))); + FIELD_GET(TCR_EL1_TBI1, tcr) : + FIELD_GET(TCR_EL1_TBI0, tcr))); =20 if (!tbi && (u64)sign_extend64(va, 55) !=3D va) goto addrsz; @@ -183,8 +183,8 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct = s1_walk_info *wi, wi->hpd &=3D (wi->regime =3D=3D TR_EL2 ? FIELD_GET(TCR_EL2_HPD, tcr) : (va55 ? 
- FIELD_GET(TCR_HPD1, tcr) : - FIELD_GET(TCR_HPD0, tcr))); + FIELD_GET(TCR_EL1_HPD1, tcr) : + FIELD_GET(TCR_EL1_HPD0, tcr))); /* R_JHSVW */ wi->hpd |=3D s1pie_enabled(vcpu, wi->regime); =20 @@ -196,28 +196,28 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struc= t s1_walk_info *wi, =20 /* Someone was silly enough to encode TG0/TG1 differently */ if (va55) { - wi->txsz =3D FIELD_GET(TCR_T1SZ_MASK, tcr); - tg =3D FIELD_GET(TCR_TG1_MASK, tcr); + wi->txsz =3D FIELD_GET(TCR_EL1_T1SZ_MASK, tcr); + tg =3D FIELD_GET(TCR_EL1_TG1_MASK, tcr); =20 - switch (tg << TCR_TG1_SHIFT) { - case TCR_TG1_4K: + switch (tg << TCR_EL1_TG1_SHIFT) { + case TCR_EL1_TG1_4K: wi->pgshift =3D 12; break; - case TCR_TG1_16K: + case TCR_EL1_TG1_16K: wi->pgshift =3D 14; break; - case TCR_TG1_64K: + case TCR_EL1_TG1_64K: default: /* IMPDEF: treat any other value as 64k */ wi->pgshift =3D 16; break; } } else { - wi->txsz =3D FIELD_GET(TCR_T0SZ_MASK, tcr); - tg =3D FIELD_GET(TCR_TG0_MASK, tcr); + wi->txsz =3D FIELD_GET(TCR_EL1_T0SZ_MASK, tcr); + tg =3D FIELD_GET(TCR_EL1_TG0_MASK, tcr); =20 - switch (tg << TCR_TG0_SHIFT) { - case TCR_TG0_4K: + switch (tg) { + case TCR_EL1_TG0_4K: wi->pgshift =3D 12; break; - case TCR_TG0_16K: + case TCR_EL1_TG0_16K: wi->pgshift =3D 14; break; - case TCR_TG0_64K: + case TCR_EL1_TG0_64K: default: /* IMPDEF: treat any other value as 64k */ wi->pgshift =3D 16; break; } @@ -236,11 +236,11 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struc= t s1_walk_info *wi, switch (BIT(wi->pgshift)) { case SZ_4K: lva =3D kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN4, 52_BIT); - lva &=3D tcr & (wi->regime =3D=3D TR_EL2 ? TCR_EL2_DS : TCR_DS); + lva &=3D tcr & (wi->regime =3D=3D TR_EL2 ? TCR_EL2_DS : TCR_EL1_DS); break; case SZ_16K: lva =3D kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN16, 52_BIT); - lva &=3D tcr & (wi->regime =3D=3D TR_EL2 ? TCR_EL2_DS : TCR_DS); + lva &=3D tcr & (wi->regime =3D=3D TR_EL2 ? 
TCR_EL2_DS : TCR_EL1_DS); break; case SZ_64K: lva =3D kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, VARange, 52); @@ -259,12 +259,12 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struc= t s1_walk_info *wi, =20 /* I_ZFSYQ */ if (wi->regime !=3D TR_EL2 && - (tcr & (va55 ? TCR_EPD1_MASK : TCR_EPD0_MASK))) + (tcr & (va55 ? TCR_EL1_EPD1_MASK : TCR_EL1_EPD0_MASK))) goto transfault_l0; =20 /* R_BNDVG and following statements */ if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) && - wi->as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0))) + wi->as_el0 && (tcr & (va55 ? TCR_EL1_E0PD1 : TCR_EL1_E0PD0))) goto transfault_l0; =20 /* AArch64.S1StartLevel() */ @@ -272,7 +272,7 @@ static int setup_s1_walk(struct kvm_vcpu *vcpu, struct = s1_walk_info *wi, wi->sl =3D 3 - (((ia_bits - 1) - wi->pgshift) / stride); =20 ps =3D (wi->regime =3D=3D TR_EL2 ? - FIELD_GET(TCR_EL2_PS_MASK, tcr) : FIELD_GET(TCR_IPS_MASK, tcr)); + FIELD_GET(TCR_EL2_PS_MASK, tcr) : FIELD_GET(TCR_EL1_IPS_MASK, tcr)); =20 wi->max_oa_bits =3D min(get_kvm_ipa_limit(), ps_to_output_size(ps)); =20 @@ -421,13 +421,13 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_w= alk_info *wi, switch (wi->regime) { case TR_EL10: tcr =3D vcpu_read_sys_reg(vcpu, TCR_EL1); - asid_ttbr =3D ((tcr & TCR_A1) ? + asid_ttbr =3D ((tcr & TCR_EL1_A1) ? vcpu_read_sys_reg(vcpu, TTBR1_EL1) : vcpu_read_sys_reg(vcpu, TTBR0_EL1)); break; case TR_EL20: tcr =3D vcpu_read_sys_reg(vcpu, TCR_EL2); - asid_ttbr =3D ((tcr & TCR_A1) ? + asid_ttbr =3D ((tcr & TCR_EL1_A1) ? 
vcpu_read_sys_reg(vcpu, TTBR1_EL2) : vcpu_read_sys_reg(vcpu, TTBR0_EL2)); break; @@ -437,7 +437,7 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_wal= k_info *wi, =20 wr->asid =3D FIELD_GET(TTBR_ASID_MASK, asid_ttbr); if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || - !(tcr & TCR_ASID16)) + !(tcr & TCR_EL1_AS)) wr->asid &=3D GENMASK(7, 0); } =20 diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/i= nclude/hyp/switch.h index 84ec4e100fbb..91189c5ab190 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -881,7 +881,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu) * Uphold the requirements of the architecture by masking guest writes * to TCR_EL1.{HA,HD} here. */ - val &=3D ~(TCR_HD | TCR_HA); + val &=3D ~(TCR_EL1_HD | TCR_EL1_HA); write_sysreg_el1(val, SYS_TCR); __kvm_skip_instr(vcpu); return true; diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hy= p/include/hyp/sysreg-sr.h index a17cbe7582de..fb0dc749db7b 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -225,7 +225,7 @@ static inline void __sysreg_restore_el1_state(struct kv= m_cpu_context *ctxt, * set. Pairs with nVHE's __activate_traps(). */ write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) | - TCR_EPD1_MASK | TCR_EPD0_MASK), + TCR_EL1_EPD1_MASK | TCR_EL1_EPD0_MASK), SYS_TCR); isb(); } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/swi= tch.c index ccd575d5f6de..70c3d020d97e 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -86,7 +86,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) * ensure that things happen in this exact order. 
*/ val =3D read_sysreg_el1(SYS_TCR); - write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR); + write_sysreg_el1(val | TCR_EL1_EPD1_MASK | TCR_EL1_EPD0_MASK, SYS_TCR); isb(); val =3D read_sysreg_el1(SYS_SCTLR); write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR); diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 48da9ca9763f..b4eaa52790b8 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -84,7 +84,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu, * temporarily enabled in the next context. */ val =3D cxt->tcr =3D read_sysreg_el1(SYS_TCR); - val |=3D TCR_EPD1_MASK | TCR_EPD0_MASK; + val |=3D TCR_EL1_EPD1_MASK | TCR_EL1_EPD0_MASK; write_sysreg_el1(val, SYS_TCR); isb(); =20 diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c index ec2569818629..5c58c3b5cc74 100644 --- a/arch/arm64/kvm/hyp/vhe/tlb.c +++ b/arch/arm64/kvm/hyp/vhe/tlb.c @@ -41,7 +41,7 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu, * allocate IPA->PA walks, so we enable the S1 MMU... */ val =3D cxt->tcr =3D read_sysreg_el1(SYS_TCR); - val |=3D TCR_EPD1_MASK | TCR_EPD0_MASK; + val |=3D TCR_EL1_EPD1_MASK | TCR_EL1_EPD0_MASK; write_sysreg_el1(val, SYS_TCR); val =3D cxt->sctlr =3D read_sysreg_el1(SYS_SCTLR); val |=3D SCTLR_ELx_M; diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 153b3e11b115..7b1802a4072e 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -1266,14 +1266,14 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vc= pu) =20 if (vt->wr.nG) { u64 tcr =3D vcpu_read_sys_reg(vcpu, TCR_EL2); - u64 ttbr =3D ((tcr & TCR_A1) ? + u64 ttbr =3D ((tcr & TCR_EL1_A1) ? 
vcpu_read_sys_reg(vcpu, TTBR1_EL2) : vcpu_read_sys_reg(vcpu, TTBR0_EL2)); u16 asid; =20 asid =3D FIELD_GET(TTBR_ASID_MASK, ttbr); if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || - !(tcr & TCR_ASID16)) + !(tcr & TCR_EL1_AS)) asid &=3D GENMASK(7, 0); =20 return asid !=3D vt->wr.asid; @@ -1366,14 +1366,14 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu) =20 if (vt->wr.nG) { u64 tcr =3D vcpu_read_sys_reg(vcpu, TCR_EL2); - u64 ttbr =3D ((tcr & TCR_A1) ? + u64 ttbr =3D ((tcr & TCR_EL1_A1) ? vcpu_read_sys_reg(vcpu, TTBR1_EL2) : vcpu_read_sys_reg(vcpu, TTBR0_EL2)); u16 asid; =20 asid =3D FIELD_GET(TTBR_ASID_MASK, ttbr); if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) || - !(tcr & TCR_ASID16)) + !(tcr & TCR_EL1_AS)) asid &=3D GENMASK(7, 0); =20 if (asid !=3D vt->wr.asid) diff --git a/arch/arm64/kvm/pauth.c b/arch/arm64/kvm/pauth.c index d5eb3ae876be..29c3797b3e1c 100644 --- a/arch/arm64/kvm/pauth.c +++ b/arch/arm64/kvm/pauth.c @@ -73,11 +73,11 @@ static bool effective_tbi(struct kvm_vcpu *vcpu, bool b= it55) tbi =3D tcr & BIT(20); tbid =3D tcr & BIT(29); } else if (bit55) { - tbi =3D tcr & TCR_TBI1; - tbid =3D tcr & TCR_TBID1; + tbi =3D tcr & TCR_EL1_TBI1; + tbid =3D tcr & TCR_EL1_TBID1; } else { - tbi =3D tcr & TCR_TBI0; - tbid =3D tcr & TCR_TBID0; + tbi =3D tcr & TCR_EL1_TBI0; + tbid =3D tcr & TCR_EL1_TBID0; } =20 return tbi && !tbid; @@ -91,9 +91,9 @@ static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool= bit55) int txsz; =20 if (!vcpu_el2_e2h_is_set(vcpu) || !bit55) - txsz =3D FIELD_GET(TCR_T0SZ_MASK, tcr); + txsz =3D FIELD_GET(TCR_EL1_T0SZ_MASK, tcr); else - txsz =3D FIELD_GET(TCR_T1SZ_MASK, tcr); + txsz =3D FIELD_GET(TCR_EL1_T1SZ_MASK, tcr); =20 return 64 - clamp(txsz, mintxsz, maxtxsz); } diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 8c75965afc9e..73b859ec46de 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -23,15 +23,18 @@ #include =20 #ifdef CONFIG_ARM64_64K_PAGES -#define TCR_TG_FLAGS 
TCR_TG0_64K | TCR_TG1_64K +#define TCR_TG_FLAGS ((TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT) |\ + (TCR_EL1_TG1_64K << TCR_EL1_TG1_SHIFT)) #elif defined(CONFIG_ARM64_16K_PAGES) -#define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K +#define TCR_TG_FLAGS ((TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT) |\ + (TCR_EL1_TG1_16K << TCR_EL1_TG1_SHIFT)) #else /* CONFIG_ARM64_4K_PAGES */ -#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K +#define TCR_TG_FLAGS ((TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT) |\ + (TCR_EL1_TG1_4K << TCR_EL1_TG1_SHIFT)) #endif =20 #ifdef CONFIG_RANDOMIZE_BASE -#define TCR_KASLR_FLAGS TCR_NFD1 +#define TCR_KASLR_FLAGS TCR_EL1_NFD1 #else #define TCR_KASLR_FLAGS 0 #endif @@ -40,19 +43,19 @@ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA =20 #ifdef CONFIG_KASAN_SW_TAGS -#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1 +#define TCR_KASAN_SW_FLAGS TCR_EL1_TBI1 | TCR_EL1_TBID1 #else #define TCR_KASAN_SW_FLAGS 0 #endif =20 #ifdef CONFIG_KASAN_HW_TAGS -#define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1 +#define TCR_MTE_FLAGS TCR_EL1_TCMA1 | TCR_EL1_TBI1 | TCR_EL1_TBID1 #elif defined(CONFIG_ARM64_MTE) /* * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relie= s on * TBI being enabled at EL1. 
*/ -#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1 +#define TCR_MTE_FLAGS TCR_EL1_TBI1 | TCR_EL1_TBID1 #else #define TCR_MTE_FLAGS 0 #endif @@ -129,7 +132,7 @@ SYM_FUNC_START(cpu_do_resume) =20 /* Don't change t0sz here, mask those bits when restoring */ mrs x7, tcr_el1 - bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + bfi x8, x7, TCR_EL1_T0SZ_SHIFT, TCR_EL1_T0SZ_WIDTH =20 msr tcr_el1, x8 msr vbar_el1, x9 @@ -468,8 +471,8 @@ SYM_FUNC_START(__cpu_setup) tcr2 .req x15 mov_q mair, MAIR_EL1_SET mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FL= AGS | \ - TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ - TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS + TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_EL1_AS | \ + TCR_EL1_TBI0 | TCR_EL1_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS mov tcr2, xzr =20 tcr_clear_errata_bits tcr, x9, x5 @@ -479,7 +482,7 @@ SYM_FUNC_START(__cpu_setup) alternative_if ARM64_HAS_VA52 tcr_set_t1sz tcr, x9 #ifdef CONFIG_ARM64_LPA2 - orr tcr, tcr, #TCR_DS + orr tcr, tcr, #TCR_EL1_DS #endif alternative_else_nop_endif #endif @@ -487,7 +490,7 @@ alternative_else_nop_endif /* * Set the IPS bits in TCR_EL1. */ - tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6 + tcr_compute_pa_size tcr, #TCR_EL1_IPS_SHIFT, x5, x6 #ifdef CONFIG_ARM64_HW_AFDBM /* * Enable hardware update of the Access Flags bit. 
@@ -497,7 +500,7 @@ alternative_else_nop_endif mrs x9, ID_AA64MMFR1_EL1 ubfx x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4 cbz x9, 1f - orr tcr, tcr, #TCR_HA // hardware Access flag update + orr tcr, tcr, #TCR_EL1_HA // hardware Access flag update #ifdef CONFIG_ARM64_HAFT cmp x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT b.lt 1f diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/incl= ude/asm/cputype.h index 9a5d85cfd1fb..19a68298673c 100644 --- a/tools/arch/arm64/include/asm/cputype.h +++ b/tools/arch/arm64/include/asm/cputype.h @@ -223,7 +223,7 @@ /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX #define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0)) -#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) +#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_EL1_NFD1 | TCR_EL1_NFD0) =20 #ifndef __ASSEMBLY__ =20 --=20 2.25.1 From nobody Sat Oct 4 11:11:59 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id B628F268C42 for ; Mon, 18 Aug 2025 04:58:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493102; cv=none; b=AosWfRh42rGHKqfbHaj57BdJtivVkq6W8FdPsmgKKkm+CTwdjdtpvVW5aLlaf5fb0XqrDzhGbpuYKB3AIJzhsYIyYiJIM3ma97IEu3549gCUFNrSEvpJsHV90bo2mY+QCZ58raRjaTdPzbbU3oocKXLEigPb329gYjSyHoHZvY4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493102; c=relaxed/simple; bh=ZKEYCR0gDkrKbo3AZPVkiwGPIxrdNv31xbNb+X9KSok=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=g0X+K3i1lUurdUDvR40JvLykLO14Nupj/Ah4Q2U6RjnUh/1fnHScmnf+xRbxAiI7MDYTEewVlmCYKP81PWsANn9tVGHZQImbx8sTFtmZ4ahlM7YC3X4Sf6Rpo/Rl4EQitj3hO5oiUVKUmUkEgo3CwcmtExWa4tTesGa0bOmoBMU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) 
header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id E755D1762; Sun, 17 Aug 2025 21:58:11 -0700 (PDT) Received: from a076716.blr.arm.com (a076716.blr.arm.com [10.164.21.47]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 45EDE3F58B; Sun, 17 Aug 2025 21:58:17 -0700 (PDT) From: Anshuman Khandual To: linux-arm-kernel@lists.infradead.org Cc: Anshuman Khandual , Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Mark Brown , Ryan Roberts , kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org Subject: [PATCH 3/4] arm64/sysreg: Add TCR_EL2 register Date: Mon, 18 Aug 2025 10:27:58 +0530 Message-Id: <20250818045759.672408-4-anshuman.khandual@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20250818045759.672408-1-anshuman.khandual@arm.com> References: <20250818045759.672408-1-anshuman.khandual@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Add TCR_EL2 register fields as per the latest ARM ARM DDI 0487 7.B in tools sysreg format and drop all the existing redundant macros from the header (arch/arm64/include/asm/kvm_arm.h). While here also drop an explicit sysreg definition SYS_TCR_EL2 from sysreg.h header.
Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: kvmarm@lists.linux.dev Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual --- arch/arm64/include/asm/kvm_arm.h | 13 ---------- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/tools/sysreg | 44 ++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_= arm.h index 560d9cb63413..8994cddef182 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -107,19 +107,6 @@ =20 #define MPAMHCR_HOST_FLAGS 0 =20 -/* TCR_EL2 Registers bits */ -#define TCR_EL2_DS (1UL << 32) -#define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) -#define TCR_EL2_HPD (1 << 24) -#define TCR_EL2_TBI (1 << 20) -#define TCR_EL2_PS_SHIFT 16 -#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT) -#define TCR_EL2_PS_40B (2 << TCR_EL2_PS_SHIFT) -#define TCR_EL2_TG0_MASK TCR_EL1_TG0_MASK -#define TCR_EL2_SH0_MASK TCR_EL1_SH0_MASK -#define TCR_EL2_ORGN0_MASK TCR_EL1_ORGN0_MASK -#define TCR_EL2_IRGN0_MASK TCR_EL1_IRGN0_MASK -#define TCR_EL2_T0SZ_MASK 0x3f #define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK) =20 diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysre= g.h index ad5c901af229..112d5d0acb50 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -523,7 +523,6 @@ =20 #define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0) #define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1) -#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2) #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) =20 diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 4bdae8bb11dc..d2b40105eb41 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -4812,6 +4812,50 @@ Sysreg TCR_EL12 3 5 2 0 2 Mapping TCR_EL1 EndSysreg =20 +Sysreg 
TCR_EL2 3 4 2 0 2 +Res0 63:34 +Field 33 MTX +Field 32 DS +Res1 31 +Field 30 TCMA +Field 29 TBID +Field 28 HWU62 +Field 27 HWU61 +Field 26 HWU60 +Field 25 HWU59 +Field 24 HPD +Res1 23 +Field 22 HD +Field 21 HA +Field 20 TBI +Res0 19 +Field 18:16 PS +UnsignedEnum 15:14 TG0 + 0b00 4K + 0b01 64K + 0b10 16K +EndEnum +UnsignedEnum 13:12 SH0 + 0b00 NONE + 0b10 OUTER + 0b11 INNER +EndEnum +UnsignedEnum 11:10 ORGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +UnsignedEnum 9:8 IRGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +Res0 7:6 +Field 5:0 T0SZ +EndSysreg + Sysreg TCRALIAS_EL1 3 0 2 7 6 Mapping TCR_EL1 EndSysreg --=20 2.25.1 From nobody Sat Oct 4 11:11:59 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 0798E26E6E4 for ; Mon, 18 Aug 2025 04:58:23 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493105; cv=none; b=Ah5EL2EzTch8K3evpcPMLbZdxh2ojp1s5evgsKEJtrtcfN2xflplaHU2Xf3QVwwQUIDDbHyrHaVv+yigyGoH/bw/vd9D+y2g2+OcQ3RsOYiuDPagnFzgo9g6dDEn8ohwkkiSRx9U5ax0WwCK0UN+8qBDyDdT1kW2nOTXLl2UI4M= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1755493105; c=relaxed/simple; bh=arGTDLJ+WyYUfHUXadbRbVI+ZbCbclB090oJcp2Pzts=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=K9I5jREPPH3O4FPwRV1bjEnYteNEg4fccyKVpILsYZR7vov4dounMOWI9Q6Fonuv7mPUuJYapTweTiMLuuTtTb0MBrLCPjPF7wxnLA7fUdmDKVRxlpW+yr8U0nGpuLNVy5bFQYraVl+QWVtl7tr7s9pATc302OgDSdmvhK6+u6k= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from 
usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 48DBB2A2A; Sun, 17 Aug 2025 21:58:15 -0700 (PDT) Received: from a076716.blr.arm.com (a076716.blr.arm.com [10.164.21.47]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 9F4E53F58B; Sun, 17 Aug 2025 21:58:20 -0700 (PDT) From: Anshuman Khandual To: linux-arm-kernel@lists.infradead.org Cc: Anshuman Khandual , Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Mark Brown , Ryan Roberts , kvmarm@lists.linux.dev, linux-kernel@vger.kernel.org Subject: [PATCH 4/4] arm64/sysreg: Add VTCR_EL2 register Date: Mon, 18 Aug 2025 10:27:59 +0530 Message-Id: <20250818045759.672408-5-anshuman.khandual@arm.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20250818045759.672408-1-anshuman.khandual@arm.com> References: <20250818045759.672408-1-anshuman.khandual@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Add VTCR_EL2 register fields as per the latest ARM ARM DDI 0487 7.B in tools sysreg format and drop the existing macros from the header (arch/arm64/include/asm/kvm_arm.h). While here also drop an explicit sysreg definition SYS_VTCR_EL2 from sysreg.h header.
Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Oliver Upton Cc: Mark Brown Cc: kvmarm@lists.linux.dev Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual --- arch/arm64/include/asm/kvm_arm.h | 32 +++--------------- arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/kvm/hyp/pgtable.c | 6 ++-- arch/arm64/kvm/nested.c | 2 +- arch/arm64/tools/sysreg | 57 ++++++++++++++++++++++++++++++++ 5 files changed, 66 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_= arm.h index 8994cddef182..5abe3536f1d2 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -110,31 +110,7 @@ #define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK) =20 -/* VTCR_EL2 Registers bits */ -#define VTCR_EL2_DS TCR_EL2_DS -#define VTCR_EL2_RES1 (1U << 31) -#define VTCR_EL2_HD (1 << 22) -#define VTCR_EL2_HA (1 << 21) -#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT -#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK -#define VTCR_EL2_TG0_MASK TCR_EL1_TG0_MASK -#define VTCR_EL2_TG0_4K (TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT) -#define VTCR_EL2_TG0_16K (TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT) -#define VTCR_EL2_TG0_64K (TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT) -#define VTCR_EL2_SH0_MASK TCR_EL1_SH0_MASK -#define VTCR_EL2_SH0_INNER (TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT) -#define VTCR_EL2_ORGN0_MASK TCR_EL1_ORGN0_MASK -#define VTCR_EL2_ORGN0_WBWA (TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT) -#define VTCR_EL2_IRGN0_MASK TCR_EL1_IRGN0_MASK -#define VTCR_EL2_IRGN0_WBWA (TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT) -#define VTCR_EL2_SL0_SHIFT 6 -#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) -#define VTCR_EL2_T0SZ_MASK 0x3f -#define VTCR_EL2_VS_SHIFT 19 -#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) -#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) - -#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x) +#define VTCR_T0SZ(x) ((UL(64) - (x)) << 
VTCR_EL2_T0SZ_SHIFT) =20 /* * We configure the Stage-2 page tables to always restrict the IPA space t= o be @@ -148,8 +124,10 @@ * */ =20 -#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ - VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1) +#define VTCR_EL2_COMMON_BITS ((VTCR_EL2_SH0_INNER << VTCR_EL2_SH0_SHIFT) |= \ + (VTCR_EL2_ORGN0_WBWA << VTCR_EL2_ORGN0_SHIFT) | \ + (VTCR_EL2_IRGN0_WBWA << VTCR_EL2_IRGN0_SHIFT) | \ + (VTCR_EL2_RES1)) =20 /* * VTCR_EL2:SL0 indicates the entry level for Stage2 translation. diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysre= g.h index 112d5d0acb50..ea0a7e5a8628 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -524,7 +524,6 @@ #define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0) #define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1) #define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) -#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) =20 #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index c351b4abd5db..c1607b5b7cbf 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -584,7 +584,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) s8 lvls; =20 vtcr |=3D kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT; - vtcr |=3D VTCR_EL2_T0SZ(phys_shift); + vtcr |=3D VTCR_T0SZ(phys_shift); /* * Use a minimum 2 level page table to prevent splitting * host PMD huge pages at stage2. @@ -625,8 +625,8 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) =20 /* Set the vmid bits */ vtcr |=3D (get_vmid_bits(mmfr1) =3D=3D 16) ? 
- VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; + SYS_FIELD_PREP_ENUM(VTCR_EL2, VS, 16BIT) : + SYS_FIELD_PREP_ENUM(VTCR_EL2, VS, 8BIT); =20 return vtcr; } diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 7b1802a4072e..34d8d192d5c2 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -336,7 +336,7 @@ static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_= info *wi) { wi->t0sz =3D vtcr & TCR_EL2_T0SZ_MASK; =20 - switch (vtcr & VTCR_EL2_TG0_MASK) { + switch (SYS_FIELD_GET(VTCR_EL2, TG0, vtcr)) { case VTCR_EL2_TG0_4K: wi->pgshift =3D 12; break; case VTCR_EL2_TG0_16K: diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index d2b40105eb41..f5a0a304f844 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -4910,6 +4910,63 @@ Field 1 PIE Field 0 PnCH EndSysreg =20 +Sysreg VTCR_EL2 3 4 2 1 2 +Res0 63:46 +Field 45 HDBSS +Field 44 HAFT +Res0 43:42 +Field 41 TL0 +Field 40 GCSH +Res0 39 +Field 38 D128 +Field 37 S2POE +Field 36 S2PIE +Field 35 TL1 +Field 34 AssuredOnly +Field 33 SL2 +Field 32 DS +Res1 31 +Field 30 NSA +Field 29 NSW +Field 28 HWU62 +Field 27 HWU61 +Field 26 HWU60 +Field 25 HWU59 +Res0 24:23 +Field 22 HD +Field 21 HA +Res0 20 +UnsignedEnum 19 VS + 0b0 8BIT + 0b1 16BIT +EndEnum +Field 18:16 PS +UnsignedEnum 15:14 TG0 + 0b00 4K + 0b01 64K + 0b10 16K +EndEnum +UnsignedEnum 13:12 SH0 + 0b00 NONE + 0b10 OUTER + 0b11 INNER +EndEnum +UnsignedEnum 11:10 ORGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +UnsignedEnum 9:8 IRGN0 + 0b00 NC + 0b01 WBWA + 0b10 WT + 0b11 WBnWA +EndEnum +Field 7:6 SL0 +Field 5:0 T0SZ +EndSysreg + SysregFields MAIR2_ELx Field 63:56 Attr7 Field 55:48 Attr6 --=20 2.25.1