[PATCH v2 08/11] xen: rename p2m_ipa_bits to p2m_gpa_bits

Oleksii Kurochko posted 11 patches 3 hours ago
[PATCH v2 08/11] xen: rename p2m_ipa_bits to p2m_gpa_bits
Posted by Oleksii Kurochko 3 hours ago
The IPA terminology is Arm-specific, so rename p2m_ipa_bits to
p2m_gpa_bits to use architecture-neutral naming.

No functional changes.

Reported-by: Jan Beulich <jbeulich@suse.com>
---
Changes in v2:
 - New patch
---
 xen/arch/arm/domain_build.c              | 12 ++++++------
 xen/arch/arm/domctl.c                    |  2 +-
 xen/arch/arm/include/asm/p2m.h           |  4 ++--
 xen/arch/arm/mmu/p2m.c                   | 18 +++++++++---------
 xen/arch/arm/p2m.c                       |  6 +++---
 xen/common/device-tree/domain-build.c    |  2 +-
 xen/drivers/passthrough/arm/ipmmu-vmsa.c |  4 ++--
 xen/drivers/passthrough/arm/smmu-v3.c    |  2 +-
 xen/drivers/passthrough/arm/smmu.c       |  2 +-
 9 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index e8795745ddc7..38ab41ec6b19 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -744,7 +744,7 @@ static int __init find_memory_holes(const struct kernel_info *kinfo,
 
     /* Start with maximum possible addressable physical memory range */
     start = 0;
-    end = (1ULL << p2m_ipa_bits) - 1;
+    end = (1ULL << p2m_gpa_bits) - 1;
     res = rangeset_add_range(mem_holes, PFN_DOWN(start), PFN_DOWN(end));
     if ( res )
     {
@@ -815,7 +815,7 @@ static int __init find_memory_holes(const struct kernel_info *kinfo,
     }
 
     start = 0;
-    end = (1ULL << p2m_ipa_bits) - 1;
+    end = (1ULL << p2m_gpa_bits) - 1;
     res = rangeset_report_ranges(mem_holes, PFN_DOWN(start), PFN_DOWN(end),
                                  add_ext_regions,  ext_regions);
     if ( res )
@@ -849,7 +849,7 @@ static int __init find_domU_holes(const struct kernel_info *kinfo,
 
         start = ROUNDUP(bankbase[i] + kinfo_mem->bank[i].size, SZ_2M);
 
-        bankend = ~0ULL >> (64 - p2m_ipa_bits);
+        bankend = ~0ULL >> (64 - p2m_gpa_bits);
         bankend = min(bankend, bankbase[i] + banksize[i] - 1);
 
         if ( bankend > start )
@@ -881,7 +881,7 @@ static int __init find_domU_holes(const struct kernel_info *kinfo,
     }
 
     res = rangeset_report_ranges(mem_holes, 0,
-                                 PFN_DOWN((1ULL << p2m_ipa_bits) - 1),
+                                 PFN_DOWN((1ULL << p2m_gpa_bits) - 1),
                                  add_ext_regions, ext_regions);
     if ( res )
         ext_regions->nr_banks = 0;
@@ -907,7 +907,7 @@ static unsigned int __init count_ranges(struct rangeset *r)
 {
     unsigned int cnt = 0;
 
-    (void) rangeset_report_ranges(r, 0, PFN_DOWN((1ULL << p2m_ipa_bits) - 1),
+    (void) rangeset_report_ranges(r, 0, PFN_DOWN((1ULL << p2m_gpa_bits) - 1),
                                   count, &cnt);
 
     return cnt;
@@ -972,7 +972,7 @@ static int __init find_host_extended_regions(const struct kernel_info *kinfo,
         }
 
         rangeset_report_ranges(kinfo->xen_reg_assigned, 0,
-                               PFN_DOWN((1ULL << p2m_ipa_bits) - 1),
+                               PFN_DOWN((1ULL << p2m_gpa_bits) - 1),
                                rangeset_to_membank, xen_reg);
     }
 
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index ad914c915f81..d8db595ab348 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -23,7 +23,7 @@ void arch_get_domain_info(const struct domain *d,
     /* All ARM domains use hardware assisted paging. */
     info->flags |= XEN_DOMINF_hap;
 
-    info->gpaddr_bits = p2m_ipa_bits;
+    info->gpaddr_bits = p2m_gpa_bits;
 }
 
 static int handle_vuart_init(struct domain *d, 
diff --git a/xen/arch/arm/include/asm/p2m.h b/xen/arch/arm/include/asm/p2m.h
index 010ce8c9ebbd..b15b57aa32bd 100644
--- a/xen/arch/arm/include/asm/p2m.h
+++ b/xen/arch/arm/include/asm/p2m.h
@@ -12,7 +12,7 @@
 #define paddr_bits PADDR_BITS
 
 /* Holds the bit size of IPAs in p2m tables.  */
-extern unsigned int p2m_ipa_bits;
+extern unsigned int p2m_gpa_bits;
 
 #define MAX_VMID_8_BIT  (1UL << 8)
 #define MAX_VMID_16_BIT (1UL << 16)
@@ -186,7 +186,7 @@ static inline bool arch_acquire_resource_check(struct domain *d)
 }
 
 /*
- * Helper to restrict "p2m_ipa_bits" according the external entity
+ * Helper to restrict "p2m_gpa_bits" according to the external entity
  * (e.g. IOMMU) requirements.
  *
  * Each corresponding driver should report the maximum IPA bits
diff --git a/xen/arch/arm/mmu/p2m.c b/xen/arch/arm/mmu/p2m.c
index 51abf3504fcf..08871c61b812 100644
--- a/xen/arch/arm/mmu/p2m.c
+++ b/xen/arch/arm/mmu/p2m.c
@@ -1734,11 +1734,11 @@ void __init setup_virt_paging(void)
     } t0sz_32;
 #else
     /*
-     * Restrict "p2m_ipa_bits" if needed. As P2M table is always configured
+     * Restrict "p2m_gpa_bits" if needed. As P2M table is always configured
      * with IPA bits == PA bits, compare against "pabits".
      */
-    if ( pa_range_info[system_cpuinfo.mm64.pa_range].pabits < p2m_ipa_bits )
-        p2m_ipa_bits = pa_range_info[system_cpuinfo.mm64.pa_range].pabits;
+    if ( pa_range_info[system_cpuinfo.mm64.pa_range].pabits < p2m_gpa_bits )
+        p2m_gpa_bits = pa_range_info[system_cpuinfo.mm64.pa_range].pabits;
 
     /*
      * cpu info sanitization made sure we support 16bits VMID only if all
@@ -1748,10 +1748,10 @@ void __init setup_virt_paging(void)
         max_vmid = MAX_VMID_16_BIT;
 #endif
 
-    /* Choose suitable "pa_range" according to the resulted "p2m_ipa_bits". */
+    /* Choose a suitable "pa_range" according to the resulting "p2m_gpa_bits". */
     for ( i = 0; i < ARRAY_SIZE(pa_range_info); i++ )
     {
-        if ( p2m_ipa_bits == pa_range_info[i].pabits )
+        if ( p2m_gpa_bits == pa_range_info[i].pabits )
         {
             pa_range = i;
             break;
@@ -1760,7 +1760,7 @@ void __init setup_virt_paging(void)
 
     /* Check if we found the associated entry in the array */
     if ( pa_range >= ARRAY_SIZE(pa_range_info) || !pa_range_info[pa_range].pabits )
-        panic("%u-bit P2M is not supported\n", p2m_ipa_bits);
+        panic("%u-bit P2M is not supported\n", p2m_gpa_bits);
 
 #ifdef CONFIG_ARM_64
     val |= VTCR_PS(pa_range);
@@ -1778,14 +1778,14 @@ void __init setup_virt_paging(void)
     p2m_root_level = 2 - pa_range_info[pa_range].sl0;
 
 #ifdef CONFIG_ARM_64
-    p2m_ipa_bits = 64 - pa_range_info[pa_range].t0sz;
+    p2m_gpa_bits = 64 - pa_range_info[pa_range].t0sz;
 #else
     t0sz_32.val = pa_range_info[pa_range].t0sz;
-    p2m_ipa_bits = 32 - t0sz_32.val;
+    p2m_gpa_bits = 32 - t0sz_32.val;
 #endif
 
     printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n",
-           p2m_ipa_bits,
+           p2m_gpa_bits,
            pa_range_info[pa_range].pabits,
            ( MAX_VMID == MAX_VMID_16_BIT ) ? 16 : 8);
 
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index fb03978a19af..5564e7d3c1db 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -19,7 +19,7 @@ unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
  * Set to the maximum configured support for IPA bits, so the number of IPA bits can be
  * restricted by external entity (e.g. IOMMU).
  */
-unsigned int __read_mostly p2m_ipa_bits = PADDR_BITS;
+unsigned int __read_mostly p2m_gpa_bits = PADDR_BITS;
 
 /* Unlock the flush and do a P2M TLB flush if necessary */
 void p2m_write_unlock(struct p2m_domain *p2m)
@@ -603,8 +603,8 @@ void __init p2m_restrict_ipa_bits(unsigned int ipa_bits)
      * Calculate the minimum of the maximum IPA bits that any external entity
      * can support.
      */
-    if ( ipa_bits < p2m_ipa_bits )
-        p2m_ipa_bits = ipa_bits;
+    if ( ipa_bits < p2m_gpa_bits )
+        p2m_gpa_bits = ipa_bits;
 }
 
 /*
diff --git a/xen/common/device-tree/domain-build.c b/xen/common/device-tree/domain-build.c
index 6708c9dd66e6..362da1cae780 100644
--- a/xen/common/device-tree/domain-build.c
+++ b/xen/common/device-tree/domain-build.c
@@ -220,7 +220,7 @@ int __init find_unallocated_memory(const struct kernel_info *kinfo,
     }
 
     start = 0;
-    end = (1ULL << p2m_ipa_bits) - 1;
+    end = (1ULL << p2m_gpa_bits) - 1;
     res = rangeset_report_ranges(unalloc_mem, PFN_DOWN(start), PFN_DOWN(end),
                                  cb, free_regions);
     if ( res )
diff --git a/xen/drivers/passthrough/arm/ipmmu-vmsa.c b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
index ea9fa9ddf3ce..e2b4c95dcc67 100644
--- a/xen/drivers/passthrough/arm/ipmmu-vmsa.c
+++ b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
@@ -575,11 +575,11 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 
     /*
      * TTBCR
-     * We use long descriptors and allocate the whole "p2m_ipa_bits" IPA space
+     * We use long descriptors and allocate the whole "p2m_gpa_bits" IPA space
      * to TTBR0. Use 4KB page granule. Start page table walks at first level.
      * Always bypass stage 1 translation.
      */
-    tsz0 = (64 - p2m_ipa_bits) << IMTTBCR_TSZ0_SHIFT;
+    tsz0 = (64 - p2m_gpa_bits) << IMTTBCR_TSZ0_SHIFT;
     ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | IMTTBCR_PMB |
                          IMTTBCR_SL0_LVL_1 | tsz0);
 
diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index bf153227dbd9..9e86cd7b0ad0 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -1202,7 +1202,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
 		return -EINVAL;
 	}
 
-	vtcr->tsz = 64 - p2m_ipa_bits;
+	vtcr->tsz = 64 - p2m_gpa_bits;
 	vtcr->sl = 2 - P2M_ROOT_LEVEL;
 
 	arm_lpae_s2_cfg.vttbr  = page_to_maddr(smmu_domain->d->arch.p2m.root);
diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index 22d306d0cb80..fa28fd7db79c 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -1276,7 +1276,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 			 * Xen: The IOMMU share the page-tables with the P2M
 			 * which may have restrict the size further.
 			 */
-			reg |= (64 - p2m_ipa_bits) << TTBCR_T0SZ_SHIFT;
+			reg |= (64 - p2m_gpa_bits) << TTBCR_T0SZ_SHIFT;
 
 			switch (smmu->s2_output_size) {
 			case 32:
-- 
2.53.0