[PATCH v3 6/8] x86/sev: Treat the contiguous RMP table as a single RMP segment
Posted by Tom Lendacky 1 month, 4 weeks ago
In preparation for support of a segmented RMP table, treat the contiguous
RMP table as a segmented RMP table with a single segment covering all
of memory. By treating a contiguous RMP table as a single segment, much
of the code that initializes and accesses the RMP can be re-used.

Segmented RMP tables can have up to 512 segment entries. Each segment
will have metadata associated with it to identify the segment location,
the segment size, etc. The segment data and the physical address are used
to determine the index of the segment within the table and then the RMP
entry within the segment. For an actual segmented RMP table environment,
much of the segment information will come from a configuration MSR. For
the contiguous RMP, though, much of the information will be statically
defined.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/virt/svm/sev.c | 195 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 176 insertions(+), 19 deletions(-)

diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 81e21d833cf0..ebfb924652f8 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -18,6 +18,7 @@
 #include <linux/cpumask.h>
 #include <linux/iommu.h>
 #include <linux/amd-iommu.h>
+#include <linux/nospec.h>
 
 #include <asm/sev.h>
 #include <asm/processor.h>
@@ -74,12 +75,42 @@ struct rmpentry_raw {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
 
+/*
+ * For a non-segmented RMP table, use the maximum physical address width as
+ * the segment shift in order to always arrive at index 0 in the table.
+ */
+#define RMPTABLE_NON_SEGMENTED_SHIFT	52
+
+struct rmp_segment_desc {
+	struct rmpentry_raw *rmp_entry;
+	u64 max_index;
+	u64 size;
+};
+
+/*
+ * Segmented RMP Table support.
+ *   - The segment size is used for two purposes:
+ *     - Identify the amount of memory covered by an RMP segment
+ *     - Quickly locate an RMP segment table entry for a physical address
+ *
+ *   - The RMP segment table contains pointers to an RMP table that covers
+ *     a specific portion of memory. There can be up to 512 8-byte entries,
+ *     one page's worth.
+ */
+static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
+static unsigned int rst_max_index __ro_after_init = 512;
+
+static u64 rmp_segment_size_max;
+static unsigned int rmp_segment_coverage_shift;
+static unsigned long rmp_segment_coverage_size;
+static unsigned long rmp_segment_coverage_mask;
+#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_coverage_shift)
+#define RMP_ENTRY_INDEX(x)	PHYS_PFN((x) & rmp_segment_coverage_mask)
+
 /* Mask to apply to a PFN to get the first PFN of a 2MB page */
 #define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
 
 static u64 probed_rmp_base, probed_rmp_size;
-static struct rmpentry_raw *rmptable __ro_after_init;
-static u64 rmptable_max_pfn __ro_after_init;
 
 static LIST_HEAD(snp_leaked_pages_list);
 static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
@@ -185,6 +216,92 @@ static bool __init init_rmptable_bookkeeping(void)
 	return true;
 }
 
+static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
+{
+	struct rmp_segment_desc *desc;
+	unsigned long rst_index;
+	void *rmp_segment;
+
+	/* Validate the RMP segment size */
+	if (segment_size > rmp_segment_size_max) {
+		pr_err("Invalid RMP size (%#llx) for configured segment size (%#llx)\n",
+		       segment_size, rmp_segment_size_max);
+		return false;
+	}
+
+	/* Validate the RMP segment table index */
+	rst_index = RST_ENTRY_INDEX(pa);
+	if (rst_index >= rst_max_index) {
+		pr_err("Invalid RMP segment base address (%#llx) for configured segment size (%#lx)\n",
+		       pa, rmp_segment_coverage_size);
+		return false;
+	}
+	rst_index = array_index_nospec(rst_index, rst_max_index);
+
+	if (rmp_segment_table[rst_index]) {
+		pr_err("RMP segment descriptor already exists at index %lu\n", rst_index);
+		return false;
+	}
+
+	/* Map the RMP entries */
+	rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
+	if (!rmp_segment) {
+		pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
+		       segment_pa, segment_size);
+		return false;
+	}
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		memunmap(rmp_segment);
+		return false;
+	}
+
+	desc->rmp_entry = rmp_segment;
+	desc->max_index = segment_size / sizeof(*desc->rmp_entry);
+	desc->size = segment_size;
+
+	/* Add the segment descriptor to the table */
+	rmp_segment_table[rst_index] = desc;
+
+	return true;
+}
+
+static void __init free_rmp_segment_table(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memunmap(desc->rmp_entry);
+
+		kfree(desc);
+	}
+
+	free_page((unsigned long)rmp_segment_table);
+
+	rmp_segment_table = NULL;
+}
+
+static bool __init alloc_rmp_segment_table(void)
+{
+	struct page *page;
+
+	/* Allocate the table used to index into the RMP segments */
+	page = alloc_page(__GFP_ZERO);
+	if (!page)
+		return false;
+
+	rmp_segment_table = page_address(page);
+
+	return true;
+}
+
 /*
  * Do the necessary preparations which are verified by the firmware as
  * described in the SNP_INIT_EX firmware command description in the SNP
@@ -192,8 +309,8 @@ static bool __init init_rmptable_bookkeeping(void)
  */
 static int __init snp_rmptable_init(void)
 {
-	u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
-	void *rmptable_start;
+	u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end, val;
+	unsigned int i;
 
 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
@@ -222,17 +339,18 @@ static int __init snp_rmptable_init(void)
 		goto nosnp;
 	}
 
+	if (!alloc_rmp_segment_table())
+		goto nosnp;
+
 	/* Map only the RMP entries */
-	rmptable_start = memremap(probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  MEMREMAP_WB);
-	if (!rmptable_start) {
-		pr_err("Failed to map RMP table\n");
+	rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
+	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
+
+	if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
-	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
-
 	/*
 	 * Check if SEV-SNP is already enabled, this can happen in case of
 	 * kexec boot.
@@ -243,12 +361,20 @@ static int __init snp_rmptable_init(void)
 
 	/* Zero out the RMP bookkeeping area */
 	if (!init_rmptable_bookkeeping()) {
-		memunmap(rmptable_start);
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
 	/* Zero out the RMP entries */
-	memset(rmptable_start, 0, rmptable_size);
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memset(desc->rmp_entry, 0, desc->size);
+	}
 
 	/* Flush the caches to ensure that data is written before SNP is enabled. */
 	wbinvd_on_all_cpus();
@@ -259,9 +385,6 @@ static int __init snp_rmptable_init(void)
 	on_each_cpu(snp_enable, NULL, 1);
 
 skip_enable:
-	rmptable = (struct rmpentry_raw *)rmptable_start;
-	rmptable_max_pfn = rmptable_size / sizeof(struct rmpentry_raw) - 1;
-
 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);
 
 	/*
@@ -282,6 +405,17 @@ static int __init snp_rmptable_init(void)
  */
 device_initcall(snp_rmptable_init);
 
+static void set_rmp_segment_info(unsigned int segment_shift)
+{
+	rmp_segment_coverage_shift = segment_shift;
+	rmp_segment_coverage_size  = 1UL << rmp_segment_coverage_shift;
+	rmp_segment_coverage_mask  = rmp_segment_coverage_size - 1;
+
+	/* Calculate the maximum size an RMP can be (16 bytes/page mapped) */
+	rmp_segment_size_max = PHYS_PFN(rmp_segment_coverage_size);
+	rmp_segment_size_max <<= 4;
+}
+
 #define RMP_ADDR_MASK GENMASK_ULL(51, 13)
 
 bool snp_probe_rmptable_info(void)
@@ -303,6 +437,11 @@ bool snp_probe_rmptable_info(void)
 
 	rmp_sz = rmp_end - rmp_base + 1;
 
+	/* Treat the contiguous RMP table as a single segment */
+	rst_max_index = 1;
+
+	set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
+
 	probed_rmp_base = rmp_base;
 	probed_rmp_size = rmp_sz;
 
@@ -314,13 +453,31 @@ bool snp_probe_rmptable_info(void)
 
 static struct rmpentry_raw *__get_rmpentry(unsigned long pfn)
 {
-	if (!rmptable)
+	struct rmp_segment_desc *desc;
+	unsigned long rst_index;
+	unsigned long paddr;
+	u64 segment_index;
+
+	if (!rmp_segment_table)
 		return ERR_PTR(-ENODEV);
 
-	if (unlikely(pfn > rmptable_max_pfn))
+	paddr = pfn << PAGE_SHIFT;
+
+	rst_index = RST_ENTRY_INDEX(paddr);
+	if (unlikely(rst_index >= rst_max_index))
+		return ERR_PTR(-EFAULT);
+	rst_index = array_index_nospec(rst_index, rst_max_index);
+
+	desc = rmp_segment_table[rst_index];
+	if (unlikely(!desc))
 		return ERR_PTR(-EFAULT);
 
-	return rmptable + pfn;
+	segment_index = RMP_ENTRY_INDEX(paddr);
+	if (unlikely(segment_index >= desc->max_index))
+		return ERR_PTR(-EFAULT);
+	segment_index = array_index_nospec(segment_index, desc->max_index);
+
+	return desc->rmp_entry + segment_index;
 }
 
 static int get_rmpentry(u64 pfn, struct rmpentry *entry)
-- 
2.43.2
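
For reference, with the non-segmented shift of 52 the two indexing macros
collapse: RST_ENTRY_INDEX() always yields 0 and RMP_ENTRY_INDEX() reduces to
the page frame number, so the contiguous table behaves as segment 0 covering
all of memory. A minimal userspace sketch of that arithmetic (an illustration
only, not part of the patch; the macro logic and the 16-bytes-per-page sizing
are taken from the diff above):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT			12
	#define RMPTABLE_NON_SEGMENTED_SHIFT	52

	int main(void)
	{
		uint64_t shift = RMPTABLE_NON_SEGMENTED_SHIFT;
		uint64_t coverage_size = 1ULL << shift;
		uint64_t coverage_mask = coverage_size - 1;
		uint64_t paddr = 0x12345000ULL;	/* example physical address */

		/* RST_ENTRY_INDEX(paddr): 0 for any address below 2^52 */
		uint64_t rst_index = paddr >> shift;

		/* RMP_ENTRY_INDEX(paddr): the PFN within segment 0 */
		uint64_t rmp_index = (paddr & coverage_mask) >> PAGE_SHIFT;

		/* Maximum RMP segment size: 16 bytes per 4K page covered */
		uint64_t size_max = (coverage_size >> PAGE_SHIFT) << 4;

		printf("rst_index=%llu rmp_index=%#llx size_max=%#llx\n",
		       (unsigned long long)rst_index,
		       (unsigned long long)rmp_index,
		       (unsigned long long)size_max);
		return 0;
	}
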
Re: [PATCH v3 6/8] x86/sev: Treat the contiguous RMP table as a single RMP segment
Posted by Neeraj Upadhyay 1 month, 1 week ago

On 9/30/2024 8:52 PM, Tom Lendacky wrote:
> +static u64 rmp_segment_size_max;
> +static unsigned int rmp_segment_coverage_shift;
> +static unsigned long rmp_segment_coverage_size;
> +static unsigned long rmp_segment_coverage_mask;

rmp_segment_size_max is of type u64 and rmp_segment_coverage_size is 1 << 52
for a single RMP segment. So maybe use u64 for rmp_segment_coverage_size
and rmp_segment_coverage_mask as well?


- Neeraj
Re: [PATCH v3 6/8] x86/sev: Treat the contiguous RMP table as a single RMP segment
Posted by Tom Lendacky 1 month, 1 week ago
On 10/18/24 00:59, Neeraj Upadhyay wrote:
> On 9/30/2024 8:52 PM, Tom Lendacky wrote:
>> +static u64 rmp_segment_size_max;
>> +static unsigned int rmp_segment_coverage_shift;
>> +static unsigned long rmp_segment_coverage_size;
>> +static unsigned long rmp_segment_coverage_mask;
> 
> rmp_segment_size_max is of type u64 and rmp_segment_coverage_size is 1 << 52
> for a single RMP segment. So maybe use u64 for rmp_segment_coverage_size
> and rmp_segment_coverage_mask as well?

This is 64-bit-only code, where unsigned long is the same size as u64 and
is typically preferred for numbers like these, which is why I use it here.
The mix of u64 and unsigned long does get a bit confusing, but I tried to
keep usages of the same type in sync as much as possible.
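
A minimal sketch of the equivalence being relied on here (an illustration
assuming an x86-64 kernel build; the assertion is not part of the patch):

	#include <linux/build_bug.h>
	#include <linux/types.h>

	/* On x86-64 builds, unsigned long and u64 are both 8 bytes wide,
	 * so mixing them in the declarations above changes no layout. */
	static_assert(sizeof(unsigned long) == sizeof(u64));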

Thanks,
Tom

Re: [PATCH v3 6/8] x86/sev: Treat the contiguous RMP table as a single RMP segment
Posted by Tom Lendacky 1 month, 1 week ago
On 10/18/24 08:56, Tom Lendacky wrote:
> On 10/18/24 00:59, Neeraj Upadhyay wrote:
>> On 9/30/2024 8:52 PM, Tom Lendacky wrote:
>>> +static u64 rmp_segment_size_max;
>>> +static unsigned int rmp_segment_coverage_shift;
>>> +static unsigned long rmp_segment_coverage_size;
>>> +static unsigned long rmp_segment_coverage_mask;
>>
>> rmp_segment_size_max is of type u64 and rmp_segment_coverage_size is 1 << 52
>> for a single RMP segment. So maybe use u64 for rmp_segment_coverage_size
>> and rmp_segment_coverage_mask as well?
> 
> This is 64-bit-only code, where unsigned long is the same size as u64 and
> is typically preferred for numbers like these, which is why I use it here.
> The mix of u64 and unsigned long does get a bit confusing, but I tried to
> keep usages of the same type in sync as much as possible.

But let me see what everything looks like if I unify all the fields to u64...

Thanks,
Tom

Re: [PATCH v3 6/8] x86/sev: Treat the contiguous RMP table as a single RMP segment
Posted by Nikunj A. Dadhania 1 month, 1 week ago
On 9/30/2024 8:52 PM, Tom Lendacky wrote:
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>

Reviewed-by: Nikunj A Dadhania <nikunj@amd.com>

Regards,
Nikunj
