The RMM may not be tracking all the memory of the system at boot. Create
the necessary tracking state and GPTs within the RMM so that all boot
memory can be delegated to the RMM as needed during runtime.
Note: support is currently missing for SROs, which means that if the RMM
needs memory to be donated this will fail (and render CCA unusable in Linux).
Signed-off-by: Steven Price <steven.price@arm.com>
---
New patch for v13
---
arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
index 9590dff9a2c1..80aedc85e94a 100644
--- a/arch/arm64/kvm/rmi.c
+++ b/arch/arm64/kvm/rmi.c
@@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>
#include <asm/rmi_cmds.h>
@@ -56,6 +57,18 @@ static int rmi_check_version(void)
return 0;
}
+/*
+ * These are the 'default' sizes when passing 0 as the tracking_region_size.
+ * TODO: Support other granule sizes
+ */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define RMM_GRANULE_TRACKING_SIZE SZ_1G
+#elif defined(CONFIG_PAGE_SIZE_16KB)
+#define RMM_GRANULE_TRACKING_SIZE SZ_32M
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define RMM_GRANULE_TRACKING_SIZE SZ_512M
+#endif
+
static int rmi_configure(void)
{
struct rmm_config *config __free(free_page) = NULL;
@@ -95,6 +108,80 @@ static int rmi_configure(void)
return 0;
}
+static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
+{
+ start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
+ end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
+
+ while (start < end) {
+ unsigned long ret, category, state;
+
+ ret = rmi_granule_tracking_get(start, &category, &state);
+ if (ret != RMI_SUCCESS ||
+ state != RMI_TRACKING_FINE ||
+ category != RMI_MEM_CATEGORY_CONVENTIONAL) {
+ /* TODO: Set granule tracking in this case */
+ kvm_err("Granule tracking for region isn't fine/conventional: %llx",
+ start);
+ return -ENODEV;
+ }
+ start += RMM_GRANULE_TRACKING_SIZE;
+ }
+
+ return 0;
+}
+
+static unsigned long rmi_l0gpt_size(void)
+{
+ return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
+ rmm_feat_reg1));
+}
+
+static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long l0gpt_sz = rmi_l0gpt_size();
+
+ start = ALIGN_DOWN(start, l0gpt_sz);
+ end = ALIGN(end, l0gpt_sz);
+
+ while (start < end) {
+ int ret = rmi_gpt_l1_create(start);
+
+ if (ret && ret != RMI_ERROR_GPT) {
+ /*
+ * FIXME: Handle SRO so that memory can be donated for
+ * the tables.
+ */
+ kvm_err("GPT Level1 table missing for %llx\n", start);
+ return -ENOMEM;
+ }
+ start += l0gpt_sz;
+ }
+
+ return 0;
+}
+
+static int rmi_init_metadata(void)
+{
+ phys_addr_t start, end;
+ const struct memblock_region *r;
+
+ for_each_mem_region(r) {
+ int ret;
+
+ start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
+ end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
+ ret = rmi_verify_memory_tracking(start, end);
+ if (ret)
+ return ret;
+ ret = rmi_create_gpts(start, end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int rmm_check_features(void)
{
if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
@@ -120,6 +207,8 @@ void kvm_init_rmi(void)
return;
if (rmi_configure())
return;
+ if (rmi_init_metadata())
+ return;
/* Future patch will enable static branch kvm_rmi_is_available */
}
--
2.43.0
Hi,
On Wed, Mar 18, 2026 at 03:53:34PM +0000, Steven Price wrote:
> The RMM may not be tracking all the memory of the system at boot. Create
> the necessary tracking state and GPTs within the RMM so that all boot
> memory can be delegated to the RMM as needed during runtime.
>
> Note: support is currently missing for SROs which means that if the RMM
> needs memory donating this will fail (and render CCA unusable in Linux).
>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
> New patch for v13
> ---
> arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 89 insertions(+)
>
> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> index 9590dff9a2c1..80aedc85e94a 100644
> --- a/arch/arm64/kvm/rmi.c
> +++ b/arch/arm64/kvm/rmi.c
> @@ -4,6 +4,7 @@
> */
>
> #include <linux/kvm_host.h>
> +#include <linux/memblock.h>
>
> #include <asm/kvm_pgtable.h>
> #include <asm/rmi_cmds.h>
> @@ -56,6 +57,18 @@ static int rmi_check_version(void)
> return 0;
> }
>
> +/*
> + * These are the 'default' sizes when passing 0 as the tracking_region_size.
> + * TODO: Support other granule sizes
> + */
> +#ifdef CONFIG_PAGE_SIZE_4KB
> +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
> +#elif defined(CONFIG_PAGE_SIZE_16KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
> +#elif defined(CONFIG_PAGE_SIZE_64KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
> +#endif
> +
> static int rmi_configure(void)
> {
> struct rmm_config *config __free(free_page) = NULL;
> @@ -95,6 +108,80 @@ static int rmi_configure(void)
> return 0;
> }
>
> +static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> +{
> + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
This will produce an error on systems where the start of system memory is not
aligned to RMM_GRANULE_TRACKING_SIZE. For instance, on QEMU-SBSA the system
memory starts at 0x100_4300_0000. With the above and RMM_GRANULE_TRACKING_SIZE
set to SZ_1G, @start becomes 0x100_4000_0000, which falls outside the memory map
known to the TF-A. I fixed it with these modifications:
LINUX:
diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
index 10ff1c3bddaf..21bfbbe2f047 100644
--- a/arch/arm64/kvm/rmi.c
+++ b/arch/arm64/kvm/rmi.c
@@ -424,7 +424,9 @@ static int rmi_configure(void)
static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
{
- start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
+ phys_addr_t offset;
+
+ offset = start - ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
while (start < end) {
@@ -439,7 +441,13 @@ static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
start);
return -ENODEV;
}
- start += RMM_GRANULE_TRACKING_SIZE;
+
+ if (offset) {
+ start += (RMM_GRANULE_TRACKING_SIZE - offset);
+ offset = 0;
+ } else {
+ start += RMM_GRANULE_TRACKING_SIZE;
+ }
}
return 0;
RMM:
diff --git a/runtime/rmi/granule.c b/runtime/rmi/granule.c
index cef521fc0869..60358d9ee81e 100644
--- a/runtime/rmi/granule.c
+++ b/runtime/rmi/granule.c
@@ -209,9 +209,11 @@ void smc_granule_tracking_get(unsigned long addr,
return;
}
+#if 0
if (!ALIGNED(addr, RMM_INTERNAL_TRACKING_REGION_SIZE)) {
return;
}
+#endif
g = find_granule(addr);
if (g != NULL) {
This is likely not the right fix but hopefully provides some guidance. Send me
your patches when you have an idea and I'll test them.
Thanks,
Mathieu
> + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
> +
> + while (start < end) {
> + unsigned long ret, category, state;
> +
> + ret = rmi_granule_tracking_get(start, &category, &state);
> + if (ret != RMI_SUCCESS ||
> + state != RMI_TRACKING_FINE ||
> + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
> + /* TODO: Set granule tracking in this case */
> + kvm_err("Granule tracking for region isn't fine/conventional: %llx",
> + start);
> + return -ENODEV;
> + }
> + start += RMM_GRANULE_TRACKING_SIZE;
> + }
> +
> + return 0;
> +}
> +
> +static unsigned long rmi_l0gpt_size(void)
> +{
> + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
> + rmm_feat_reg1));
> +}
> +
> +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
> +{
> + unsigned long l0gpt_sz = rmi_l0gpt_size();
> +
> + start = ALIGN_DOWN(start, l0gpt_sz);
> + end = ALIGN(end, l0gpt_sz);
> +
> + while (start < end) {
> + int ret = rmi_gpt_l1_create(start);
> +
> + if (ret && ret != RMI_ERROR_GPT) {
> + /*
> + * FIXME: Handle SRO so that memory can be donated for
> + * the tables.
> + */
> + kvm_err("GPT Level1 table missing for %llx\n", start);
> + return -ENOMEM;
> + }
> + start += l0gpt_sz;
> + }
> +
> + return 0;
> +}
> +
> +static int rmi_init_metadata(void)
> +{
> + phys_addr_t start, end;
> + const struct memblock_region *r;
> +
> + for_each_mem_region(r) {
> + int ret;
> +
> + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
> + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
> + ret = rmi_verify_memory_tracking(start, end);
> + if (ret)
> + return ret;
> + ret = rmi_create_gpts(start, end);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> static int rmm_check_features(void)
> {
> if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
> @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
> return;
> if (rmi_configure())
> return;
> + if (rmi_init_metadata())
> + return;
>
> /* Future patch will enable static branch kvm_rmi_is_available */
> }
> --
> 2.43.0
>
>
Hi Mathieu,
On 30/03/2026 21:58, Mathieu Poirier wrote:
> Hi,
>
> On Wed, Mar 18, 2026 at 03:53:34PM +0000, Steven Price wrote:
>> The RMM may not be tracking all the memory of the system at boot. Create
>> the necessary tracking state and GPTs within the RMM so that all boot
>> memory can be delegated to the RMM as needed during runtime.
>>
>> Note: support is currently missing for SROs which means that if the RMM
>> needs memory donating this will fail (and render CCA unusable in Linux).
>>
>> Signed-off-by: Steven Price <steven.price@arm.com>
>> ---
>> New patch for v13
>> ---
>> arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
>> 1 file changed, 89 insertions(+)
>>
>> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
>> index 9590dff9a2c1..80aedc85e94a 100644
>> --- a/arch/arm64/kvm/rmi.c
>> +++ b/arch/arm64/kvm/rmi.c
>> @@ -4,6 +4,7 @@
>> */
>>
>> #include <linux/kvm_host.h>
>> +#include <linux/memblock.h>
>>
>> #include <asm/kvm_pgtable.h>
>> #include <asm/rmi_cmds.h>
>> @@ -56,6 +57,18 @@ static int rmi_check_version(void)
>> return 0;
>> }
>>
>> +/*
>> + * These are the 'default' sizes when passing 0 as the tracking_region_size.
>> + * TODO: Support other granule sizes
>> + */
>> +#ifdef CONFIG_PAGE_SIZE_4KB
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
>> +#elif defined(CONFIG_PAGE_SIZE_16KB)
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
>> +#elif defined(CONFIG_PAGE_SIZE_64KB)
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
>> +#endif
>> +
>> static int rmi_configure(void)
>> {
>> struct rmm_config *config __free(free_page) = NULL;
>> @@ -95,6 +108,80 @@ static int rmi_configure(void)
>> return 0;
>> }
>>
>> +static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
>> +{
>> + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
>
> This will produce an error on systems where the start of system memory is not
> aligned to RMM_GRANULE_TRACKING_SIZE. For instance, on QEMU-SBSA the system
> memory starts at 0x100_4300_0000. With the above and RMM_GRANULE_TRACKING_SIZE
> set to SZ_1G, @start becomes 0x100_4000_0000, which falls outside the memory map
> known to the TF-A. I fixed it with these modifications:
Thanks for raising this. This will need to be addressed in the RMM
spec; I have raised it with the team and it will be addressed soon.
>
> LINUX:
>
> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> index 10ff1c3bddaf..21bfbbe2f047 100644
> --- a/arch/arm64/kvm/rmi.c
> +++ b/arch/arm64/kvm/rmi.c
> @@ -424,7 +424,9 @@ static int rmi_configure(void)
>
> static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> {
> - start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> + phys_addr_t offset;
> +
> + offset = start - ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
>
> while (start < end) {
> @@ -439,7 +441,13 @@ static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> start);
> return -ENODEV;
> }
> - start += RMM_GRANULE_TRACKING_SIZE;
> +
> + if (offset) {
> + start += (RMM_GRANULE_TRACKING_SIZE - offset);
> + offset = 0;
> + } else {
> + start += RMM_GRANULE_TRACKING_SIZE;
> + }
> }
>
> return 0;
>
> RMM:
>
> diff --git a/runtime/rmi/granule.c b/runtime/rmi/granule.c
> index cef521fc0869..60358d9ee81e 100644
> --- a/runtime/rmi/granule.c
> +++ b/runtime/rmi/granule.c
> @@ -209,9 +209,11 @@ void smc_granule_tracking_get(unsigned long addr,
> return;
> }
>
> +#if 0
> if (!ALIGNED(addr, RMM_INTERNAL_TRACKING_REGION_SIZE)) {
> return;
> }
> +#endif
>
> g = find_granule(addr);
> if (g != NULL) {
>
> This is likely not the right fix but hopefully provides some guidance. Send me
> your patches when you have an idea and I'll test them.
We will send you the update once it is fixed in the RMM spec. The rough
idea is to remove the ALIGNMENT restrictions and return a Range that
the host can iterate over to find "regions" with the same type of
memory.
Cheers
Suzuki
>
> Thanks,
> Mathieu
>
>
>> + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
>> +
>> + while (start < end) {
>> + unsigned long ret, category, state;
>> +
>> + ret = rmi_granule_tracking_get(start, &category, &state);
>> + if (ret != RMI_SUCCESS ||
>> + state != RMI_TRACKING_FINE ||
>> + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
>> + /* TODO: Set granule tracking in this case */
>> + kvm_err("Granule tracking for region isn't fine/conventional: %llx",
>> + start);
>> + return -ENODEV;
>> + }
>> + start += RMM_GRANULE_TRACKING_SIZE;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static unsigned long rmi_l0gpt_size(void)
>> +{
>> + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
>> + rmm_feat_reg1));
>> +}
>> +
>> +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
>> +{
>> + unsigned long l0gpt_sz = rmi_l0gpt_size();
>> +
>> + start = ALIGN_DOWN(start, l0gpt_sz);
>> + end = ALIGN(end, l0gpt_sz);
>> +
>> + while (start < end) {
>> + int ret = rmi_gpt_l1_create(start);
>> +
>> + if (ret && ret != RMI_ERROR_GPT) {
>> + /*
>> + * FIXME: Handle SRO so that memory can be donated for
>> + * the tables.
>> + */
>> + kvm_err("GPT Level1 table missing for %llx\n", start);
>> + return -ENOMEM;
>> + }
>> + start += l0gpt_sz;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static int rmi_init_metadata(void)
>> +{
>> + phys_addr_t start, end;
>> + const struct memblock_region *r;
>> +
>> + for_each_mem_region(r) {
>> + int ret;
>> +
>> + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
>> + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
>> + ret = rmi_verify_memory_tracking(start, end);
>> + if (ret)
>> + return ret;
>> + ret = rmi_create_gpts(start, end);
>> + if (ret)
>> + return ret;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> static int rmm_check_features(void)
>> {
>> if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
>> @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
>> return;
>> if (rmi_configure())
>> return;
>> + if (rmi_init_metadata())
>> + return;
>>
>> /* Future patch will enable static branch kvm_rmi_is_available */
>> }
>> --
>> 2.43.0
>>
>>
On Tue, Mar 31, 2026 at 12:05:47PM +0100, Suzuki K Poulose wrote:
> Hi Mathieu,
>
> On 30/03/2026 21:58, Mathieu Poirier wrote:
> > Hi,
> >
> > On Wed, Mar 18, 2026 at 03:53:34PM +0000, Steven Price wrote:
> > > The RMM may not be tracking all the memory of the system at boot. Create
> > > the necessary tracking state and GPTs within the RMM so that all boot
> > > memory can be delegated to the RMM as needed during runtime.
> > >
> > > Note: support is currently missing for SROs which means that if the RMM
> > > needs memory donating this will fail (and render CCA unusable in Linux).
> > >
> > > Signed-off-by: Steven Price <steven.price@arm.com>
> > > ---
> > > New patch for v13
> > > ---
> > > arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
> > > 1 file changed, 89 insertions(+)
> > >
> > > diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> > > index 9590dff9a2c1..80aedc85e94a 100644
> > > --- a/arch/arm64/kvm/rmi.c
> > > +++ b/arch/arm64/kvm/rmi.c
> > > @@ -4,6 +4,7 @@
> > > */
> > > #include <linux/kvm_host.h>
> > > +#include <linux/memblock.h>
> > > #include <asm/kvm_pgtable.h>
> > > #include <asm/rmi_cmds.h>
> > > @@ -56,6 +57,18 @@ static int rmi_check_version(void)
> > > return 0;
> > > }
> > > +/*
> > > + * These are the 'default' sizes when passing 0 as the tracking_region_size.
> > > + * TODO: Support other granule sizes
> > > + */
> > > +#ifdef CONFIG_PAGE_SIZE_4KB
> > > +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
> > > +#elif defined(CONFIG_PAGE_SIZE_16KB)
> > > +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
> > > +#elif defined(CONFIG_PAGE_SIZE_64KB)
> > > +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
> > > +#endif
> > > +
> > > static int rmi_configure(void)
> > > {
> > > struct rmm_config *config __free(free_page) = NULL;
> > > @@ -95,6 +108,80 @@ static int rmi_configure(void)
> > > return 0;
> > > }
> > > +static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> > > +{
> > > + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> >
> > This will produce an error on systems where the start of system memory is not
> > aligned to RMM_GRANULE_TRACKING_SIZE. For instance, on QEMU-SBSA the system
> > memory starts at 0x100_4300_0000. With the above and RMM_GRANULE_TRACKING_SIZE
> > set to SZ_1G, @start becomes 0x100_4000_0000, which falls outside the memory map
> > known to the TF-A. I fixed it with these modifications:
>
> Thanks for raising this. This would need to be addressed in the RMM
> spec, I have raised it with the team and will be addressed soon.
>
> >
> > LINUX:
> >
> > diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> > index 10ff1c3bddaf..21bfbbe2f047 100644
> > --- a/arch/arm64/kvm/rmi.c
> > +++ b/arch/arm64/kvm/rmi.c
> > @@ -424,7 +424,9 @@ static int rmi_configure(void)
> > static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> > {
> > - start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> > + phys_addr_t offset;
> > +
> > + offset = start - ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> > end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
> > while (start < end) {
> > @@ -439,7 +441,13 @@ static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> > start);
> > return -ENODEV;
> > }
> > - start += RMM_GRANULE_TRACKING_SIZE;
> > +
> > + if (offset) {
> > + start += (RMM_GRANULE_TRACKING_SIZE - offset);
> > + offset = 0;
> > + } else {
> > + start += RMM_GRANULE_TRACKING_SIZE;
> > + }
> > }
> > return 0;
> >
> > RMM:
> >
> > diff --git a/runtime/rmi/granule.c b/runtime/rmi/granule.c
> > index cef521fc0869..60358d9ee81e 100644
> > --- a/runtime/rmi/granule.c
> > +++ b/runtime/rmi/granule.c
> > @@ -209,9 +209,11 @@ void smc_granule_tracking_get(unsigned long addr,
> > return;
> > }
> > +#if 0
> > if (!ALIGNED(addr, RMM_INTERNAL_TRACKING_REGION_SIZE)) {
> > return;
> > }
> > +#endif
> > g = find_granule(addr);
> > if (g != NULL) {
> >
> > This is likely not the right fix but hopefully provides some guidance. Send me
> > your patches when you have an idea and I'll test them.
>
> We will send you the update once it is fixed in the RMM spec. The rough idea
> is to remove the ALIGNMENT restrictions and return a Range that
> the host can iterate over to find "regions" with the same type of
> memory.
>
Ok, thanks for looking into this.
>
> Cheers
> Suzuki
>
>
> >
> > Thanks,
> > Mathieu
> >
> >
> > > + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
> > > +
> > > + while (start < end) {
> > > + unsigned long ret, category, state;
> > > +
> > > + ret = rmi_granule_tracking_get(start, &category, &state);
> > > + if (ret != RMI_SUCCESS ||
> > > + state != RMI_TRACKING_FINE ||
> > > + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
> > > + /* TODO: Set granule tracking in this case */
> > > + kvm_err("Granule tracking for region isn't fine/conventional: %llx",
> > > + start);
> > > + return -ENODEV;
> > > + }
> > > + start += RMM_GRANULE_TRACKING_SIZE;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static unsigned long rmi_l0gpt_size(void)
> > > +{
> > > + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
> > > + rmm_feat_reg1));
> > > +}
> > > +
> > > +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
> > > +{
> > > + unsigned long l0gpt_sz = rmi_l0gpt_size();
> > > +
> > > + start = ALIGN_DOWN(start, l0gpt_sz);
> > > + end = ALIGN(end, l0gpt_sz);
> > > +
> > > + while (start < end) {
> > > + int ret = rmi_gpt_l1_create(start);
> > > +
> > > + if (ret && ret != RMI_ERROR_GPT) {
> > > + /*
> > > + * FIXME: Handle SRO so that memory can be donated for
> > > + * the tables.
> > > + */
> > > + kvm_err("GPT Level1 table missing for %llx\n", start);
> > > + return -ENOMEM;
> > > + }
> > > + start += l0gpt_sz;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int rmi_init_metadata(void)
> > > +{
> > > + phys_addr_t start, end;
> > > + const struct memblock_region *r;
> > > +
> > > + for_each_mem_region(r) {
> > > + int ret;
> > > +
> > > + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
> > > + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
> > > + ret = rmi_verify_memory_tracking(start, end);
> > > + if (ret)
> > > + return ret;
> > > + ret = rmi_create_gpts(start, end);
> > > + if (ret)
> > > + return ret;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > static int rmm_check_features(void)
> > > {
> > > if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
> > > @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
> > > return;
> > > if (rmi_configure())
> > > return;
> > > + if (rmi_init_metadata())
> > > + return;
> > > /* Future patch will enable static branch kvm_rmi_is_available */
> > > }
> > > --
> > > 2.43.0
> > >
> > >
>
On 18/03/2026 15:53, Steven Price wrote:
> The RMM may not be tracking all the memory of the system at boot. Create
> the necessary tracking state and GPTs within the RMM so that all boot
> memory can be delegated to the RMM as needed during runtime.
>
> Note: support is currently missing for SROs which means that if the RMM
> needs memory donating this will fail (and render CCA unusable in Linux).
>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
> New patch for v13
> ---
> arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 89 insertions(+)
>
> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> index 9590dff9a2c1..80aedc85e94a 100644
> --- a/arch/arm64/kvm/rmi.c
> +++ b/arch/arm64/kvm/rmi.c
> @@ -4,6 +4,7 @@
> */
>
> #include <linux/kvm_host.h>
> +#include <linux/memblock.h>
>
> #include <asm/kvm_pgtable.h>
> #include <asm/rmi_cmds.h>
> @@ -56,6 +57,18 @@ static int rmi_check_version(void)
> return 0;
> }
>
> +/*
> + * These are the 'default' sizes when passing 0 as the tracking_region_size.
This is a little bit vague. Should we explicitly mention:
"For now we set the tracking_region_size to 0 for RMI_RMM_CONFIG_SET()"?
> + * TODO: Support other granule sizes
nit: s/granule/Tracking/
Suzuki
> + */
> +#ifdef CONFIG_PAGE_SIZE_4KB
> +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
> +#elif defined(CONFIG_PAGE_SIZE_16KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
> +#elif defined(CONFIG_PAGE_SIZE_64KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
> +#endif
> +
> static int rmi_configure(void)
> {
> struct rmm_config *config __free(free_page) = NULL;
> @@ -95,6 +108,80 @@ static int rmi_configure(void)
> return 0;
> }
>
> +static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
> +{
> + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
> +
> + while (start < end) {
> + unsigned long ret, category, state;
> +
> + ret = rmi_granule_tracking_get(start, &category, &state);
> + if (ret != RMI_SUCCESS ||
> + state != RMI_TRACKING_FINE ||
> + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
> + /* TODO: Set granule tracking in this case */
> + kvm_err("Granule tracking for region isn't fine/conventional: %llx",
> + start);
> + return -ENODEV;
> + }
> + start += RMM_GRANULE_TRACKING_SIZE;
> + }
> +
> + return 0;
> +}
> +
> +static unsigned long rmi_l0gpt_size(void)
> +{
> + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
> + rmm_feat_reg1));
> +}
> +
> +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
> +{
> + unsigned long l0gpt_sz = rmi_l0gpt_size();
> +
> + start = ALIGN_DOWN(start, l0gpt_sz);
> + end = ALIGN(end, l0gpt_sz);
> +
> + while (start < end) {
> + int ret = rmi_gpt_l1_create(start);
> +
> + if (ret && ret != RMI_ERROR_GPT) {
> + /*
> + * FIXME: Handle SRO so that memory can be donated for
> + * the tables.
> + */
> + kvm_err("GPT Level1 table missing for %llx\n", start);
> + return -ENOMEM;
> + }
> + start += l0gpt_sz;
> + }
> +
> + return 0;
> +}
> +
> +static int rmi_init_metadata(void)
> +{
> + phys_addr_t start, end;
> + const struct memblock_region *r;
> +
> + for_each_mem_region(r) {
> + int ret;
> +
> + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
> + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
> + ret = rmi_verify_memory_tracking(start, end);
> + if (ret)
> + return ret;
> + ret = rmi_create_gpts(start, end);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> static int rmm_check_features(void)
> {
> if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
> @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
> return;
> if (rmi_configure())
> return;
> + if (rmi_init_metadata())
> + return;
>
> /* Future patch will enable static branch kvm_rmi_is_available */
> }
Hi Steven
On 18/03/2026 15:53, Steven Price wrote:
> The RMM may not be tracking all the memory of the system at boot. Create
Looks good to me. Please find some suggestions below.
May be add a bit more context here :
RMM maintains the state of all the granules in the System to make sure
that the host is abiding by the rules. This state can be maintained at
different granularity - per PAGE (TRACKING_FINE) or per region (COARSE),
where the "region size" depends on the underlying "RMI_GRANULE_SIZE".
The state of a "tracked area" must be uniform. This implies we may
need to have "FINE" tracking for DRAM, so that we can start delegating
PAGEs. For now, we only support RMM with statically carved out memory
for tracking FINE granularity for the tracking regions. We will extend
the support for modifying the TRACKING region in the future.
Similarly, the firmware may create L0 GPT entries describing the total
address space (think of this as Block mappings in the page tables). But
if we change the "PAS" of a granule in the block mapping, we may need
to create L1 tables to track the PAS at the finer granularity. For now
we only support a system where the L1 GPTs are created at boot time
and dynamic GPT support will be added later.
> the necessary tracking state and GPTs within the RMM so that all boot
> memory can be delegated to the RMM as needed during runtime.
>
> Note: support is currently missing for SROs which means that if the RMM
> needs memory donating this will fail (and render CCA unusable in Linux).
>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
> New patch for v13
> ---
> arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 89 insertions(+)
>
> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
> index 9590dff9a2c1..80aedc85e94a 100644
> --- a/arch/arm64/kvm/rmi.c
> +++ b/arch/arm64/kvm/rmi.c
> @@ -4,6 +4,7 @@
> */
>
> #include <linux/kvm_host.h>
> +#include <linux/memblock.h>
>
> #include <asm/kvm_pgtable.h>
> #include <asm/rmi_cmds.h>
> @@ -56,6 +57,18 @@ static int rmi_check_version(void)
> return 0;
> }
>
> +/*
> + * These are the 'default' sizes when passing 0 as the tracking_region_size.
> + * TODO: Support other granule sizes
> + */
> +#ifdef CONFIG_PAGE_SIZE_4KB
> +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
> +#elif defined(CONFIG_PAGE_SIZE_16KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
> +#elif defined(CONFIG_PAGE_SIZE_64KB)
> +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
> +#endif
> +
Probably this should be made a Kconfig option, like the VA_BITS we have
today for each page size.
> static int rmi_configure(void)
> {
> struct rmm_config *config __free(free_page) = NULL;
> @@ -95,6 +108,80 @@ static int rmi_configure(void)
> return 0;
> }
>
> +static int rmi_verify_memory_tracking(phys_addr_t start, phys_addr_t end)
Could we add a comment explaining what we are trying to do here?
/*
* Make sure the area is tracked by RMM at FINE granularity.
* We do not support changing the TRACKING yet. This will
* be added in the future.
*/
> +{
> + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
> + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
> +
> + while (start < end) {
> + unsigned long ret, category, state;
> +
> + ret = rmi_granule_tracking_get(start, &category, &state);
> + if (ret != RMI_SUCCESS ||
> + state != RMI_TRACKING_FINE ||
> + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
> + /* TODO: Set granule tracking in this case */
> + kvm_err("Granule tracking for region isn't fine/conventional: %llx",
> + start);
> + return -ENODEV;
> + }
> + start += RMM_GRANULE_TRACKING_SIZE;
> + }
> +
> + return 0;
> +}
> +
> +static unsigned long rmi_l0gpt_size(void)
> +{
> + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
> + rmm_feat_reg1));
> +}
> +
> +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
> +{
> + unsigned long l0gpt_sz = rmi_l0gpt_size();
> +
> + start = ALIGN_DOWN(start, l0gpt_sz);
> + end = ALIGN(end, l0gpt_sz);
> +
> + while (start < end) {
> + int ret = rmi_gpt_l1_create(start);
How about adding a comment here explaining why we look for RMI_ERROR_GPT?
>
/*
* Make sure the L1 GPT tables are created for the region.
* RMI_ERROR_GPT indicates the L1 table exists.
*/
+
> + if (ret && ret != RMI_ERROR_GPT) {
> + /*
> + * FIXME: Handle SRO so that memory can be donated for
> + * the tables.
> + */
> + kvm_err("GPT Level1 table missing for %llx\n", start);
> + return -ENOMEM;
> + }
> + start += l0gpt_sz;
> + }
> +
> + return 0;
> +}
> +
> +static int rmi_init_metadata(void)
> +{
> + phys_addr_t start, end;
> + const struct memblock_region *r;
> +
> + for_each_mem_region(r) {
> + int ret;
> +
> + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
> + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
> + ret = rmi_verify_memory_tracking(start, end);
> + if (ret)
> + return ret;
> + ret = rmi_create_gpts(start, end);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> static int rmm_check_features(void)
> {
> if (kvm_lpa2_is_enabled() && !rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
> @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
> return;
> if (rmi_configure())
> return;
> + if (rmi_init_metadata())
> + return;
>
> /* Future patch will enable static branch kvm_rmi_is_available */
> }
On 19/03/2026 10:31, Suzuki K Poulose wrote:
> Hi Steven
>
> On 18/03/2026 15:53, Steven Price wrote:
>> The RMM may not be tracking all the memory of the system at boot. Create
>
> Looks good to me. Please find some suggestions below.
>
>
> May be add a bit more context here :
>
> RMM maintains the state of all the granules in the System to make sure
> that the host is abiding by the rules. This state can be maintained at
> different granularity - per PAGE (TRACKING_FINE) or per region (COARSE),
> where the "region size" depends on the underlying "RMI_GRANULE_SIZE".
> The state of the "tracked area" must be the same. This implies, we may
> need to have "FINE" tracking for DRAM, so that we can start delegating
> PAGEs. For now, we only support RMM with statically carved out memory
> for tracking FINE granularity for the tracking regions. We will extend
> the support for modifying the TRACKING region in the future.
>
> Similarly, the firmware may create L0 GPT entries describing the total
> address space (think of this as Block mappings in the page tables). But
> if we change the "PAS" of a granule in the block mapping, we may need
> to create L1 tables to track the PAS at the finer granularity. For now
> we only support a system where the L1 GPTs are created at boot time
> and dynamic GPT support will be added later.
Thanks for the wording - that does indeed make things clearer. SRO
support will effectively enable the "future" items.
>> the necessary tracking state and GPTs within the RMM so that all boot
>> memory can be delegated to the RMM as needed during runtime.
>>
>> Note: support is currently missing for SROs which means that if the RMM
>> needs memory donating this will fail (and render CCA unusable in Linux).
>>
>> Signed-off-by: Steven Price <steven.price@arm.com>
>> ---
>> New patch for v13
>> ---
>> arch/arm64/kvm/rmi.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
>> 1 file changed, 89 insertions(+)
>>
>> diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
>> index 9590dff9a2c1..80aedc85e94a 100644
>> --- a/arch/arm64/kvm/rmi.c
>> +++ b/arch/arm64/kvm/rmi.c
>> @@ -4,6 +4,7 @@
>> */
>> #include <linux/kvm_host.h>
>> +#include <linux/memblock.h>
>> #include <asm/kvm_pgtable.h>
>> #include <asm/rmi_cmds.h>
>> @@ -56,6 +57,18 @@ static int rmi_check_version(void)
>> return 0;
>> }
>> +/*
>> + * These are the 'default' sizes when passing 0 as the
>> tracking_region_size.
>> + * TODO: Support other granule sizes
>> + */
>> +#ifdef CONFIG_PAGE_SIZE_4KB
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_1G
>> +#elif defined(CONFIG_PAGE_SIZE_16KB)
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_32M
>> +#elif defined(CONFIG_PAGE_SIZE_64KB)
>> +#define RMM_GRANULE_TRACKING_SIZE SZ_512M
>> +#endif
>> +
>
> Probably this should be made a Kconfig option, like the VA_BITS we have
> today for each page size.
Yes, that's probably a good option - note that for 4k page size there is
only the one option in the spec. So this is only relevant for 16K/64K.
Thanks for the other comment suggestions below (and in the other email)
- all good points.
Thanks,
Steve
>> static int rmi_configure(void)
>> {
>> struct rmm_config *config __free(free_page) = NULL;
>> @@ -95,6 +108,80 @@ static int rmi_configure(void)
>> return 0;
>> }
>> +static int rmi_verify_memory_tracking(phys_addr_t start,
>> phys_addr_t end)
>
> Could we add a comment on what we are trying to do here?
>
> /*
> * Make sure the area is tracked by RMM at FINE granularity.
> * We do not support changing the TRACKING yet. This will
> * be added in the future.
> */
>
>
>> +{
>> + start = ALIGN_DOWN(start, RMM_GRANULE_TRACKING_SIZE);
>> + end = ALIGN(end, RMM_GRANULE_TRACKING_SIZE);
>> +
>> + while (start < end) {
>> + unsigned long ret, category, state;
>> +
>> + ret = rmi_granule_tracking_get(start, &category, &state);
>> + if (ret != RMI_SUCCESS ||
>> + state != RMI_TRACKING_FINE ||
>> + category != RMI_MEM_CATEGORY_CONVENTIONAL) {
>> + /* TODO: Set granule tracking in this case */
>> + kvm_err("Granule tracking for region isn't fine/
>> conventional: %llx",
>> + start);
>> + return -ENODEV;
>> + }
>> + start += RMM_GRANULE_TRACKING_SIZE;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static unsigned long rmi_l0gpt_size(void)
>> +{
>> + return 1UL << (30 + FIELD_GET(RMI_FEATURE_REGISTER_1_L0GPTSZ,
>> + rmm_feat_reg1));
>> +}
>> +
>> +static int rmi_create_gpts(phys_addr_t start, phys_addr_t end)
>> +{
>> + unsigned long l0gpt_sz = rmi_l0gpt_size();
>> +
>> + start = ALIGN_DOWN(start, l0gpt_sz);
>> + end = ALIGN(end, l0gpt_sz);
>> +
>> + while (start < end) {
>> + int ret = rmi_gpt_l1_create(start);
>
> How about adding a comment here explaining why we look for RMI_ERROR_GPT?
>
>
>>
> /*
> * Make sure the L1 GPT tables are created for the region.
> * RMI_ERROR_GPT indicates the L1 table exists.
> */
> +
>> + if (ret && ret != RMI_ERROR_GPT) {
>
>
>> + /*
>> + * FIXME: Handle SRO so that memory can be donated for
>> + * the tables.
>> + */
>> + kvm_err("GPT Level1 table missing for %llx\n", start);
>> + return -ENOMEM;
>> + }
>> + start += l0gpt_sz;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static int rmi_init_metadata(void)
>> +{
>> + phys_addr_t start, end;
>> + const struct memblock_region *r;
>> +
>> + for_each_mem_region(r) {
>> + int ret;
>> +
>> + start = memblock_region_memory_base_pfn(r) << PAGE_SHIFT;
>> + end = memblock_region_memory_end_pfn(r) << PAGE_SHIFT;
>> + ret = rmi_verify_memory_tracking(start, end);
>> + if (ret)
>> + return ret;
>> + ret = rmi_create_gpts(start, end);
>> + if (ret)
>> + return ret;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> static int rmm_check_features(void)
>> {
>> if (kvm_lpa2_is_enabled() && !
>> rmi_has_feature(RMI_FEATURE_REGISTER_0_LPA2)) {
>> @@ -120,6 +207,8 @@ void kvm_init_rmi(void)
>> return;
>> if (rmi_configure())
>> return;
>> + if (rmi_init_metadata())
>> + return;
>> /* Future patch will enable static branch kvm_rmi_is_available */
>> }
>
© 2016 - 2026 Red Hat, Inc.