Introduce a registry to track protected MMIO regions that are unmapped
from the host stage-2 page tables. These regions are stored in a
fixed-size array and their ownership is donated to the hypervisor during
initialization to ensure host-exclusion and persistent tracking.
Signed-off-by: Sebastian Ene <sebastianene@google.com>
---
arch/arm64/include/asm/kvm_pkvm.h | 10 ++++++++++
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +++
arch/arm64/kvm/hyp/nvhe/setup.c | 25 +++++++++++++++++++++++++
3 files changed, 38 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index 757076ad4ec9..48ec7d519399 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -17,6 +17,16 @@
#define HYP_MEMBLOCK_REGIONS 128
+#define PKVM_PROTECTED_REGS_NUM 8
+
+struct pkvm_protected_reg {
+ u64 start_pfn;
+ size_t num_pages;
+};
+
+extern struct pkvm_protected_reg kvm_nvhe_sym(pkvm_protected_regs)[];
+extern unsigned int kvm_nvhe_sym(num_protected_reg);
+
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
bool pkvm_hyp_vm_is_created(struct kvm *kvm);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 0808367c52e5..7c125836b533 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -23,6 +23,9 @@
struct host_mmu host_mmu;
+struct pkvm_protected_reg pkvm_protected_regs[PKVM_PROTECTED_REGS_NUM];
+unsigned int num_protected_reg;
+
static struct hyp_pool host_s2_pool;
static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 90bd014e952f..ad5b96085e1b 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -284,6 +284,27 @@ static int fix_hyp_pgtable_refcnt(void)
&walker);
}
+static int unmap_protected_regions(void)
+{
+ struct pkvm_protected_reg *reg;
+ int i, ret, j = 0;
+
+ for (i = 0; i < num_protected_reg; i++) {
+ reg = &pkvm_protected_regs[i];
+ for (j = 0; j < reg->num_pages; j++) {
+ ret = __pkvm_host_donate_hyp_mmio(reg->start_pfn + j);
+ if (ret)
+ goto err_setup;
+ }
+ }
+
+ return 0;
+err_setup:
+ for (j = j - 1; j >= 0; j--)
+ __pkvm_hyp_donate_host_mmio(reg->start_pfn + j);
+ return ret;
+}
+
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
@@ -324,6 +345,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
+ ret = unmap_protected_regions();
+ if (ret)
+ goto out;
+
ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;
--
2.53.0.473.g4a7958ca14-goog
On Tue, Mar 10, 2026 at 12:49:21PM +0000, Sebastian Ene wrote:
> Introduce a registry to track protected MMIO regions that are unmapped
> from the host stage-2 page tables. These regions are stored in a
> fixed-size array and their ownership is donated to the hypervisor during
> initialization to ensure host-exclusion and persistent tracking.
>
> Signed-off-by: Sebastian Ene <sebastianene@google.com>
> ---
> arch/arm64/include/asm/kvm_pkvm.h | 10 ++++++++++
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +++
> arch/arm64/kvm/hyp/nvhe/setup.c | 25 +++++++++++++++++++++++++
> 3 files changed, 38 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
> index 757076ad4ec9..48ec7d519399 100644
> --- a/arch/arm64/include/asm/kvm_pkvm.h
> +++ b/arch/arm64/include/asm/kvm_pkvm.h
> @@ -17,6 +17,16 @@
>
> #define HYP_MEMBLOCK_REGIONS 128
>
> +#define PKVM_PROTECTED_REGS_NUM 8
> +
> +struct pkvm_protected_reg {
> + u64 start_pfn;
> + size_t num_pages;
nit: "u64 pfn, u64 nr_pages" to align with everywhere else.
> +};
> +
> +extern struct pkvm_protected_reg kvm_nvhe_sym(pkvm_protected_regs)[];
> +extern unsigned int kvm_nvhe_sym(num_protected_reg);
> +
> int pkvm_init_host_vm(struct kvm *kvm);
> int pkvm_create_hyp_vm(struct kvm *kvm);
> bool pkvm_hyp_vm_is_created(struct kvm *kvm);
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 0808367c52e5..7c125836b533 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -23,6 +23,9 @@
>
> struct host_mmu host_mmu;
>
> +struct pkvm_protected_reg pkvm_protected_regs[PKVM_PROTECTED_REGS_NUM];
> +unsigned int num_protected_reg;
> +
> static struct hyp_pool host_s2_pool;
>
> static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
> diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> index 90bd014e952f..ad5b96085e1b 100644
> --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> @@ -284,6 +284,27 @@ static int fix_hyp_pgtable_refcnt(void)
> &walker);
> }
>
> +static int unmap_protected_regions(void)
> +{
> + struct pkvm_protected_reg *reg;
> + int i, ret, j = 0;
> +
> + for (i = 0; i < num_protected_reg; i++) {
> + reg = &pkvm_protected_regs[i];
> + for (j = 0; j < reg->num_pages; j++) {
> + ret = __pkvm_host_donate_hyp_mmio(reg->start_pfn + j);
If this is to make this static at boot, we don't even need __pkvm_host_donate_hyp_mmio().
We can just map the region early enough in the hypervisor with pkvm_create_mappings()
in recreate_hyp_mappings(), and then let fix_host_ownership() do the host
stage-2 unmapping.
> + if (ret)
> + goto err_setup;
> + }
> + }
> +
> + return 0;
> +err_setup:
> + for (j = j - 1; j >= 0; j--)
> + __pkvm_hyp_donate_host_mmio(reg->start_pfn + j);
> + return ret;
> +}
> +
> void __noreturn __pkvm_init_finalise(void)
> {
> struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
> @@ -324,6 +345,10 @@ void __noreturn __pkvm_init_finalise(void)
> if (ret)
> goto out;
>
> + ret = unmap_protected_regions();
> + if (ret)
> + goto out;
> +
> ret = hyp_ffa_init(ffa_proxy_pages);
> if (ret)
> goto out;
> --
> 2.53.0.473.g4a7958ca14-goog
>
Hi Sebastian,
On Tue, 10 Mar 2026 at 12:49, Sebastian Ene <sebastianene@google.com> wrote:
>
> Introduce a registry to track protected MMIO regions that are unmapped
> from the host stage-2 page tables. These regions are stored in a
> fixed-size array and their ownership is donated to the hypervisor during
> initialization to ensure host-exclusion and persistent tracking.
>
> Signed-off-by: Sebastian Ene <sebastianene@google.com>
> ---
> arch/arm64/include/asm/kvm_pkvm.h | 10 ++++++++++
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +++
> arch/arm64/kvm/hyp/nvhe/setup.c | 25 +++++++++++++++++++++++++
> 3 files changed, 38 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
> index 757076ad4ec9..48ec7d519399 100644
> --- a/arch/arm64/include/asm/kvm_pkvm.h
> +++ b/arch/arm64/include/asm/kvm_pkvm.h
> @@ -17,6 +17,16 @@
>
> #define HYP_MEMBLOCK_REGIONS 128
>
> +#define PKVM_PROTECTED_REGS_NUM 8
> +
> +struct pkvm_protected_reg {
> + u64 start_pfn;
> + size_t num_pages;
> +};
> +
> +extern struct pkvm_protected_reg kvm_nvhe_sym(pkvm_protected_regs)[];
> +extern unsigned int kvm_nvhe_sym(num_protected_reg);
> +
> int pkvm_init_host_vm(struct kvm *kvm);
> int pkvm_create_hyp_vm(struct kvm *kvm);
> bool pkvm_hyp_vm_is_created(struct kvm *kvm);
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 0808367c52e5..7c125836b533 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -23,6 +23,9 @@
>
> struct host_mmu host_mmu;
>
> +struct pkvm_protected_reg pkvm_protected_regs[PKVM_PROTECTED_REGS_NUM];
> +unsigned int num_protected_reg;
> +
> static struct hyp_pool host_s2_pool;
>
> static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
> diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> index 90bd014e952f..ad5b96085e1b 100644
> --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> @@ -284,6 +284,27 @@ static int fix_hyp_pgtable_refcnt(void)
> &walker);
> }
>
> +static int unmap_protected_regions(void)
I think the name of this function is confusing. It's not really
unmapping, it's donating the regions to hyp. Maybe
donate_protected_regions() or claim_protected_mmio_regions to reflect
that we are doing an ownership transfer?
> +{
> + struct pkvm_protected_reg *reg;
> + int i, ret, j = 0;
Please don't interleave the iterators with ret. Moreover, you can
define ret inside the loop below. Also, you don't need to initialize j.
> +
> + for (i = 0; i < num_protected_reg; i++) {
> + reg = &pkvm_protected_regs[i];
> + for (j = 0; j < reg->num_pages; j++) {
> + ret = __pkvm_host_donate_hyp_mmio(reg->start_pfn + j);
> + if (ret)
> + goto err_setup;
> + }
> + }
> +
> + return 0;
> +err_setup:
> + for (j = j - 1; j >= 0; j--)
> + __pkvm_hyp_donate_host_mmio(reg->start_pfn + j);
This rolls back the regions only for the latest `i` iteration, not all
the `i` iterations.
How about (not tested):
+ err_setup:
+ while (j--)
+ __pkvm_hyp_donate_host_mmio(reg->start_pfn + j);
+
+ while (i--) {
+ reg = &pkvm_protected_regs[i];
+ for (j = reg->num_pages - 1; j >= 0; j--)
+ __pkvm_hyp_donate_host_mmio(reg->start_pfn + j);
+ }
Thanks,
/fuad
> + return ret;
> +}
> +
> void __noreturn __pkvm_init_finalise(void)
> {
> struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
> @@ -324,6 +345,10 @@ void __noreturn __pkvm_init_finalise(void)
> if (ret)
> goto out;
>
> + ret = unmap_protected_regions();
> + if (ret)
> + goto out;
> +
> ret = hyp_ffa_init(ffa_proxy_pages);
> if (ret)
> goto out;
> --
> 2.53.0.473.g4a7958ca14-goog
>
© 2016 - 2026 Red Hat, Inc.