Tracking the hypervisor's ownership state in struct hyp_page has
several benefits, including allowing far more efficient lookups (no
page-table walk needed) and de-correlating the state from the presence
of a mapping. This will later allow us to map pages into the EL2 stage-1
less proactively, which is generally a good thing for security. And in
the future this will help with tracking the state of pages mapped into
the hypervisor's private range without requiring an alias into the
'linear map' range.
Signed-off-by: Quentin Perret <qperret@google.com>
---
arch/arm64/kvm/hyp/include/nvhe/memory.h | 20 +++++++++-
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 51 ++++++++++++------------
arch/arm64/kvm/hyp/nvhe/setup.c | 6 ++-
3 files changed, 49 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 4a3c55d26ef3..cc4c01158368 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -22,6 +22,7 @@ enum pkvm_page_state {
/* Meta-states which aren't encoded directly in the PTE's SW bits */
PKVM_NOPAGE = BIT(0) | BIT(1),
};
+#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))

#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
@@ -42,7 +43,14 @@ struct hyp_page {
u8 order;

/* Host (non-meta) state. Guarded by the host stage-2 lock. */
- unsigned __host_state : 8;
+ unsigned __host_state : 4;
+
+ /*
+ * Complement of the hyp (non-meta) state. Guarded by the hyp stage-1 lock. We use the
+ * complement so that the initial 0 in __hyp_state_comp (due to the entire vmemmap starting
+ * off zeroed) encodes PKVM_NOPAGE.
+ */
+ unsigned __hyp_state_comp : 4;

u32 host_share_guest_count;
};
@@ -89,6 +97,16 @@ static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
hyp_phys_to_page(phys)->__host_state = state;
}

+static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
+{
+ return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+}
+
+static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
+{
+ hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+}
+
/*
* Refcounting for 'struct hyp_page'.
* hyp_pool::lock must be held if atomic access to the refcount is required.
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index a45ffdec7612..3ab8c81500c2 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -642,24 +642,24 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return 0;
}

-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
+static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- if (!kvm_pte_valid(pte))
- return PKVM_NOPAGE;
+ phys_addr_t end = phys + size;

- return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ for (; phys < end; phys += PAGE_SIZE)
+ set_hyp_state(phys, state);
}

-static int __hyp_check_page_state_range(u64 addr, u64 size,
- enum pkvm_page_state state)
+static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- struct check_walk_data d = {
- .desired = state,
- .get_page_state = hyp_get_page_state,
- };
+ phys_addr_t end = phys + size;
+
+ for (; phys < end; phys += PAGE_SIZE) {
+ if (get_hyp_state(phys) != state)
+ return -EPERM;
+ }

- hyp_assert_lock_held(&pkvm_pgd_lock);
- return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+ return 0;
}

static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
@@ -687,7 +687,6 @@ int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
u64 size = PAGE_SIZE;
int ret;

@@ -698,13 +697,13 @@ int __pkvm_host_share_hyp(u64 pfn)
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}

- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));

unlock:
@@ -727,7 +726,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;
- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
if (hyp_page_count((void *)virt)) {
@@ -735,6 +734,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
goto unlock;
}

+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
@@ -750,7 +750,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
- enum kvm_pgtable_prot prot;
int ret;

host_lock_component();
@@ -760,13 +759,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
- ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}

- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
- WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+ __hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+ WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));

unlock:
@@ -786,7 +785,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
host_lock_component();
hyp_lock_component();

- ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
@@ -795,6 +794,7 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
goto unlock;
}

+ __hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
@@ -809,19 +809,18 @@ int hyp_pin_shared_mem(void *from, void *to)
{
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
u64 end = PAGE_ALIGN((u64)to);
+ u64 phys = __hyp_pa(start);
u64 size = end - start;
int ret;

host_lock_component();
hyp_lock_component();

- ret = __host_check_page_state_range(__hyp_pa(start), size,
- PKVM_PAGE_SHARED_OWNED);
+ ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (ret)
goto unlock;

- ret = __hyp_check_page_state_range(start, size,
- PKVM_PAGE_SHARED_BORROWED);
+ ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
if (ret)
goto unlock;
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 1a414288fe8c..955c431af5d0 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -194,16 +194,20 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,

/*
* Adjust the host stage-2 mappings to match the ownership attributes
- * configured in the hypervisor stage-1.
+ * configured in the hypervisor stage-1, and make sure to propagate them
+ * to the hyp_vmemmap state.
*/
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
+ set_hyp_state(phys, PKVM_PAGE_OWNED);
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
+ set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
+ set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
break;
default:
--
2.48.1.658.g4767266eb4-goog

On Thu, 27 Feb 2025 00:33:08 +0000,
Quentin Perret <qperret@google.com> wrote:
>
> Tracking the hypervisor's ownership state in struct hyp_page has
> several benefits, including allowing far more efficient lookups (no
> page-table walk needed) and de-correlating the state from the presence
> of a mapping. This will later allow us to map pages into the EL2 stage-1
> less proactively, which is generally a good thing for security. And in
> the future this will help with tracking the state of pages mapped into
> the hypervisor's private range without requiring an alias into the
> 'linear map' range.
>
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
> arch/arm64/kvm/hyp/include/nvhe/memory.h | 20 +++++++++-
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 51 ++++++++++++------------
> arch/arm64/kvm/hyp/nvhe/setup.c | 6 ++-
> 3 files changed, 49 insertions(+), 28 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> index 4a3c55d26ef3..cc4c01158368 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> @@ -22,6 +22,7 @@ enum pkvm_page_state {
> /* Meta-states which aren't encoded directly in the PTE's SW bits */
> PKVM_NOPAGE = BIT(0) | BIT(1),
> };
> +#define PKVM_PAGE_STATE_MASK (BIT(0) | BIT(1))
>
> #define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
> static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
> @@ -42,7 +43,14 @@ struct hyp_page {
> u8 order;
>
> /* Host (non-meta) state. Guarded by the host stage-2 lock. */
> - unsigned __host_state : 8;
> + unsigned __host_state : 4;
> +
> + /*
> + * Complement of the hyp (non-meta) state. Guarded by the hyp stage-1 lock. We use the
> + * complement so that the initial 0 in __hyp_state_comp (due to the entire vmemmap starting
> + * off zeroed) encodes PKVM_NOPAGE.
> + */
> + unsigned __hyp_state_comp : 4;
>
> u32 host_share_guest_count;
> };
> @@ -89,6 +97,16 @@ static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
> hyp_phys_to_page(phys)->__host_state = state;
> }
>
> +static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
> +{
> + return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
> +}
> +
> +static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
> +{
> + hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
> +}
> +
> /*
> * Refcounting for 'struct hyp_page'.
> * hyp_pool::lock must be held if atomic access to the refcount is required.
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index a45ffdec7612..3ab8c81500c2 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -642,24 +642,24 @@ static int __host_set_page_state_range(u64 addr, u64 size,
> return 0;
> }
>
> -static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
> +static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
> {
> - if (!kvm_pte_valid(pte))
> - return PKVM_NOPAGE;
> + phys_addr_t end = phys + size;
>
> - return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
> + for (; phys < end; phys += PAGE_SIZE)
> + set_hyp_state(phys, state);
> }
>
> -static int __hyp_check_page_state_range(u64 addr, u64 size,
> - enum pkvm_page_state state)
> +static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
> {
> - struct check_walk_data d = {
> - .desired = state,
> - .get_page_state = hyp_get_page_state,
> - };
> + phys_addr_t end = phys + size;
> +
> + for (; phys < end; phys += PAGE_SIZE) {
> + if (get_hyp_state(phys) != state)
> + return -EPERM;
> + }
>
> - hyp_assert_lock_held(&pkvm_pgd_lock);
> - return check_page_state_range(&pkvm_pgtable, addr, size, &d);
> + return 0;
> }
>
> static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
> @@ -687,7 +687,6 @@ int __pkvm_host_share_hyp(u64 pfn)
> {
> u64 phys = hyp_pfn_to_phys(pfn);
> void *virt = __hyp_va(phys);
> - enum kvm_pgtable_prot prot;
> u64 size = PAGE_SIZE;
> int ret;
>
> @@ -698,13 +697,13 @@ int __pkvm_host_share_hyp(u64 pfn)
> if (ret)
> goto unlock;
> if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
> - ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
> + ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);

OK, I think I finally clicked here. Does it mean that all the tracking
is now done in terms of PAs instead of VAs?

> if (ret)
> goto unlock;
> }
>
> - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> - WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
> + __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
> + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
And this is the split between the state, which is now kept on a PA
basis, and the actual mapping, which now only takes the page attributes
and no SW bits?

Thanks,
M.

--
Without deviation from the norm, progress is not possible.

On Friday 14 Mar 2025 at 11:31:36 (+0000), Marc Zyngier wrote:
> On Thu, 27 Feb 2025 00:33:08 +0000,
> Quentin Perret <qperret@google.com> wrote:
> > @@ -698,13 +697,13 @@ int __pkvm_host_share_hyp(u64 pfn)
> > if (ret)
> > goto unlock;
> > if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
> > - ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
> > + ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
>
> OK, I think I finally clicked here. Does it mean that all the tracking
> is now done in terms of PAs instead of VAs?

Yep, that's exactly that. The hyp_vmemmap is indexed by pfn, so I felt
that the conversion to PA-based tracking made sense. That also makes it
clear that the 'hyp state' is not a property of a mapping, but really of
the underlying physical page.
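For reference, the lookup chain is roughly the following (a sketch of
the existing nvhe/memory.h helpers, modulo their exact definitions):

	#define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
	#define hyp_phys_to_page(phys)	(&hyp_vmemmap[hyp_phys_to_pfn(phys)])

so get_hyp_state(phys) boils down to a single vmemmap access rather
than a stage-1 page-table walk.
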
> > if (ret)
> > goto unlock;
> > }
> >
> > - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > - WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
> > + __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
> > + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
>
> > And this is the split between the state, which is now kept on a PA
> > basis, and the actual mapping, which now only takes the page attributes
> > and no SW bits?

Precisely, and the next patch in this series takes advantage of the
fact that we're now de-correlating the hyp state from the presence of a
hyp s1 mapping in the linear map range. In the future there'll be more
use-cases for this I think (e.g. the hyp allocator where we'll have
pages owned by the hypervisor but only mapped in the 'private' range,
things like that).
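As a sketch of what that enables (a hypothetical future call site, not
code from this series), ownership could be recorded without ever
creating a linear-map alias:

	/* Donate to hyp without a hyp stage-1 mapping in the linear map. */
	__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
	/* Map into the EL2 'private' range later, only if/when needed. */
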
Thanks,
Quentin

On Fri, 14 Mar 2025 14:06:48 +0000,
Quentin Perret <qperret@google.com> wrote:
>
> On Friday 14 Mar 2025 at 11:31:36 (+0000), Marc Zyngier wrote:
> > On Thu, 27 Feb 2025 00:33:08 +0000,
> > Quentin Perret <qperret@google.com> wrote:
> > > @@ -698,13 +697,13 @@ int __pkvm_host_share_hyp(u64 pfn)
> > > if (ret)
> > > goto unlock;
> > > if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
> > > - ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
> > > + ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
> >
> > OK, I think I finally clicked here. Does it mean that all the tracking
> > is now done in terms of PAs instead of VAs?
>
> Yep, that's exactly that. The hyp_vmemmap is indexed by pfn, so I felt
> that the conversion to PA-based tracking made sense. That also makes it
> clear that the 'hyp state' is not a property of a mapping, but really of
> the underlying physical page.

It indeed makes sense. It is just that it took me some time to realise
the extent of the change.
>
> > > if (ret)
> > > goto unlock;
> > > }
> > >
> > > - prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
> > > - WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
> > > + __hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
> > > + WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
> >
> > And this is the split between the state, which is now kept on a PA
> > basis, and the actual mapping, which now only takes the page attributes
> > and no SW bits?
>
> Precisely, and the next patch in this series takes advantage of the
> fact that we're now de-correlating the hyp state from the presence of a
> hyp s1 mapping in the linear map range. In the future there'll be more
> use-cases for this I think (e.g. the hyp allocator where we'll have
> pages owned by the hypervisor but only mapped in the 'private' range,
> things like that).

Yup, that's probably the correct direction of travel. The hypervisor
shouldn't need to map everything -- quite the opposite actually.

Thanks,
M.
--
Without deviation from the norm, progress is not possible.

[...]

> diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> index 1a414288fe8c..955c431af5d0 100644
> --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> @@ -194,16 +194,20 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
>
> /*
> * Adjust the host stage-2 mappings to match the ownership attributes
> - * configured in the hypervisor stage-1.
> + * configured in the hypervisor stage-1, and make sure to propagate them
> + * to the hyp_vmemmap state.
> */
> state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
> switch (state) {
> case PKVM_PAGE_OWNED:
> + set_hyp_state(phys, PKVM_PAGE_OWNED);
> return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
> case PKVM_PAGE_SHARED_OWNED:
> + set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
> set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
> break;
> case PKVM_PAGE_SHARED_BORROWED:
> + set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
> set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
> break;
> default:

Are the SHARED_OWNED/SHARED_BORROWED states still relevant since the introduction of
"KVM: arm64: Don't map 'kvm_vgic_global_state' at EL2 with pKVM"? It doesn't
seem like we have any !OWNED pages in the hyp anymore at setup, do we?

> --
> 2.48.1.658.g4767266eb4-goog
>

On Monday 03 Mar 2025 at 09:47:48 (+0000), Vincent Donnefort wrote:
> [...]
>
> > diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
> > index 1a414288fe8c..955c431af5d0 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/setup.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/setup.c
> > @@ -194,16 +194,20 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
> >
> > /*
> > * Adjust the host stage-2 mappings to match the ownership attributes
> > - * configured in the hypervisor stage-1.
> > + * configured in the hypervisor stage-1, and make sure to propagate them
> > + * to the hyp_vmemmap state.
> > */
> > state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
> > switch (state) {
> > case PKVM_PAGE_OWNED:
> > + set_hyp_state(phys, PKVM_PAGE_OWNED);
> > return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
> > case PKVM_PAGE_SHARED_OWNED:
> > + set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
> > set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
> > break;
> > case PKVM_PAGE_SHARED_BORROWED:
> > + set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
> > set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
> > break;
> > default:
>
> Are the SHARED_OWNED/SHARED_BORROWED states still relevant since the introduction of
> "KVM: arm64: Don't map 'kvm_vgic_global_state' at EL2 with pKVM"? It doesn't
> seem like we have any !OWNED pages in the hyp anymore at setup, do we?

That's a good point. I personally don't hate that we have this code here
for completeness though -- it's simple enough that maintaining it isn't
too bad, and if we were to add shared pages in the future it would 'just
work'. But no strong opinion. I guess we could also remove this code as
a separate clean-up, since this isn't specific to this series.

Thanks!
Quentin