Add a helper to iterate over the hypervisor vmemmap. This will be
particularly handy with the introduction of huge mapping support
for the np-guest stage-2.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index eb0c2ebd1743..676a0a13c741 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -96,24 +96,24 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
-static inline enum pkvm_page_state get_host_state(phys_addr_t phys)
+static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
{
- return (enum pkvm_page_state)hyp_phys_to_page(phys)->__host_state;
+ return (enum pkvm_page_state)p->__host_state;
}
-static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
+static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
{
- hyp_phys_to_page(phys)->__host_state = state;
+ p->__host_state = state;
}
-static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
+static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
{
- return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+ return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
}
-static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
+static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
{
- hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+ p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
}
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 23544928a637..4d269210dae0 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -60,6 +60,9 @@ static void hyp_unlock_component(void)
hyp_spin_unlock(&pkvm_pgd_lock);
}
+#define for_each_hyp_page(start, size, page) \
+ for (page = hyp_phys_to_page(start); page < hyp_phys_to_page((start) + (size)); page++)
+
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -481,7 +484,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
return -EAGAIN;
if (pte) {
- WARN_ON(addr_is_memory(addr) && get_host_state(addr) != PKVM_NOPAGE);
+ WARN_ON(addr_is_memory(addr) &&
+ get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
return -EPERM;
}
@@ -507,10 +511,10 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
- phys_addr_t end = addr + size;
+ struct hyp_page *page;
- for (; addr < end; addr += PAGE_SIZE)
- set_host_state(addr, state);
+ for_each_hyp_page(addr, size, page)
+ set_host_state(page, state);
}
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
@@ -632,16 +636,17 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
static int __host_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- u64 end = addr + size;
+ struct hyp_page *page;
int ret;
- ret = check_range_allowed_memory(addr, end);
+ ret = check_range_allowed_memory(addr, addr + size);
if (ret)
return ret;
hyp_assert_lock_held(&host_mmu.lock);
- for (; addr < end; addr += PAGE_SIZE) {
- if (get_host_state(addr) != state)
+
+ for_each_hyp_page(addr, size, page) {
+ if (get_host_state(page) != state)
return -EPERM;
}
@@ -651,7 +656,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
- if (get_host_state(addr) == PKVM_NOPAGE) {
+ if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
if (ret)
@@ -665,18 +670,18 @@ static int __host_set_page_state_range(u64 addr, u64 size,
static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- phys_addr_t end = phys + size;
+ struct hyp_page *page;
- for (; phys < end; phys += PAGE_SIZE)
- set_hyp_state(phys, state);
+ for_each_hyp_page(phys, size, page)
+ set_hyp_state(page, state);
}
static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
- phys_addr_t end = phys + size;
+ struct hyp_page *page;
- for (; phys < end; phys += PAGE_SIZE) {
- if (get_hyp_state(phys) != state)
+ for_each_hyp_page(phys, size, page) {
+ if (get_hyp_state(page) != state)
return -EPERM;
}
@@ -927,7 +932,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
goto unlock;
page = hyp_phys_to_page(phys);
- switch (get_host_state(phys)) {
+ switch (get_host_state(page)) {
case PKVM_PAGE_OWNED:
WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
break;
@@ -979,9 +984,9 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
if (WARN_ON(ret))
return ret;
- if (get_host_state(phys) != PKVM_PAGE_SHARED_OWNED)
- return -EPERM;
page = hyp_phys_to_page(phys);
+ if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
+ return -EPERM;
if (WARN_ON(!page->host_share_guest_count))
return -EINVAL;
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 6d513a4b3763..c19860fc8183 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -190,6 +190,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
enum kvm_pgtable_walk_flags visit)
{
enum pkvm_page_state state;
+ struct hyp_page *page;
phys_addr_t phys;
if (!kvm_pte_valid(ctx->old))
@@ -202,6 +203,8 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (!addr_is_memory(phys))
return -EINVAL;
+ page = hyp_phys_to_page(phys);
+
/*
* Adjust the host stage-2 mappings to match the ownership attributes
* configured in the hypervisor stage-1, and make sure to propagate them
@@ -210,15 +213,15 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
- set_hyp_state(phys, PKVM_PAGE_OWNED);
+ set_hyp_state(page, PKVM_PAGE_OWNED);
return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
- set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
- set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
+ set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
+ set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
break;
case PKVM_PAGE_SHARED_BORROWED:
- set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
- set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
+ set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
+ set_host_state(page, PKVM_PAGE_SHARED_OWNED);
break;
default:
return -EINVAL;
--
2.49.0.1015.ga840276032-goog
On Fri, 09 May 2025 14:16:58 +0100,
Vincent Donnefort <vdonnefort@google.com> wrote:
>
> Add a helper to iterate over the hypervisor vmemmap. This will be
> particularly handy with the introduction of huge mapping support
> for the np-guest stage-2.
>
> Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
>
> diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> index eb0c2ebd1743..676a0a13c741 100644
> --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> @@ -96,24 +96,24 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
> #define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
> #define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
>
> -static inline enum pkvm_page_state get_host_state(phys_addr_t phys)
> +static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
> {
> - return (enum pkvm_page_state)hyp_phys_to_page(phys)->__host_state;
> + return (enum pkvm_page_state)p->__host_state;
I'm not quite sure why we have this cast in the first place. If we are so
keen on type consistency, why isn't __host_state an enum pkvm_page_state
in the first place?
> }
>
> -static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
> +static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
> {
> - hyp_phys_to_page(phys)->__host_state = state;
> + p->__host_state = state;
> }
>
> -static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
> +static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
> {
> - return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
> + return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
And we don't seem that bothered here.
> }
>
> -static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
> +static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
> {
> - hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
> + p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
> }
>
> /*
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 23544928a637..4d269210dae0 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -60,6 +60,9 @@ static void hyp_unlock_component(void)
> hyp_spin_unlock(&pkvm_pgd_lock);
> }
>
> +#define for_each_hyp_page(start, size, page) \
> + for (page = hyp_phys_to_page(start); page < hyp_phys_to_page((start) + (size)); page++)
> +
Huh. This really should be something like:
#define for_each_hyp_page(__p, __st, __sz)				\
	for (struct hyp_page *__p = hyp_phys_to_page(__st),		\
			     *__e = __p + ((__sz) >> PAGE_SHIFT);	\
	     __p < __e; __p++)
so that:
- the iterator variable comes first and looks like a normal for-loop
- the iterator has a scope limited to the loop
- we don't evaluate parameters multiple times
- we don't evaluate the end of the loop multiple times
- we try not to reuse common variable names
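To make the multiple-evaluation point concrete, a hypothetical caller
(names made up):

	static void toy(phys_addr_t base, u64 off, u64 sz)
	{
		struct hyp_page *page;

		/* With the macro as posted, the end bound
		 * hyp_phys_to_page((base + off) + sz) is recomputed on
		 * every iteration of the loop condition. */
		for_each_hyp_page(base + off, sz, page)
			set_host_state(page, PKVM_PAGE_OWNED);
	}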
> static void *host_s2_zalloc_pages_exact(size_t size)
> {
> void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
> @@ -481,7 +484,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
> return -EAGAIN;
>
> if (pte) {
> - WARN_ON(addr_is_memory(addr) && get_host_state(addr) != PKVM_NOPAGE);
> + WARN_ON(addr_is_memory(addr) &&
> + get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
> return -EPERM;
> }
>
> @@ -507,10 +511,10 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
>
> static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
> {
> - phys_addr_t end = addr + size;
> + struct hyp_page *page;
and then these extra declarations can go.
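i.e. something like (untested, assuming the reworked macro above, which
declares the iterator itself):

	static void __host_update_page_state(phys_addr_t addr, u64 size,
					     enum pkvm_page_state state)
	{
		for_each_hyp_page(page, addr, size)
			set_host_state(page, state);
	}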
Thanks,
M.
--
Without deviation from the norm, progress is not possible.
On Friday 16 May 2025 at 13:57:52 (+0100), Marc Zyngier wrote:
> On Fri, 09 May 2025 14:16:58 +0100,
> Vincent Donnefort <vdonnefort@google.com> wrote:
> >
> > Add a helper to iterate over the hypervisor vmemmap. This will be
> > particularly handy with the introduction of huge mapping support
> > for the np-guest stage-2.
> >
> > Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
> >
> > diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> > index eb0c2ebd1743..676a0a13c741 100644
> > --- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
> > +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
> > @@ -96,24 +96,24 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
> > #define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
> > #define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
> >
> > -static inline enum pkvm_page_state get_host_state(phys_addr_t phys)
> > +static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
> > {
> > - return (enum pkvm_page_state)hyp_phys_to_page(phys)->__host_state;
> > + return (enum pkvm_page_state)p->__host_state;
>
> I'm not quite sure why we have this cast in the first place. If we are so
> keen on type consistency, why isn't __host_state an enum pkvm_page_state
> in the first place?
Purely for cosmetic reasons TBH. The *hyp* state (as you've seen below)
really needs accessors with some logic to decode whatever we store in
hyp_page (the _complement_ of the hyp state in practice). So we
introduced accessors for the *host* state for consistency and obfuscated
the __host_state type in hyp_page to encourage the use of these
accessors. None of that is strictly necessary, though. And the explicit
cast can go; it is already implied by the return type of the accessor,
and the compiler should be happy enough I think.
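For the record, what the complement buys us is that a zeroed vmemmap
entry decodes as PKVM_NOPAGE. A standalone toy of the encoding (the
values are illustrative and NOPAGE == mask is my reading; the real
definitions live in the kvm headers):

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative values: NOPAGE deliberately equals the mask. */
	enum pkvm_page_state {
		PKVM_PAGE_OWNED			= 0,
		PKVM_PAGE_SHARED_OWNED		= 1 << 0,
		PKVM_PAGE_SHARED_BORROWED	= 1 << 1,
		PKVM_NOPAGE			= (1 << 0) | (1 << 1),
	};
	#define PKVM_PAGE_STATE_MASK		((1 << 0) | (1 << 1))

	struct hyp_page { uint8_t __hyp_state_comp; };

	static enum pkvm_page_state get_hyp_state(struct hyp_page *p)
	{
		return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
	}

	static void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
	{
		p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
	}

	int main(void)
	{
		struct hyp_page p = { 0 };	/* freshly zeroed vmemmap entry */

		/* Zero storage decodes to NOPAGE, no explicit init needed. */
		assert(get_hyp_state(&p) == PKVM_NOPAGE);

		/* And set/get round-trips through the complement. */
		set_hyp_state(&p, PKVM_PAGE_SHARED_BORROWED);
		assert(get_hyp_state(&p) == PKVM_PAGE_SHARED_BORROWED);
		return 0;
	}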
> > }
> >
> > -static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
> > +static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
> > {
> > - hyp_phys_to_page(phys)->__host_state = state;
> > + p->__host_state = state;
> > }
> >
> > -static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
> > +static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
> > {
> > - return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
> > + return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
>
> And we don't seem that bothered here.
Cheers,
Quentin