arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
From: "kyle-jk.liao" <kyle-jk.liao@mediatek.com>
The host_stage2_adjust_range() function triggers a WARN_ON when
encountering pages marked as both PKVM_NOPAGE and PKVM_MODULE_OWNED_PAGE.
This occurs when load_unaligned_zeropad() performs a load that crosses
a page boundary and one of the two pages involved is module-owned.
Extend the check to also accept pages whose state combines PKVM_NOPAGE
with the PKVM_MODULE_OWNED_PAGE flag, preventing spurious warnings on
legitimate accesses to module-owned memory.
Signed-off-by: kyle-jk.liao <kyle-jk.liao@mediatek.com>
---
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 49db32f3ddf7..baaabf4b5112 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -495,6 +495,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
u64 granule;
s8 level;
int ret;
+ enum pkvm_page_state state;
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
@@ -505,8 +506,9 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
return -EAGAIN;
if (pte) {
+ state = get_host_state(hyp_phys_to_page(addr));
WARN_ON(addr_is_memory(addr) &&
- get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
+ (state != PKVM_NOPAGE && state != (PKVM_NOPAGE | PKVM_MODULE_OWNED_PAGE)));
return -EPERM;
}
--
2.45.2
On Mon, 09 Feb 2026 03:24:13 +0000,
<kyle-jk.liao@mediatek.com> wrote:
>
> From: "kyle-jk.liao" <kyle-jk.liao@mediatek.com>
>
> The host_stage2_adjust_range() function triggers a WARN_ON when
> encountering pages marked as both PKVM_NOPAGE and PKVM_MODULE_OWNED_PAGE.
> This occurs when load_unaligned_zeropad() accesses memory that crosses
> page boundaries, where one of the pages is module-owned.
>
> Extend the check to accept pages with the PKVM_MODULE_OWNED_PAGE flag
> in addition to PKVM_NOPAGE, preventing spurious warnings during normal
> module memory access patterns.
>
> Signed-off-by: kyle-jk.liao <kyle-jk.liao@mediatek.com>
> ---
> arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 +++-
> 1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index 49db32f3ddf7..baaabf4b5112 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -495,6 +495,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
> u64 granule;
> s8 level;
> int ret;
> + enum pkvm_page_state state;
>
> hyp_assert_lock_held(&host_mmu.lock);
> ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
> @@ -505,8 +506,9 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
> return -EAGAIN;
>
> if (pte) {
> + state = get_host_state(hyp_phys_to_page(addr));
> WARN_ON(addr_is_memory(addr) &&
> - get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
> + (state != PKVM_NOPAGE && state != (PKVM_NOPAGE | PKVM_MODULE_OWNED_PAGE)));
> return -EPERM;
> }
>
None of that exists upstream.
M.
--
Without deviation from the norm, progress is not possible.
© 2016 - 2026 Red Hat, Inc.