Commit fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM") made the
initialization of the local memcache variable in user_mem_abort()
conditional, leaving a codepath where it is used uninitialized via
kvm_pgtable_stage2_map().
This can fail on any path that requires a stage-2 allocation
without transition via a permission fault or dirty logging.
Fix this by making sure that memcache is always valid.
Fixes: fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM")
Signed-off-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/kvmarm/3f5db4c7-ccce-fb95-595c-692fa7aad227@redhat.com/
---
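A quick note below the cut, for readers who don't have the kernel sources handy: the sketch here shows, in plain C with made-up names (a toy struct cache plus buggy()/fixed() helpers, none of which exist in the kernel), the shape of the bug being fixed: a local pointer that is only assigned inside a conditional block but is dereferenced unconditionally later on. It is an illustration only, not the kernel code; the authoritative change is the diff further down.

/*
 * Toy illustration (hypothetical names, not kernel code) of the pattern
 * fixed by this patch: a local pointer assigned only inside a conditional
 * block, but dereferenced unconditionally afterwards.
 */
#include <stdio.h>

struct cache { int pages; };

static struct cache normal_cache = { .pages = 8 };
static struct cache protected_cache = { .pages = 8 };

/* Before the fix: 'cache' stays uninitialized whenever need_topup is 0. */
static int buggy(int prot, int need_topup)
{
	struct cache *cache;		/* may remain uninitialized */

	if (need_topup) {
		if (!prot)
			cache = &normal_cache;
		else
			cache = &protected_cache;
	}

	return cache->pages;		/* undefined behaviour when !need_topup */
}

/* After the fix: the pointer is picked up front, before any conditional use. */
static int fixed(int prot, int need_topup)
{
	struct cache *cache = !prot ? &normal_cache : &protected_cache;

	if (need_topup) {
		/* topping up the selected cache would happen here */
	}

	return cache->pages;		/* always a valid pointer */
}

int main(void)
{
	/* need_topup == 0 models a fault that skips the top-up branch. */
	printf("fixed(0, 0) -> %d pages\n", fixed(0, 0));
	(void)buggy;			/* calling buggy(0, 0) would be UB */
	return 0;
}

The diff below applies the same hoisting to user_mem_abort(): memcache is selected before the conditional top-up block, so kvm_pgtable_stage2_map() always receives a valid pointer.
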
arch/arm64/kvm/mmu.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 754f2fe0cc67..eeda92330ade 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
+	if (!is_protected_kvm_enabled())
+		memcache = &vcpu->arch.mmu_page_cache;
+	else
+		memcache = &vcpu->arch.pkvm_memcache;
+
 	/*
 	 * Permission faults just need to update the existing leaf entry,
 	 * and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!fault_is_perm || (logging_active && write_fault)) {
 		int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
 
-		if (!is_protected_kvm_enabled()) {
-			memcache = &vcpu->arch.mmu_page_cache;
+		if (!is_protected_kvm_enabled())
 			ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
-		} else {
-			memcache = &vcpu->arch.pkvm_memcache;
+		else
 			ret = topup_hyp_memcache(memcache, min_pages);
-		}
+
 		if (ret)
 			return ret;
 	}

base-commit: 92a09c47464d040866cf2b4cd052bc60555185fb
--
2.49.0
On Mon, 05 May 2025 19:31:48 +0200, Sebastian Ott wrote:
> Commit fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM") made the
> initialization of the local memcache variable in user_mem_abort()
> conditional, leaving a codepath where it is used uninitialized via
> kvm_pgtable_stage2_map().
>
> This can fail on any path that requires a stage-2 allocation
> without transition via a permission fault or dirty logging.
>
> [...]
Applied to fixes, thanks!
[1/1] KVM: arm64: Fix uninitialized memcache pointer in user_mem_abort()
https://git.kernel.org/kvmarm/kvmarm/c/157dbc4a321f
--
Best,
Oliver
On Mon, May 05, 2025 at 07:31:48PM +0200, Sebastian Ott wrote:
> Commit fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM") made the
> initialization of the local memcache variable in user_mem_abort()
> conditional, leaving a codepath where it is used uninitialized via
> kvm_pgtable_stage2_map().
>
> This can fail on any path that requires a stage-2 allocation
> without transition via a permission fault or dirty logging.
>
> [...]
For the pKVM part,
Reviewed-by: Vincent Donnefort <vdonnefort@google.com>
Thanks