From: Jack Thomson <jackabt@amazon.com>
Add kvm_arch_vcpu_pre_fault_memory() for arm64. The implementation hands
off the stage-2 faulting logic to either gmem_abort() or
user_mem_abort().

Update __gmem_abort() and __user_mem_abort() to take a new pre_fault
parameter. When set, the checks that determine write or exec faults are
short-circuited to false, since a pre-fault should be treated as a read
fault.

This closely follows the implementation on x86.

Signed-off-by: Jack Thomson <jackabt@amazon.com>
---
arch/arm64/kvm/Kconfig | 1 +
arch/arm64/kvm/arm.c | 1 +
arch/arm64/kvm/mmu.c | 71 ++++++++++++++++++++++++++++++++++++------
3 files changed, 64 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bff62e75d681..1ac0605f86cb 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,6 +25,7 @@ menuconfig KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select KVM_GENERIC_PRE_FAULT_MEMORY
select KVM_XFER_TO_GUEST_WORK
select KVM_VFIO
select HAVE_KVM_DIRTY_RING_ACQ_REL
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 888f7c7abf54..65654a742864 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -322,6 +322,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IRQFD_RESAMPLE:
case KVM_CAP_COUNTER_OFFSET:
case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
+ case KVM_CAP_PRE_FAULT_MEMORY:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 082e7d8ae655..002f564c6ac7 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1523,7 +1523,8 @@ static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, bool is_perm)
+ struct kvm_memory_slot *memslot, bool is_perm,
+ bool pre_fault)
{
bool write_fault, exec_fault, writable;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
@@ -1537,6 +1538,9 @@ static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
int ret;
+ if (pre_fault)
+ flags |= KVM_PGTABLE_WALK_PRE_FAULT;
+
ret = prepare_mmu_memcache(vcpu, true, &memcache);
if (ret)
return ret;
@@ -1546,8 +1550,8 @@ static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else
gfn = fault_ipa >> PAGE_SHIFT;
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ write_fault = !pre_fault && kvm_is_write_fault(vcpu);
+ exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
VM_WARN_ON_ONCE(write_fault && exec_fault);
@@ -1599,7 +1603,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, bool is_perm)
{
- int ret = __gmem_abort(vcpu, fault_ipa, nested, memslot, is_perm);
+ int ret = __gmem_abort(vcpu, fault_ipa, nested, memslot, is_perm, false);
return ret != -EAGAIN ? ret : 0;
}
@@ -1607,7 +1611,7 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot,
long *page_size, unsigned long hva,
- bool fault_is_perm)
+ bool fault_is_perm, bool pre_fault)
{
int ret = 0;
bool topup_memcache;
@@ -1631,10 +1635,13 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vm_flags_t vm_flags;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
+ if (pre_fault)
+ flags |= KVM_PGTABLE_WALK_PRE_FAULT;
+
if (fault_is_perm)
fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ write_fault = !pre_fault && kvm_is_write_fault(vcpu);
+ exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
VM_WARN_ON_ONCE(write_fault && exec_fault);
/*
@@ -1895,8 +1902,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
bool fault_is_perm)
{
- int ret = __user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL,
- hva, fault_is_perm);
+ int ret = __user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL, hva,
+ fault_is_perm, false);
return ret != -EAGAIN ? ret : 0;
}
@@ -2468,3 +2475,49 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}
+
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range)
+{
+ int r;
+ hva_t hva;
+ phys_addr_t end;
+ long page_size;
+ struct kvm_memory_slot *memslot;
+ phys_addr_t ipa = range->gpa;
+ gfn_t gfn = gpa_to_gfn(range->gpa);
+
+ while (true) {
+ page_size = PAGE_SIZE;
+ memslot = gfn_to_memslot(vcpu->kvm, gfn);
+ if (!memslot)
+ return -ENOENT;
+
+ if (kvm_slot_has_gmem(memslot)) {
+ r = __gmem_abort(vcpu, ipa, NULL, memslot, false, true);
+ } else {
+ hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
+ if (kvm_is_error_hva(hva))
+ return -EFAULT;
+ r = __user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva, false,
+ true);
+ }
+
+ if (r != -EAGAIN)
+ break;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
+ return -EIO;
+
+ cond_resched();
+ };
+
+ if (r < 0)
+ return r;
+
+ end = (range->gpa & ~(page_size - 1)) + page_size;
+ return min(range->size, end - range->gpa);
+}
--
2.43.0
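
[For readers following along: this arch hook backs the existing
KVM_PRE_FAULT_MEMORY vCPU ioctl, guarded by KVM_CAP_PRE_FAULT_MEMORY as
advertised above. Below is a minimal userspace sketch of driving it; the
vCPU fd setup and capability check are assumed to happen elsewhere, and
the retry contract (the kernel copies the shrunk gpa/size back to
userspace and may return -EINTR) is paraphrased from the x86
documentation rather than taken from this patch.]

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch: pre-fault [gpa, gpa + size) on an existing vCPU fd. Assumes
 * the kernel advertises KVM_CAP_PRE_FAULT_MEMORY; gpa and size must be
 * page aligned. The ioctl copies the (possibly shrunk) range back to
 * userspace, so the loop simply retries until nothing is left.
 */
static int prefault_range(int vcpu_fd, uint64_t gpa, uint64_t size)
{
	struct kvm_pre_fault_memory range;

	memset(&range, 0, sizeof(range));	/* flags must be zero */
	range.gpa = gpa;
	range.size = size;

	while (range.size) {
		if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
			if (errno == EINTR)
				continue;	/* range was updated, retry */
			return -errno;
		}
	}

	return 0;
}
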
On Thu, Sep 11, 2025 at 02:46:45PM +0100, Jack Thomson wrote:
> @@ -1607,7 +1611,7 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  			    struct kvm_s2_trans *nested,
>  			    struct kvm_memory_slot *memslot,
>  			    long *page_size, unsigned long hva,
> -			    bool fault_is_perm)
> +			    bool fault_is_perm, bool pre_fault)
>  {
>  	int ret = 0;
>  	bool topup_memcache;
> @@ -1631,10 +1635,13 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	vm_flags_t vm_flags;
>  	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
>
> +	if (pre_fault)
> +		flags |= KVM_PGTABLE_WALK_PRE_FAULT;
> +
>  	if (fault_is_perm)
>  		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
> -	write_fault = kvm_is_write_fault(vcpu);
> -	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
> +	write_fault = !pre_fault && kvm_is_write_fault(vcpu);
> +	exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);

I'm not a fan of this. While user_mem_abort() is already a sloppy mess,
one thing we could reliably assume is the presence of a valid fault
context. Now we need to remember to special-case our interpretation of a
fault on whether or not we're getting invoked for a pre-fault.

I'd rather see the pre-fault infrastructure compose a synthetic fault
context (HPFAR_EL2, ESR_EL2, etc.). It places the complexity where it
belongs and the rest of the abort handling code should 'just work'.

> +long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
> +				    struct kvm_pre_fault_memory *range)
> +{
> +	int r;
> +	hva_t hva;
> +	phys_addr_t end;
> +	long page_size;
> +	struct kvm_memory_slot *memslot;
> +	phys_addr_t ipa = range->gpa;
> +	gfn_t gfn = gpa_to_gfn(range->gpa);
> +
> +	while (true) {
> +		page_size = PAGE_SIZE;
> +		memslot = gfn_to_memslot(vcpu->kvm, gfn);
> +		if (!memslot)
> +			return -ENOENT;
> +
> +		if (kvm_slot_has_gmem(memslot)) {
> +			r = __gmem_abort(vcpu, ipa, NULL, memslot, false, true);
> +		} else {
> +			hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
> +			if (kvm_is_error_hva(hva))
> +				return -EFAULT;
> +			r = __user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva, false,
> +					     true);
> +		}
> +
> +		if (r != -EAGAIN)
> +			break;
> +
> +		if (signal_pending(current))
> +			return -EINTR;
> +
> +		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
> +			return -EIO;
> +
> +		cond_resched();
> +	};

Why do we need another retry loop? Looks like we've already got one in
the arch-generic code.

Thanks,
Oliver
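
[For context on Oliver's last point: the arch-generic ioctl path already
wraps the arch hook in a retry loop. The sketch below is a from-memory
paraphrase of kvm_vcpu_pre_fault_memory() in virt/kvm/kvm_main.c, not
verbatim kernel code, so details may differ in the tree this series
targets.]

/*
 * Paraphrase of the arch-generic loop: validate the range, then call
 * the arch hook repeatedly until the whole range has been consumed.
 * The arch hook only needs to map some prefix of the range and return
 * the number of bytes it handled (or a negative error).
 */
static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
				     struct kvm_pre_fault_memory *range)
{
	long r;
	u64 full_size = range->size;

	do {
		if (signal_pending(current))
			return -EINTR;

		r = kvm_arch_vcpu_pre_fault_memory(vcpu, range);
		if (r < 0)
			break;

		/* Advance past the bytes the arch hook mapped. */
		range->size -= r;
		range->gpa += r;
		cond_resched();
	} while (range->size);

	/* Report success if any progress was made. */
	return full_size == range->size ? r : 0;
}
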
Hi Oliver,

Thanks for reviewing!

On 11/09/2025 7:42 pm, Oliver Upton wrote:
> On Thu, Sep 11, 2025 at 02:46:45PM +0100, Jack Thomson wrote:
>> @@ -1607,7 +1611,7 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  			    struct kvm_s2_trans *nested,
>>  			    struct kvm_memory_slot *memslot,
>>  			    long *page_size, unsigned long hva,
>> -			    bool fault_is_perm)
>> +			    bool fault_is_perm, bool pre_fault)
>>  {
>>  	int ret = 0;
>>  	bool topup_memcache;
>> @@ -1631,10 +1635,13 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>>  	vm_flags_t vm_flags;
>>  	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
>>
>> +	if (pre_fault)
>> +		flags |= KVM_PGTABLE_WALK_PRE_FAULT;
>> +
>>  	if (fault_is_perm)
>>  		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
>> -	write_fault = kvm_is_write_fault(vcpu);
>> -	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
>> +	write_fault = !pre_fault && kvm_is_write_fault(vcpu);
>> +	exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
>
> I'm not a fan of this. While user_mem_abort() is already a sloppy mess,
> one thing we could reliably assume is the presence of a valid fault
> context. Now we need to remember to special-case our interpretation of a
> fault on whether or not we're getting invoked for a pre-fault.
>
> I'd rather see the pre-fault infrastructure compose a synthetic fault
> context (HPFAR_EL2, ESR_EL2, etc.). It places the complexity where it
> belongs and the rest of the abort handling code should 'just work'.
>

Agreed, it looks much better with the synthetic abort. Is this the
approach you had in mind?

+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+				    struct kvm_pre_fault_memory *range)
+{
+	int ret, idx;
+	hva_t hva;
+	phys_addr_t end;
+	u64 esr, hpfar;
+	struct kvm_memory_slot *memslot;
+	struct kvm_vcpu_fault_info *fault_info;
+
+	long page_size = PAGE_SIZE;
+	phys_addr_t ipa = range->gpa;
+	gfn_t gfn = gpa_to_gfn(range->gpa);
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	if (ipa >= kvm_phys_size(vcpu->arch.hw_mmu)) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	memslot = gfn_to_memslot(vcpu->kvm, gfn);
+	if (!memslot) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	fault_info = &vcpu->arch.fault;
+
+	esr = fault_info->esr_el2;
+	hpfar = fault_info->hpfar_el2;
+
+	fault_info->esr_el2 = ESR_ELx_FSC_ACCESS_L(KVM_PGTABLE_LAST_LEVEL);
+	fault_info->hpfar_el2 = HPFAR_EL2_NS |
+		((ipa >> (12 - HPFAR_EL2_FIPA_SHIFT)) & HPFAR_EL2_FIPA_MASK);
+
+	if (kvm_slot_has_gmem(memslot)) {
+		ret = gmem_abort(vcpu, ipa, NULL, memslot, false);
+	} else {
+		hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
+		if (kvm_is_error_hva(hva)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		ret = user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva,
+				     false);
+	}
+
+	if (ret < 0)
+		goto out;
+
+	end = (range->gpa & ~(page_size - 1)) + page_size;
+	ret = min(range->size, end - range->gpa);
+
+out:
+	fault_info->esr_el2 = esr;
+	fault_info->hpfar_el2 = hpfar;
+out_unlock:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return ret;
+}

>> +long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
>> +				    struct kvm_pre_fault_memory *range)
>> +{
>> +	int r;
>> +	hva_t hva;
>> +	phys_addr_t end;
>> +	long page_size;
>> +	struct kvm_memory_slot *memslot;
>> +	phys_addr_t ipa = range->gpa;
>> +	gfn_t gfn = gpa_to_gfn(range->gpa);
>> +
>> +	while (true) {
>> +		page_size = PAGE_SIZE;
>> +		memslot = gfn_to_memslot(vcpu->kvm, gfn);
>> +		if (!memslot)
>> +			return -ENOENT;
>> +
>> +		if (kvm_slot_has_gmem(memslot)) {
>> +			r = __gmem_abort(vcpu, ipa, NULL, memslot, false, true);
>> +		} else {
>> +			hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
>> +			if (kvm_is_error_hva(hva))
>> +				return -EFAULT;
>> +			r = __user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva, false,
>> +					     true);
>> +		}
>> +
>> +		if (r != -EAGAIN)
>> +			break;
>> +
>> +		if (signal_pending(current))
>> +			return -EINTR;
>> +
>> +		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
>> +			return -EIO;
>> +
>> +		cond_resched();
>> +	};
>
> Why do we need another retry loop? Looks like we've already got one in
> the arch-generic code.
>

Good point, thanks. I've removed that now.

>
> Thanks,
> Oliver

Thanks,
Jack
On Mon, Sep 29, 2025 at 02:59:35PM +0100, Thomson, Jack wrote:
> Hi Oliver,
>
> Thanks for reviewing!
>
> On 11/09/2025 7:42 pm, Oliver Upton wrote:
> > On Thu, Sep 11, 2025 at 02:46:45PM +0100, Jack Thomson wrote:
> > > @@ -1607,7 +1611,7 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> > >  			    struct kvm_s2_trans *nested,
> > >  			    struct kvm_memory_slot *memslot,
> > >  			    long *page_size, unsigned long hva,
> > > -			    bool fault_is_perm)
> > > +			    bool fault_is_perm, bool pre_fault)
> > >  {
> > >  	int ret = 0;
> > >  	bool topup_memcache;
> > > @@ -1631,10 +1635,13 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> > >  	vm_flags_t vm_flags;
> > >  	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
> > >
> > > +	if (pre_fault)
> > > +		flags |= KVM_PGTABLE_WALK_PRE_FAULT;
> > > +
> > >  	if (fault_is_perm)
> > >  		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
> > > -	write_fault = kvm_is_write_fault(vcpu);
> > > -	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
> > > +	write_fault = !pre_fault && kvm_is_write_fault(vcpu);
> > > +	exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
> >
> > I'm not a fan of this. While user_mem_abort() is already a sloppy mess,
> > one thing we could reliably assume is the presence of a valid fault
> > context. Now we need to remember to special-case our interpretation of a
> > fault on whether or not we're getting invoked for a pre-fault.
> >
> > I'd rather see the pre-fault infrastructure compose a synthetic fault
> > context (HPFAR_EL2, ESR_EL2, etc.). It places the complexity where it
> > belongs and the rest of the abort handling code should 'just work'.
> >
>
> Agreed, it looks much better with the synthetic abort. Is this the
> approach you had in mind?

Pretty much. Thanks for taking a moment to fiddle with it.

> +long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
> +				    struct kvm_pre_fault_memory *range)
> +{
> +	int ret, idx;
> +	hva_t hva;
> +	phys_addr_t end;
> +	u64 esr, hpfar;
> +	struct kvm_memory_slot *memslot;
> +	struct kvm_vcpu_fault_info *fault_info;
> +
> +	long page_size = PAGE_SIZE;
> +	phys_addr_t ipa = range->gpa;
> +	gfn_t gfn = gpa_to_gfn(range->gpa);
> +
> +	idx = srcu_read_lock(&vcpu->kvm->srcu);
> +
> +	if (ipa >= kvm_phys_size(vcpu->arch.hw_mmu)) {
> +		ret = -ENOENT;
> +		goto out_unlock;
> +	}
> +
> +	memslot = gfn_to_memslot(vcpu->kvm, gfn);
> +	if (!memslot) {
> +		ret = -ENOENT;
> +		goto out_unlock;
> +	}
> +
> +	fault_info = &vcpu->arch.fault;
> +
> +	esr = fault_info->esr_el2;
> +	hpfar = fault_info->hpfar_el2;

nit: Just snapshot the entire struct, makes this forward-compatible
with new fields showing up.

> +
> +	fault_info->esr_el2 = ESR_ELx_FSC_ACCESS_L(KVM_PGTABLE_LAST_LEVEL);

A translation fault would be a more accurate representation of what
you're trying to do. Access flag faults aren't expected in
user_mem_abort() and are instead handled in handle_access_fault().

You're also missing the rest of the ESR fields that are relevant here,
such as ESR_ELx.EC which would actually indicate a data abort. I think
you'd also want to communicate this as a nISV fault (i.e.
ESR_ELx.ISV=0).

> +	fault_info->hpfar_el2 = HPFAR_EL2_NS |
> +		((ipa >> (12 - HPFAR_EL2_FIPA_SHIFT)) & HPFAR_EL2_FIPA_MASK);

FIELD_PREP()?

> +
> +	if (kvm_slot_has_gmem(memslot)) {
> +		ret = gmem_abort(vcpu, ipa, NULL, memslot, false);
> +	} else {
> +		hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
> +		if (kvm_is_error_hva(hva)) {
> +			ret = -EFAULT;
> +			goto out;
> +		}
> +		ret = user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva,
> +				     false);
> +	}
> +
> +	if (ret < 0)
> +		goto out;
> +
> +	end = (range->gpa & ~(page_size - 1)) + page_size;
> +	ret = min(range->size, end - range->gpa);
> +
> +out:
> +	fault_info->esr_el2 = esr;
> +	fault_info->hpfar_el2 = hpfar;
> +out_unlock:
> +	srcu_read_unlock(&vcpu->kvm->srcu, idx);
> +	return ret;
> +}

Thanks,
Oliver
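
[Folding Oliver's three comments together, the synthetic fault setup
might end up looking roughly like the sketch below. This is untested
editorial guesswork, not code from the series: ESR_ELx_FSC_FAULT_L()
and a FIELD_PREP()-able HPFAR_EL2_FIPA_MASK are assumed to match the
target tree's asm/esr.h and generated sysreg definitions.]

	/*
	 * Sketch only: compose a synthetic stage-2 data abort before
	 * calling the abort handlers, then restore the real fault
	 * context afterwards.
	 *
	 *  - snapshot the whole struct, per the forward-compat nit;
	 *  - EC = data abort, ISV left clear (nISV), FSC = translation
	 *    fault at the last level rather than an access flag fault;
	 *  - FIELD_PREP() for the FIPA field (4KiB units, hence >> 12).
	 */
	struct kvm_vcpu_fault_info stored_fault = vcpu->arch.fault;

	vcpu->arch.fault.esr_el2 = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
				   ESR_ELx_FSC_FAULT_L(KVM_PGTABLE_LAST_LEVEL);
	vcpu->arch.fault.hpfar_el2 = HPFAR_EL2_NS |
				     FIELD_PREP(HPFAR_EL2_FIPA_MASK, ipa >> 12);

	/* ... gmem_abort()/user_mem_abort() as in the draft above ... */

	vcpu->arch.fault = stored_fault;
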