According to the APM, NPT walks are treated as user accesses, so the
'user' bit must be set in NPT entries. In preparation for supporting
NPT mappings, add a mask of bits that are always set on PTEs to
kvm_mmu's pte_masks, and use it to set the 'user' bit when NPT is
enabled.

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
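For context (this note and the snippet below are not part of the patch):
every PTE the library writes now ORs in a per-MMU "always set" mask, so
an MMU flavor such as NPT can force bits like 'user' without touching
each call site. A minimal standalone sketch of the pattern, with
simplified mask values rather than the real selftests definitions:

  #include <stdint.h>
  #include <stdio.h>

  #define PTE_PRESENT  (1ULL << 0)
  #define PTE_WRITABLE (1ULL << 1)
  #define PTE_USER     (1ULL << 2)

  struct pte_masks {
          uint64_t always_set;  /* OR'd into every PTE the library creates */
  };

  /* Build a leaf PTE: base bits, per-MMU extras, then the frame address. */
  static uint64_t make_pte(const struct pte_masks *m, uint64_t paddr)
  {
          return PTE_PRESENT | PTE_WRITABLE | m->always_set | paddr;
  }

  int main(void)
  {
          /* NPT-style MMU: walks are user accesses, so force the user bit. */
          struct pte_masks npt = { .always_set = PTE_USER };

          printf("pte = 0x%llx\n", (unsigned long long)make_pte(&npt, 0x1000));
          return 0;
  }
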
tools/testing/selftests/kvm/include/x86/processor.h | 4 ++++
tools/testing/selftests/kvm/lib/x86/processor.c | 6 ++++--
tools/testing/selftests/kvm/lib/x86/svm.c | 3 +++
3 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 920abd14f3a6..d41245e2521b 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -1450,6 +1450,8 @@ struct pte_masks {
uint64_t x;
uint64_t c;
uint64_t s;
+
+ uint64_t always_set;
};
struct kvm_mmu {
@@ -1469,6 +1471,8 @@ struct kvm_mmu {
#define PTE_C_MASK(mmu) ((mmu)->pte_masks.c)
#define PTE_S_MASK(mmu) ((mmu)->pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu) ((mmu)->pte_masks.always_set)
+
#define pte_present(mmu, pte) (!!(*(pte) & PTE_PRESENT_MASK(mmu)))
#define pte_writable(mmu, pte) (!!(*(pte) & PTE_WRITABLE_MASK(mmu)))
#define pte_user(mmu, pte) (!!(*(pte) & PTE_USER_MASK(mmu)))
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index b22c8c1bfdc3..749ae7522473 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -227,7 +227,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
paddr = vm_untag_gpa(vm, paddr);
if (!pte_present(mmu, pte)) {
- *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_X_MASK(mmu);
+ *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu)
+ | PTE_X_MASK(mmu) | PTE_ALWAYS_SET_MASK(mmu);
if (current_level == target_level)
*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
else
@@ -293,7 +294,8 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
TEST_ASSERT(!pte_present(mmu, pte),
"PTE already present for 4k page at vaddr: 0x%lx", vaddr);
- *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu) | PTE_X_MASK(mmu)
+ *pte = PTE_PRESENT_MASK(mmu) | PTE_WRITABLE_MASK(mmu)
+ | PTE_X_MASK(mmu) | PTE_ALWAYS_SET_MASK(mmu)
| (paddr & PHYSICAL_PAGE_MASK);
/*
diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c
index cf3b98802164..838f218545af 100644
--- a/tools/testing/selftests/kvm/lib/x86/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86/svm.c
@@ -73,6 +73,9 @@ void vm_enable_npt(struct kvm_vm *vm)
pte_masks.c = 0;
pte_masks.s = 0;
+ /* NPT walks are treated as user accesses, so set the 'user' bit */
+ pte_masks.always_set = pte_masks.user;
+
vm->arch.nested.mmu = mmu_create(vm, vm->pgtable_levels, &pte_masks);
}
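As a usage note (illustrative, not part of the patch): once a test calls
vm_enable_npt(), any mapping subsequently created through the nested MMU
picks up the 'user' bit automatically via pte_masks.always_set, e.g.
(assuming __virt_pg_map() takes (vm, mmu, vaddr, paddr, level); the
addresses are made up):

  uint64_t gpa = 0x100000, hpa = 0x200000;  /* hypothetical addresses */

  vm_enable_npt(vm);
  /* The new NPT entry gets PTE_USER_MASK set via pte_masks.always_set. */
  __virt_pg_map(vm, vm->arch.nested.mmu, gpa, hpa, PG_LEVEL_4K);
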
--
2.52.0.158.g65b55ccf14-goog