From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Sean Christopherson
Subject: [PATCH 7/7] KVM: selftests: Play nice with huge pages when getting PTEs/GPAs
Date: Thu, 6 Oct 2022 00:45:12 +0000
Message-ID: <20221006004512.666529-8-seanjc@google.com>
In-Reply-To: <20221006004512.666529-1-seanjc@google.com>
References: <20221006004512.666529-1-seanjc@google.com>

Play nice with huge pages when getting PTEs and translating GVAs to
GPAs; there's no reason to disallow using huge pages in selftests.

Use PG_LEVEL_NONE to indicate that the caller doesn't care about the
mapping level and just wants to get the pte+level.

Signed-off-by: Sean Christopherson
---
 .../selftests/kvm/include/x86_64/processor.h | 11 ++++-
 .../selftests/kvm/lib/x86_64/processor.c     | 45 ++++++++++++++++---
 2 files changed, 47 insertions(+), 9 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 9676a3464758..e000e35c948f 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -172,11 +172,16 @@ struct kvm_x86_cpu_feature {
 #define PTE_GLOBAL_MASK		BIT_ULL(8)
 #define PTE_NX_MASK		BIT_ULL(63)
 
+#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
+
 #define PAGE_SHIFT		12
 #define PAGE_SIZE		(1ULL << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
+#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)
+
+#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
+#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)
 
-#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
 #define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
 #define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)
 
@@ -828,6 +833,8 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
 
 bool kvm_is_tdp_enabled(void);
 
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+				    int *level);
 uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
 
 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 053f64191122..efa20d0f9927 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -245,10 +245,26 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 	}
 }
 
-uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
+static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
+{
+	if (*pte & PTE_LARGE_MASK) {
+		TEST_ASSERT(*level == PG_LEVEL_NONE ||
+			    *level == current_level,
+			    "Unexpected hugepage at level %d\n", current_level);
+		*level = current_level;
+	}
+
+	return *level == current_level;
+}
+
+uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
+				    int *level)
 {
 	uint64_t *pml4e, *pdpe, *pde;
 
+	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
+		    "Invalid PG_LEVEL_* '%d'", *level);
+
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
 		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
@@ -263,18 +279,27 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
 		"Canonical check failed.  The virtual address is invalid.");
 
 	pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
+	if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G))
+		return pml4e;
 
 	pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
-	TEST_ASSERT(!(*pdpe & PTE_LARGE_MASK),
-		"Expected pdpe to map a pde not a 1-GByte page.");
+	if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G))
+		return pdpe;
 
 	pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
-	TEST_ASSERT(!(*pde & PTE_LARGE_MASK),
-		"Expected pde to map a pte not a 2-MByte page.");
+	if (vm_is_target_pte(pde, level, PG_LEVEL_2M))
+		return pde;
 
 	return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
 }
 
+uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
+{
+	int level = PG_LEVEL_4K;
+
+	return __vm_get_page_table_entry(vm, vaddr, &level);
+}
+
 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	uint64_t *pml4e, *pml4e_start;
@@ -458,11 +483,17 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 
 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
-	uint64_t *pte = vm_get_page_table_entry(vm, gva);
+	int level = PG_LEVEL_NONE;
+	uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
 
 	TEST_ASSERT(*pte & PTE_PRESENT_MASK,
 		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
-	return PTE_GET_PA(*pte) | (gva & ~PAGE_MASK);
+
+	/*
+	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
+	 * address bits to be zero.
+	 */
+	return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
 }
 
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
-- 
2.38.0.rc1.362.ged0d419d3c-goog