From: Peter Gonda <pgonda@google.com>
Date: Tue, 10 Jan 2023 09:50:52 -0800
Subject: [PATCH V6 2/7] KVM: selftests: add hooks for managing protected guest memory
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: Peter Gonda, Paolo Bonzini, Sean Christopherson, Vishal Annapurve, Ackerley Tng, Andrew Jones, Michael Roth
Message-Id: <20230110175057.715453-3-pgonda@google.com>
In-Reply-To: <20230110175057.715453-1-pgonda@google.com>
References: <20230110175057.715453-1-pgonda@google.com>

Add kvm_vm.protected metadata.
A protected VM's memory, and potentially its registers and other
state, may not be accessible to KVM. This, combined with a new
protected_phy_pages bitmap, allows the selftests to check whether a
given page is accessible.

Cc: Paolo Bonzini
Cc: Sean Christopherson
Cc: Vishal Annapurve
Cc: Ackerley Tng
Cc: Andrew Jones
Originally-by: Michael Roth
Signed-off-by: Peter Gonda
---
 .../selftests/kvm/include/kvm_util_base.h  | 14 ++++++++++++--
 tools/testing/selftests/kvm/lib/kvm_util.c | 16 +++++++++++++---
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index fbc2a79369b8..015b59a0b80e 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -45,6 +45,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 struct userspace_mem_region {
 	struct kvm_userspace_memory_region region;
 	struct sparsebit *unused_phy_pages;
+	struct sparsebit *protected_phy_pages;
 	int fd;
 	off_t offset;
 	enum vm_mem_backing_src_type backing_src_type;
@@ -111,6 +112,9 @@ struct kvm_vm {
 	vm_vaddr_t handlers;
 	uint32_t dirty_ring_size;
 
+	/* VM protection enabled: SEV, etc. */
+	bool protected;
+
 	/* Cache of information for binary stats interface */
 	int stats_fd;
 	struct kvm_stats_header stats_header;
@@ -679,10 +683,16 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t _vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			       vm_paddr_t paddr_min, uint32_t memslot, bool protected);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+					    vm_paddr_t paddr_min, uint32_t memslot)
+{
+	return _vm_phy_pages_alloc(vm, num, paddr_min, memslot, vm->protected);
+}
+
 /*
  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
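The inline wrapper keeps existing callers unchanged: an allocation in a
VM with vm->protected set is transparently recorded in
protected_phy_pages. As a minimal sketch of a caller (not part of this
patch; it assumes test code elsewhere has already set vm->protected,
e.g. during SEV setup):

/*
 * Hypothetical caller, not part of this patch.  Allocates npages of
 * guest physical memory in memslot 0; if vm->protected was set when
 * the VM was created, the same pages also land in protected_phy_pages.
 */
static vm_paddr_t alloc_test_pages(struct kvm_vm *vm, size_t npages)
{
	/* The wrapper forwards vm->protected to _vm_phy_pages_alloc(). */
	return vm_phy_pages_alloc(vm, npages,
				  KVM_UTIL_MIN_PFN * vm->page_size, 0);
}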
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 56d5ea949cbb..63913b219b42 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -663,6 +663,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1010,6 +1011,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 
 	region->backing_src_type = src_type;
 	region->unused_phy_pages = sparsebit_alloc();
+	region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
@@ -1799,6 +1801,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			    region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (vm->protected) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1895,8 +1901,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned.  A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t _vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			       vm_paddr_t paddr_min, uint32_t memslot,
+			       bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1929,8 +1936,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
-- 
2.39.0.314.g84b9a713c41-goog
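The natural consumer of the new bitmap is a predicate that tells a test
whether a given guest physical address is expected to be inaccessible.
A hedged sketch of what such an accessor could look like
(vm_is_gpa_protected() is illustrative and not added by this patch; it
assumes the existing static helper userspace_mem_region_find() in
kvm_util.c):

/*
 * Illustrative accessor, not part of this patch: report whether a
 * guest physical address was allocated as protected.
 */
bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
{
	struct userspace_mem_region *region;

	/* Unprotected VMs never have protected pages. */
	if (!vm->protected)
		return false;

	region = userspace_mem_region_find(vm, paddr, paddr);
	if (!region)
		return false;

	return sparsebit_is_set(region->protected_phy_pages,
				paddr >> vm->page_shift);
}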