From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 1/6] KVM: selftests: Add a common helper for the PMU event filter guest code
Date: Fri, 7 Apr 2023 16:32:49 -0700
Message-ID: <20230407233254.957013-2-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

From: Aaron Lewis

Split out the common parts of the Intel and AMD guest code in the PMU
event filter test
into a helper function.  This is in preparation for adding additional
counters to the test.

No functional changes intended.

Signed-off-by: Aaron Lewis
Signed-off-by: Sean Christopherson
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 29 ++++++++++++-------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 1f60dfae69e0..a00a9d6ea41e 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -100,6 +100,15 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 	GUEST_SYNC(0);
 }
 
+static uint64_t run_and_measure_loop(uint32_t msr_base)
+{
+	uint64_t branches_retired = rdmsr(msr_base + 0);
+
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+	return rdmsr(msr_base + 0) - branches_retired;
+}
+
 static void intel_guest_code(void)
 {
 	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
@@ -108,16 +117,15 @@ static void intel_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
-		br0 = rdmsr(MSR_IA32_PMC0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_IA32_PMC0);
-		GUEST_SYNC(br1 - br0);
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+
+		count = run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(count);
 	}
 }
 
@@ -133,15 +141,14 @@ static void amd_guest_code(void)
 	GUEST_SYNC(1);
 
 	for (;;) {
-		uint64_t br0, br1;
+		uint64_t count;
 
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
-		br0 = rdmsr(MSR_K7_PERFCTR0);
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-		br1 = rdmsr(MSR_K7_PERFCTR0);
-		GUEST_SYNC(br1 - br0);
+
+		count = run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(count);
 	}
 }
 
-- 
2.40.0.577.gac1e443424-goog
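A note on the "loop ." idiom that run_and_measure_loop() consolidates:
"loop ." assembles to a single LOOP instruction that branches to itself.
LOOP decrements (R/E)CX and keeps branching until the count hits zero, so
seeding ECX through the "+c" constraint makes that one instruction execute,
and therefore retire a branch instruction, NUM_BRANCHES times -- which is
what the PMCs in the test are expected to observe.  The compound literal
(int){NUM_BRANCHES} is just a throwaway lvalue for the constraint.  Below
is a minimal, self-contained sketch of the same construct (not part of the
series; GNU C, x86 only, NUM_BRANCHES value picked arbitrarily here):

#include <stdio.h>

#define NUM_BRANCHES 42

int main(void)
{
	int ecx = NUM_BRANCHES;

	/*
	 * A single self-branching LOOP instruction: it executes (and thus
	 * retires a branch instruction) NUM_BRANCHES times before ECX
	 * reaches zero and execution falls through.
	 */
	__asm__ __volatile__("loop ." : "+c"(ecx));

	printf("loop executed %d times, ecx is now %d\n", NUM_BRANCHES, ecx);
	return 0;
}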
From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 2/6] KVM: selftests: Add helpers for PMC asserts in PMU event filter test
Date: Fri, 7 Apr 2023 16:32:50 -0700
Message-ID: <20230407233254.957013-3-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

From: Aaron Lewis

Add helper macros to consolidate the asserts that a PMC is/isn't counting
(branch) instructions retired.  This will make it easier to add additional
asserts related to counting instructions later on.

No functional changes intended.

Signed-off-by: Aaron Lewis
[sean: add "INSTRUCTIONS", massage changelog]
Signed-off-by: Sean Christopherson
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 52 ++++++++++---------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index a00a9d6ea41e..9b53e02a0565 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -244,14 +244,27 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 	return f;
 }
 
+#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count)	\
+do {	\
+	if (count != NUM_BRANCHES)	\
+		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
+			__func__, count, NUM_BRANCHES);	\
+	TEST_ASSERT(count, "Allowed PMU event is not counting.");	\
+} while (0)
+
+#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count)	\
+do {	\
+	if (count)	\
+		pr_info("%s: Branch instructions retired = %lu (expected 0)\n",	\
+			__func__, count);	\
+	TEST_ASSERT(!count, "Disallowed PMU Event is counting");	\
+} while (0)
+
 static void test_without_filter(struct kvm_vcpu *vcpu)
 {
 	uint64_t count = run_vcpu_to_sync(vcpu);
 
-	if (count != NUM_BRANCHES)
-		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
-			__func__, count, NUM_BRANCHES);
-	TEST_ASSERT(count, "Allowed PMU event is not counting");
+	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
 }
 
 static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
@@ -269,12 +282,9 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 
 	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
 	count = test_with_filter(vcpu, f);
-	free(f);
-	if (count != NUM_BRANCHES)
-		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
-			__func__, count, NUM_BRANCHES);
-	TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
@@ -283,10 +293,8 @@ static void test_member_deny_list(struct kvm_vcpu *vcpu)
 	uint64_t count = test_with_filter(vcpu, f);
 
 	free(f);
-	if (count)
-		pr_info("%s: Branch instructions retired = %lu (expected 0)\n",
-			__func__, count);
-	TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
@@ -295,10 +303,8 @@ static void test_member_allow_list(struct kvm_vcpu *vcpu)
 	uint64_t count = test_with_filter(vcpu, f);
 
 	free(f);
-	if (count != NUM_BRANCHES)
-		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
-			__func__, count, NUM_BRANCHES);
-	TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
@@ -310,10 +316,8 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 	remove_event(f, AMD_ZEN_BR_RETIRED);
 	count = test_with_filter(vcpu, f);
 	free(f);
-	if (count != NUM_BRANCHES)
-		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
-			__func__, count, NUM_BRANCHES);
-	TEST_ASSERT(count, "Allowed PMU event is not counting");
+
+	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
@@ -325,10 +329,8 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 	remove_event(f, AMD_ZEN_BR_RETIRED);
 	count = test_with_filter(vcpu, f);
 	free(f);
-	if (count)
-		pr_info("%s: Branch instructions retired = %lu (expected 0)\n",
-			__func__, count);
-	TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
 }
 
 /*
-- 
2.40.0.577.gac1e443424-goog
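A side note on the shape of the new helpers: wrapping the statements in
do { ... } while (0) is the usual C idiom for making a multi-statement
macro behave like a single statement, so the converted call sites above
still compose correctly with if/else and still require a terminating
semicolon.  A tiny standalone illustration (not from the series; the names
here are made up):

#include <stdio.h>

#define REPORT_BAD(x)	\
do {	\
	printf("bad value: %d\n", (x));	\
	printf("please investigate\n");	\
} while (0)

int main(void)
{
	int v = -1;

	if (v < 0)
		REPORT_BAD(v);	/* expands to one statement... */
	else
		printf("ok\n");	/* ...so this else still binds to the if */

	return 0;
}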
From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 3/6] KVM: selftests: Print detailed info in PMU event filter asserts
Date: Fri, 7 Apr 2023 16:32:51 -0700
Message-ID: <20230407233254.957013-4-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

From: Aaron Lewis

Provide the actual vs. expected count in the PMU event filter test's
asserts instead of relying on pr_info() to provide the context, e.g. so
that all information needed to triage a failure is readily available even
if the environment in which the test is run captures only the assert
itself.

Signed-off-by: Aaron Lewis
[sean: rewrite changelog]
Signed-off-by: Sean Christopherson
---
 .../selftests/kvm/x86_64/pmu_event_filter_test.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 9b53e02a0565..ef07aaca2168 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -246,18 +246,17 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 
 #define ASSERT_PMC_COUNTING_INSTRUCTIONS(count)	\
 do {	\
-	if (count != NUM_BRANCHES)	\
+	if (count && count != NUM_BRANCHES)	\
 		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
 			__func__, count, NUM_BRANCHES);	\
-	TEST_ASSERT(count, "Allowed PMU event is not counting.");	\
+	TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)",	\
+		    __func__, count);	\
 } while (0)
 
 #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count)	\
 do {	\
-	if (count)	\
-		pr_info("%s: Branch instructions retired = %lu (expected 0)\n",	\
-			__func__, count);	\
-	TEST_ASSERT(!count, "Disallowed PMU Event is counting");	\
+	TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)",	\
+		    __func__, count);	\
 } while (0)
 
 static void test_without_filter(struct kvm_vcpu *vcpu)
-- 
2.40.0.577.gac1e443424-goog
From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 4/6] KVM: selftests: Use error codes to signal errors in PMU event filter test
Date: Fri, 7 Apr 2023 16:32:52 -0700
Message-ID: <20230407233254.957013-5-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

Use '0' to signal success and '-errno' to signal failure in the PMU event
filter test so that the values are slightly less magical/arbitrary.  Using
'0' in the error paths is especially confusing as understanding it's an
error value requires following the breadcrumbs to the host code that
ultimately consumes the value.

Arguably there should also be a #define for "success", but 0/-errno is a
common enough pattern that defining another macro on top would likely do
more harm than good.

Signed-off-by: Sean Christopherson
---
 .../selftests/kvm/x86_64/pmu_event_filter_test.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index ef07aaca2168..0432ba347b22 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -77,7 +77,7 @@ static const uint64_t event_list[] = {
  */
 static void guest_gp_handler(struct ex_regs *regs)
 {
-	GUEST_SYNC(0);
+	GUEST_SYNC(-EFAULT);
 }
 
 /*
@@ -92,12 +92,12 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 
 	wrmsr(msr, v);
 	if (rdmsr(msr) != v)
-		GUEST_SYNC(0);
+		GUEST_SYNC(-EIO);
 
 	v ^= bits_to_flip;
 	wrmsr(msr, v);
 	if (rdmsr(msr) != v)
-		GUEST_SYNC(0);
+		GUEST_SYNC(-EIO);
 }
 
 static uint64_t run_and_measure_loop(uint32_t msr_base)
@@ -114,7 +114,7 @@ static void intel_guest_code(void)
 	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
 	check_msr(MSR_P6_EVNTSEL0, 0xffff);
 	check_msr(MSR_IA32_PMC0, 0xffff);
-	GUEST_SYNC(1);
+	GUEST_SYNC(0);
 
 	for (;;) {
 		uint64_t count;
@@ -138,7 +138,7 @@ static void amd_guest_code(void)
 {
 	check_msr(MSR_K7_EVNTSEL0, 0xffff);
 	check_msr(MSR_K7_PERFCTR0, 0xffff);
-	GUEST_SYNC(1);
+	GUEST_SYNC(0);
 
 	for (;;) {
 		uint64_t count;
@@ -178,13 +178,13 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
  */
 static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
 {
-	bool success;
+	uint64_t r;
 
 	vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
-	success = run_vcpu_to_sync(vcpu);
+	r = run_vcpu_to_sync(vcpu);
 	vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);
 
-	return success;
+	return !r;
 }
 
 static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-- 
2.40.0.577.gac1e443424-goog
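For context on the convention itself: run_vcpu_to_sync() (visible in the
hunk above) hands the host whatever value the guest passed to GUEST_SYNC(),
so after this patch a non-zero sync value is a self-describing error code
rather than a magic number.  A toy, self-contained illustration of the
0/-errno pattern outside of the selftest (names here are made up):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a check that reports 0 on success and -errno on failure. */
static long check_value(long expected, long actual)
{
	return actual == expected ? 0 : -EIO;
}

int main(void)
{
	long r = check_value(42, 41);

	if (r)
		printf("check failed: %ld (%s)\n", r, strerror(-r));
	else
		printf("check passed\n");

	return r ? 1 : 0;
}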
From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 5/6] KVM: selftests: Copy full counter values from guest in PMU event filter test
Date: Fri, 7 Apr 2023 16:32:53 -0700
Message-ID: <20230407233254.957013-6-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

Use a single struct to track all PMC event counts in the PMU filter test,
and copy the full struct to/from the guest when running and measuring each
guest workload.  Using a common struct avoids naming conflicts, e.g. the
loads/stores testcase has claimed "perf_counter", and eliminates the
unnecessary truncation of the counter values when they are propagated from
the guest MSRs to the host structs.

Zero the struct before running the guest workload to ensure that the test
doesn't get a false pass due to consuming data from a previous run.

Signed-off-by: Sean Christopherson
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 170 +++++++++---------
 1 file changed, 80 insertions(+), 90 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 0432ba347b22..5112aece3f95 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -71,6 +71,13 @@ static const uint64_t event_list[] = {
 	AMD_ZEN_BR_RETIRED,
 };
 
+struct {
+	uint64_t loads;
+	uint64_t stores;
+	uint64_t loads_stores;
+	uint64_t branches_retired;
+} pmc_results;
+
 /*
  * If we encounter a #GP during the guest PMU sanity check, then the guest
  * PMU is not functional. Inform the hypervisor via GUEST_SYNC(0).
@@ -100,13 +107,13 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 	GUEST_SYNC(-EIO);
 }
 
-static uint64_t run_and_measure_loop(uint32_t msr_base)
+static void run_and_measure_loop(uint32_t msr_base)
 {
-	uint64_t branches_retired = rdmsr(msr_base + 0);
+	const uint64_t branches_retired = rdmsr(msr_base + 0);
 
 	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
 
-	return rdmsr(msr_base + 0) - branches_retired;
+	pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
 }
 
 static void intel_guest_code(void)
@@ -117,15 +124,13 @@ static void intel_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
 
-		count = run_and_measure_loop(MSR_IA32_PMC0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -141,14 +146,12 @@ static void amd_guest_code(void)
 	GUEST_SYNC(0);
 
 	for (;;) {
-		uint64_t count;
-
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
 
-		count = run_and_measure_loop(MSR_K7_PERFCTR0);
-		GUEST_SYNC(count);
+		run_and_measure_loop(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
@@ -168,6 +171,19 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
 	return uc.args[1];
 }
 
+static void run_vcpu_and_sync_pmc_results(struct kvm_vcpu *vcpu)
+{
+	uint64_t r;
+
+	memset(&pmc_results, 0, sizeof(pmc_results));
+	sync_global_to_guest(vcpu->vm, pmc_results);
+
+	r = run_vcpu_to_sync(vcpu);
+	TEST_ASSERT(!r, "Unexpected sync value: 0x%lx", r);
+
+	sync_global_from_guest(vcpu->vm, pmc_results);
+}
+
 /*
  * In a nested environment or if the vPMU is disabled, the guest PMU
  * might not work as architected (accessing the PMU MSRs may raise
@@ -244,92 +260,93 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 	return f;
 }
 
-#define ASSERT_PMC_COUNTING_INSTRUCTIONS(count)	\
+#define ASSERT_PMC_COUNTING_INSTRUCTIONS()	\
 do {	\
-	if (count && count != NUM_BRANCHES)	\
+	uint64_t br = pmc_results.branches_retired;	\
+	\
+	if (br && br != NUM_BRANCHES)	\
 		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
-			__func__, count, NUM_BRANCHES);	\
-	TEST_ASSERT(count, "%s: Branch instructions retired = %lu (expected > 0)",	\
-		    __func__, count);	\
+			__func__, br, NUM_BRANCHES);	\
+	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)",	\
+		    __func__, br);	\
 } while (0)
 
-#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count)	\
+#define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS()	\
 do {	\
-	TEST_ASSERT(!count, "%s: Branch instructions retired = %lu (expected 0)",	\
-		    __func__, count);	\
+	uint64_t br = pmc_results.branches_retired;	\
+	\
+	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)",	\
+		    __func__, br);	\
 } while (0)
 
 static void test_without_filter(struct kvm_vcpu *vcpu)
 {
-	uint64_t count = run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
-static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
-				 struct kvm_pmu_event_filter *f)
+static void test_with_filter(struct kvm_vcpu *vcpu,
+			     struct kvm_pmu_event_filter *f)
 {
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	return run_vcpu_to_sync(vcpu);
+	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
 	uint64_t event = EVENT(0x1C2, 0);
 	struct kvm_pmu_event_filter *f;
-	uint64_t count;
 
 	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count = test_with_filter(vcpu, f);
 
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
-	uint64_t count;
 
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
-	count = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
 
-	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS(count);
+	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 /*
@@ -458,51 +475,30 @@ static bool supports_event_mem_inst_retired(void)
 #define EXCLUDE_MASKED_ENTRY(event_select, mask, match)	\
 	KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, true)
 
-struct perf_counter {
-	union {
-		uint64_t raw;
-		struct {
-			uint64_t loads:22;
-			uint64_t stores:22;
-			uint64_t loads_stores:20;
-		};
-	};
-};
-
-static uint64_t masked_events_guest_test(uint32_t msr_base)
+static void masked_events_guest_test(uint32_t msr_base)
 {
-	uint64_t ld0, ld1, st0, st1, ls0, ls1;
-	struct perf_counter c;
-	int val;
-
 	/*
-	 * The acutal value of the counters don't determine the outcome of
+	 * The actual value of the counters don't determine the outcome of
	 * the test.  Only that they are zero or non-zero.
 	 */
-	ld0 = rdmsr(msr_base + 0);
-	st0 = rdmsr(msr_base + 1);
-	ls0 = rdmsr(msr_base + 2);
+	const uint64_t loads = rdmsr(msr_base + 0);
+	const uint64_t stores = rdmsr(msr_base + 1);
+	const uint64_t loads_stores = rdmsr(msr_base + 2);
+	int val;
+
 
 	__asm__ __volatile__("movl $0, %[v];"
 			     "movl %[v], %%eax;"
 			     "incl %[v];"
 			     : [v]"+m"(val) :: "eax");
 
-	ld1 = rdmsr(msr_base + 0);
-	st1 = rdmsr(msr_base + 1);
-	ls1 = rdmsr(msr_base + 2);
-
-	c.loads = ld1 - ld0;
-	c.stores = st1 - st0;
-	c.loads_stores = ls1 - ls0;
-
-	return c.raw;
+	pmc_results.loads = rdmsr(msr_base + 0) - loads;
+	pmc_results.stores = rdmsr(msr_base + 1) - stores;
+	pmc_results.loads_stores = rdmsr(msr_base + 2) - loads_stores;
 }
 
 static void intel_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
@@ -515,16 +511,13 @@ static void intel_masked_events_guest_code(void)
 
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x7);
 
-		r = masked_events_guest_test(MSR_IA32_PMC0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_IA32_PMC0);
+		GUEST_SYNC(0);
 	}
 }
 
 static void amd_masked_events_guest_code(void)
 {
-	uint64_t r;
-
 	for (;;) {
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL1, 0);
@@ -537,26 +530,22 @@ static void amd_masked_events_guest_code(void)
 		wrmsr(MSR_K7_EVNTSEL2, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | LS_DISPATCH_LOAD_STORE);
 
-		r = masked_events_guest_test(MSR_K7_PERFCTR0);
-
-		GUEST_SYNC(r);
+		masked_events_guest_test(MSR_K7_PERFCTR0);
+		GUEST_SYNC(0);
 	}
 }
 
-static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
-						  const uint64_t masked_events[],
-						  const int nmasked_events)
+static void run_masked_events_test(struct kvm_vcpu *vcpu,
+				   const uint64_t masked_events[],
+				   const int nmasked_events)
 {
 	struct kvm_pmu_event_filter *f;
-	struct perf_counter r;
 
 	f = create_pmu_event_filter(masked_events, nmasked_events,
 				    KVM_PMU_EVENT_ALLOW,
 				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	r.raw = test_with_filter(vcpu, f);
+	test_with_filter(vcpu, f);
 	free(f);
-
-	return r;
 }
 
 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
@@ -681,7 +670,6 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
 				    int nevents)
 {
 	int ntests = ARRAY_SIZE(test_cases);
-	struct perf_counter c;
 	int i, n;
 
 	for (i = 0; i < ntests; i++) {
@@ -693,13 +681,15 @@ static void run_masked_events_tests(struct kvm_vcpu *vcpu, uint64_t *events,
 
 		n = append_test_events(test, events, nevents);
 
-		c = run_masked_events_test(vcpu, events, n);
-		TEST_ASSERT(bool_eq(c.loads, test->flags & ALLOW_LOADS) &&
-			    bool_eq(c.stores, test->flags & ALLOW_STORES) &&
-			    bool_eq(c.loads_stores,
+		run_masked_events_test(vcpu, events, n);
+
+		TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&
+			    bool_eq(pmc_results.stores, test->flags & ALLOW_STORES) &&
+			    bool_eq(pmc_results.loads_stores,
 				    test->flags & ALLOW_LOADS_STORES),
-			    "%s loads: %u, stores: %u, loads + stores: %u",
-			    test->msg, c.loads, c.stores, c.loads_stores);
+			    "%s loads: %lu, stores: %lu, loads + stores: %lu",
+			    test->msg, pmc_results.loads, pmc_results.stores,
+			    pmc_results.loads_stores);
 	}
 }
 
-- 
2.40.0.577.gac1e443424-goog
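The "unnecessary truncation" mentioned in the changelog refers to the
struct perf_counter removed above, which squeezed three counts into the
single 64-bit GUEST_SYNC() payload as 22/22/20-bit bitfields.  A toy,
self-contained illustration (not from the series) of how that kind of
packing silently drops high bits, which the full-width pmc_results fields
avoid:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout the patch removes: three counters packed into one u64. */
struct packed_counters {
	union {
		uint64_t raw;
		struct {
			uint64_t loads:22;
			uint64_t stores:22;
			uint64_t loads_stores:20;
		};
	};
};

int main(void)
{
	struct packed_counters c;
	uint64_t count = 1ULL << 23;	/* does not fit in a 22-bit field */

	c.raw = 0;
	c.loads = count;
	printf("stored %llu, read back %llu\n",
	       (unsigned long long)count, (unsigned long long)c.loads);
	/* Prints "stored 8388608, read back 0": the high bits are gone. */
	return 0;
}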
From: Sean Christopherson
To: Paolo Bonzini
Cc: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Aaron Lewis, Sean Christopherson
Subject: [PATCH v4 6/6] KVM: selftests: Test the PMU event "Instructions retired"
Date: Fri, 7 Apr 2023 16:32:54 -0700
Message-ID: <20230407233254.957013-7-seanjc@google.com>
In-Reply-To: <20230407233254.957013-1-seanjc@google.com>

From: Aaron Lewis

Add testing for the event "Instructions retired" (0xc0) in the PMU event
filter on both Intel and AMD to ensure that the event doesn't count when
it is disallowed.  Unlike most of the other events, the event
"Instructions retired" will be incremented by KVM when an instruction is
emulated.
Test that this case is being properly handled and that KVM doesn't
increment the counter when that event is disallowed.

Signed-off-by: Aaron Lewis
Link: https://lore.kernel.org/r/20230307141400.1486314-6-aaronlewis@google.com
Signed-off-by: Sean Christopherson
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 34 +++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 5112aece3f95..40507ed9fe8a 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -54,6 +54,21 @@
 
 #define AMD_ZEN_BR_RETIRED EVENT(0xc2, 0)
 
+
+/*
+ * "Retired instructions", from Processor Programming Reference
+ * (PPR) for AMD Family 17h Model 01h, Revision B1 Processors,
+ * Preliminary Processor Programming Reference (PPR) for AMD Family
+ * 17h Model 31h, Revision B0 Processors, and Preliminary Processor
+ * Programming Reference (PPR) for AMD Family 19h Model 01h, Revision
+ * B1 Processors Volume 1 of 2.
+ *  --- and ---
+ * "Instructions retired", from the Intel SDM, volume 3,
+ * "Pre-defined Architectural Performance Events."
+ */
+
+#define INST_RETIRED EVENT(0xc0, 0)
+
 /*
  * This event list comprises Intel's eight architectural events plus
  * AMD's "retired branch instructions" for Zen[123] (and possibly
@@ -61,7 +76,7 @@
  */
 static const uint64_t event_list[] = {
 	EVENT(0x3c, 0),
-	EVENT(0xc0, 0),
+	INST_RETIRED,
 	EVENT(0x3c, 1),
 	EVENT(0x2e, 0x4f),
 	EVENT(0x2e, 0x41),
@@ -76,6 +91,7 @@ struct {
 	uint64_t stores;
 	uint64_t loads_stores;
 	uint64_t branches_retired;
+	uint64_t instructions_retired;
 } pmc_results;
 
 /*
@@ -110,10 +126,12 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
 static void run_and_measure_loop(uint32_t msr_base)
 {
 	const uint64_t branches_retired = rdmsr(msr_base + 0);
+	const uint64_t insn_retired = rdmsr(msr_base + 1);
 
 	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
 
 	pmc_results.branches_retired = rdmsr(msr_base + 0) - branches_retired;
+	pmc_results.instructions_retired = rdmsr(msr_base + 1) - insn_retired;
 }
 
 static void intel_guest_code(void)
@@ -127,7 +145,9 @@ static void intel_guest_code(void)
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+		wrmsr(MSR_P6_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
+		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
 
 		run_and_measure_loop(MSR_IA32_PMC0);
 		GUEST_SYNC(0);
@@ -149,6 +169,8 @@ static void amd_guest_code(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0);
 		wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
 		      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
+		wrmsr(MSR_K7_EVNTSEL1, ARCH_PERFMON_EVENTSEL_ENABLE |
+		      ARCH_PERFMON_EVENTSEL_OS | INST_RETIRED);
 
 		run_and_measure_loop(MSR_K7_PERFCTR0);
 		GUEST_SYNC(0);
@@ -263,20 +285,26 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
 #define ASSERT_PMC_COUNTING_INSTRUCTIONS()	\
 do {	\
 	uint64_t br = pmc_results.branches_retired;	\
+	uint64_t ir = pmc_results.instructions_retired;	\
 	\
 	if (br && br != NUM_BRANCHES)	\
 		pr_info("%s: Branch instructions retired = %lu (expected %u)\n",	\
 			__func__, br, NUM_BRANCHES);	\
 	TEST_ASSERT(br, "%s: Branch instructions retired = %lu (expected > 0)",	\
 		    __func__, br);	\
+	TEST_ASSERT(ir, "%s: Instructions retired = %lu (expected > 0)",	\
+		    __func__, ir);	\
 } while (0)
 
 #define ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS()	\
 do {	\
 	uint64_t br = pmc_results.branches_retired;	\
+	uint64_t ir = pmc_results.instructions_retired;	\
 	\
 	TEST_ASSERT(!br, "%s: Branch instructions retired = %lu (expected 0)",	\
 		    __func__, br);	\
+	TEST_ASSERT(!ir, "%s: Instructions retired = %lu (expected 0)",	\
+		    __func__, ir);	\
 } while (0)
 
 static void test_without_filter(struct kvm_vcpu *vcpu)
@@ -329,6 +357,7 @@ static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
 
+	remove_event(f, INST_RETIRED);
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
 	test_with_filter(vcpu, f);
@@ -341,6 +370,7 @@ static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
 
+	remove_event(f, INST_RETIRED);
 	remove_event(f, INTEL_BR_RETIRED);
 	remove_event(f, AMD_ZEN_BR_RETIRED);
 	test_with_filter(vcpu, f);
-- 
2.40.0.577.gac1e443424-goog