From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter, linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan, Zhengjun Xing, James Clark
Subject: [PATCH 1/7] perf stat: Convert perf_stat_evsel.res_stats array
Date: Fri, 30 Sep 2022 13:21:04 -0700
Message-Id: <20220930202110.845199-2-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

It uses only one member, so there is no need to have it as an array.

Reviewed-by: James Clark
Signed-off-by: Namhyung Kim
---
 tools/perf/util/stat-display.c |  2 +-
 tools/perf/util/stat.c         | 10 +++-------
 tools/perf/util/stat.h         |  2 +-
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index b82844cb0ce7..234491f43c36 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -67,7 +67,7 @@ static void print_noise(struct perf_stat_config *config,
                 return;
 
         ps = evsel->stats;
-        print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
+        print_noise_pct(config, stddev_stats(&ps->res_stats), avg);
 }
 
 static void print_cgroup(struct perf_stat_config *config, struct evsel *evsel)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ce5e9e372fc4..6bcd3dc32a71 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -132,12 +132,9 @@ static void perf_stat_evsel_id_init(struct evsel *evsel)
 
 static void evsel__reset_stat_priv(struct evsel *evsel)
 {
-        int i;
         struct perf_stat_evsel *ps = evsel->stats;
 
-        for (i = 0; i < 3; i++)
-                init_stats(&ps->res_stats[i]);
-
+        init_stats(&ps->res_stats);
         perf_stat_evsel_id_init(evsel);
 }
 
@@ -440,7 +437,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
         struct perf_counts_values *aggr = &counter->counts->aggr;
         struct perf_stat_evsel *ps = counter->stats;
         u64 *count = counter->counts->aggr.values;
-        int i, ret;
+        int ret;
 
         aggr->val = aggr->ena = aggr->run = 0;
 
@@ -458,8 +455,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
         evsel__compute_deltas(counter, -1, -1, aggr);
         perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
 
-        for (i = 0; i < 3; i++)
-                update_stats(&ps->res_stats[i], count[i]);
+        update_stats(&ps->res_stats, *count);
 
         if (verbose > 0) {
                 fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 72713b344b79..3eba38a1a149 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -43,7 +43,7 @@ enum perf_stat_evsel_id {
 };
 
 struct perf_stat_evsel {
-        struct stats            res_stats[3];
+        struct stats            res_stats;
         enum perf_stat_evsel_id id;
         u64                     *group_data;
 };
-- 
2.38.0.rc1.362.ged0d419d3c-goog
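The res_stats entry above feeds perf's generic running-statistics helpers. The
sketch below is a simplified, self-contained paraphrase of those helpers
(patterned after tools/perf/util/stat.h; names kept for orientation, details
elided), showing why a single accumulator per event is enough: print_noise()
only ever reads the statistics of the aggregated value, which previously sat
in res_stats[0] while the other two slots were written but never read.

#include <math.h>
#include <stdio.h>

/* Welford-style online mean/variance over the values seen across runs. */
struct stats {
        double n, mean, M2;
};

static void init_stats(struct stats *s)
{
        s->n = s->mean = s->M2 = 0.0;
}

static void update_stats(struct stats *s, unsigned long long val)
{
        double delta;

        s->n += 1.0;
        delta = (double)val - s->mean;
        s->mean += delta / s->n;
        s->M2 += delta * ((double)val - s->mean);
}

/* Standard deviation of the mean, which drives the "+- x.xx%" noise column. */
static double stddev_stats(struct stats *s)
{
        if (s->n < 2.0)
                return 0.0;
        return sqrt((s->M2 / (s->n - 1.0)) / s->n);
}

int main(void)
{
        struct stats res_stats;         /* one accumulator per evsel */
        unsigned long long runs[] = { 1000210, 999870, 1000410 };

        init_stats(&res_stats);
        for (int i = 0; i < 3; i++)
                update_stats(&res_stats, runs[i]);
        printf("%.1f +- %.1f\n", res_stats.mean, stddev_stats(&res_stats));
        return 0;                       /* build: cc sketch.c -lm */
}

In practice this is what produces the noise column printed by, for example,
"perf stat -r 5 -- <workload>".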
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter, linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan, Zhengjun Xing, James Clark
Subject: [PATCH 2/7] perf stat: Don't call perf_stat_evsel_id_init() repeatedly
Date: Fri, 30 Sep 2022 13:21:05 -0700
Message-Id: <20220930202110.845199-3-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

evsel__reset_stat_priv() is called more than once when the user gives the -r
option for multiple runs, but it does not need to re-initialize the event id
each time.  Do the id lookup once, in the allocation path.

Reviewed-by: James Clark
Signed-off-by: Namhyung Kim
---
 tools/perf/util/stat.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 6bcd3dc32a71..e1d3152ce664 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -135,7 +135,6 @@ static void evsel__reset_stat_priv(struct evsel *evsel)
         struct perf_stat_evsel *ps = evsel->stats;
 
         init_stats(&ps->res_stats);
-        perf_stat_evsel_id_init(evsel);
 }
 
 static int evsel__alloc_stat_priv(struct evsel *evsel)
@@ -143,6 +142,7 @@ static int evsel__alloc_stat_priv(struct evsel *evsel)
         evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
         if (evsel->stats == NULL)
                 return -ENOMEM;
+        perf_stat_evsel_id_init(evsel);
         evsel__reset_stat_priv(evsel);
         return 0;
 }
-- 
2.38.0.rc1.362.ged0d419d3c-goog
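To make the alloc-once / reset-per-run split concrete, here is a small
stand-alone sketch of the calling pattern the patch relies on.  The helper
names mirror the perf ones but the bodies are stubs, not the real
implementation: with -r N, the reset helper runs before every iteration while
the allocation (and now the id lookup) happens a single time.

#include <stdio.h>
#include <stdlib.h>

struct stats { double n, mean, M2; };

struct stat_priv {
        struct stats res_stats;
        int id;                         /* resolved once at alloc time */
};

static void init_stats(struct stats *s) { s->n = s->mean = s->M2 = 0.0; }

static int alloc_stat_priv(struct stat_priv **ps)
{
        *ps = calloc(1, sizeof(**ps));
        if (!*ps)
                return -1;
        (*ps)->id = 42;                 /* stand-in for perf_stat_evsel_id_init() */
        return 0;
}

static void reset_stat_priv(struct stat_priv *ps)
{
        init_stats(&ps->res_stats);     /* the only per-run work left */
}

int main(void)
{
        struct stat_priv *ps;
        int nr_runs = 3;

        if (alloc_stat_priv(&ps))       /* once */
                return 1;
        for (int run = 0; run < nr_runs; run++)
                reset_stat_priv(ps);    /* per -r iteration */

        printf("id %d resolved once, stats reset %d times\n", ps->id, nr_runs);
        free(ps);
        return 0;
}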
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter, linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan, Zhengjun Xing, James Clark
Subject: [PATCH 3/7] perf stat: Rename saved_value->cpu_map_idx
Date: Fri, 30 Sep 2022 13:21:06 -0700
Message-Id: <20220930202110.845199-4-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

The cpu_map_idx field is only there to differentiate values from other
entries, so it does not have to be a cpu map index strictly; a thread map
index or an aggr map index can be passed as well.  Rename the field first.
No functional change intended.

Signed-off-by: Namhyung Kim
---
 tools/perf/util/stat-shadow.c | 308 +++++++++++++++++-----------------
 tools/perf/util/stat.h        |   6 +-
 2 files changed, 157 insertions(+), 157 deletions(-)

diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 9e1eddeff21b..945c40c10423 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -33,7 +33,7 @@ struct saved_value {
         struct evsel    *evsel;
         enum stat_type  type;
         int             ctx;
-        int             cpu_map_idx;
+        int             map_idx;       /* cpu map index */
         struct cgroup   *cgrp;
         struct runtime_stat *stat;
         struct stats    stats;
@@ -48,8 +48,8 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
                                                  rb_node);
         const struct saved_value *b = entry;
 
-        if (a->cpu_map_idx != b->cpu_map_idx)
-                return a->cpu_map_idx - b->cpu_map_idx;
+        if (a->map_idx != b->map_idx)
+                return a->map_idx - b->map_idx;
 
         /*
          * Previously the rbtree was used to link generic metrics.
@@ -106,7 +106,7 @@ static void saved_value_delete(struct rblist *rblist __= maybe_unused, } =20 static struct saved_value *saved_value_lookup(struct evsel *evsel, - int cpu_map_idx, + int map_idx, bool create, enum stat_type type, int ctx, @@ -116,7 +116,7 @@ static struct saved_value *saved_value_lookup(struct ev= sel *evsel, struct rblist *rblist; struct rb_node *nd; struct saved_value dm =3D { - .cpu_map_idx =3D cpu_map_idx, + .map_idx =3D map_idx, .evsel =3D evsel, .type =3D type, .ctx =3D ctx, @@ -215,10 +215,10 @@ struct runtime_stat_data { =20 static void update_runtime_stat(struct runtime_stat *st, enum stat_type type, - int cpu_map_idx, u64 count, + int map_idx, u64 count, struct runtime_stat_data *rsd) { - struct saved_value *v =3D saved_value_lookup(NULL, cpu_map_idx, true, typ= e, + struct saved_value *v =3D saved_value_lookup(NULL, map_idx, true, type, rsd->ctx, st, rsd->cgrp); =20 if (v) @@ -231,7 +231,7 @@ static void update_runtime_stat(struct runtime_stat *st, * instruction rates, etc: */ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count, - int cpu_map_idx, struct runtime_stat *st) + int map_idx, struct runtime_stat *st) { u64 count_ns =3D count; struct saved_value *v; @@ -243,88 +243,88 @@ void perf_stat__update_shadow_stats(struct evsel *cou= nter, u64 count, count *=3D counter->scale; =20 if (evsel__is_clock(counter)) - update_runtime_stat(st, STAT_NSECS, cpu_map_idx, count_ns, &rsd); + update_runtime_stat(st, STAT_NSECS, map_idx, count_ns, &rsd); else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - update_runtime_stat(st, STAT_CYCLES, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_CYCLES, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) - update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_CYCLES_IN_TX, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TRANSACTION_START)) - update_runtime_stat(st, STAT_TRANSACTION, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_TRANSACTION, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, ELISION_START)) - update_runtime_stat(st, STAT_ELISION, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_ELISION, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS)) update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED)) update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED)) update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES)) update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES)) update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING)) update_runtime_stat(st, STAT_TOPDOWN_RETIRING, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC)) update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND, - cpu_map_idx, count, 
&rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS)) update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT)) update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT)) update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND)) update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) update_runtime_stat(st, STAT_STALLED_CYCLES_BACK, - cpu_map_idx, count, &rsd); + map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_runtime_stat(st, STAT_BRANCHES, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_BRANCHES, map_idx, count, &rsd); else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) - update_runtime_stat(st, STAT_CACHEREFS, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_CACHEREFS, map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) - update_runtime_stat(st, STAT_L1_DCACHE, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_L1_DCACHE, map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) - update_runtime_stat(st, STAT_L1_ICACHE, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_L1_ICACHE, map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL)) - update_runtime_stat(st, STAT_LL_CACHE, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_LL_CACHE, map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) - update_runtime_stat(st, STAT_DTLB_CACHE, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_DTLB_CACHE, map_idx, count, &rsd); else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) - update_runtime_stat(st, STAT_ITLB_CACHE, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_ITLB_CACHE, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, SMI_NUM)) - update_runtime_stat(st, STAT_SMI_NUM, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_SMI_NUM, map_idx, count, &rsd); else if (perf_stat_evsel__is(counter, APERF)) - update_runtime_stat(st, STAT_APERF, cpu_map_idx, count, &rsd); + update_runtime_stat(st, STAT_APERF, map_idx, count, &rsd); =20 if (counter->collect_stat) { - v =3D saved_value_lookup(counter, cpu_map_idx, true, STAT_NONE, 0, st, + v =3D saved_value_lookup(counter, map_idx, true, STAT_NONE, 0, st, rsd.cgrp); update_stats(&v->stats, count); if (counter->metric_leader) v->metric_total +=3D count; } else if (counter->metric_leader) { v =3D saved_value_lookup(counter->metric_leader, - cpu_map_idx, true, STAT_NONE, 0, st, rsd.cgrp); + map_idx, true, STAT_NONE, 0, st, rsd.cgrp); v->metric_total +=3D count; v->metric_other++; } @@ -466,12 +466,12 @@ void perf_stat__collect_metric_expr(struct evlist *ev= sel_list) } =20 static double runtime_stat_avg(struct runtime_stat *st, - enum stat_type 
type, int cpu_map_idx, + enum stat_type type, int map_idx, struct runtime_stat_data *rsd) { struct saved_value *v; =20 - v =3D saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rs= d->cgrp); + v =3D saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->c= grp); if (!v) return 0.0; =20 @@ -479,12 +479,12 @@ static double runtime_stat_avg(struct runtime_stat *s= t, } =20 static double runtime_stat_n(struct runtime_stat *st, - enum stat_type type, int cpu_map_idx, + enum stat_type type, int map_idx, struct runtime_stat_data *rsd) { struct saved_value *v; =20 - v =3D saved_value_lookup(NULL, cpu_map_idx, false, type, rsd->ctx, st, rs= d->cgrp); + v =3D saved_value_lookup(NULL, map_idx, false, type, rsd->ctx, st, rsd->c= grp); if (!v) return 0.0; =20 @@ -492,7 +492,7 @@ static double runtime_stat_n(struct runtime_stat *st, } =20 static void print_stalled_cycles_frontend(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -500,7 +500,7 @@ static void print_stalled_cycles_frontend(struct perf_s= tat_config *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -515,7 +515,7 @@ static void print_stalled_cycles_frontend(struct perf_s= tat_config *config, } =20 static void print_stalled_cycles_backend(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -523,7 +523,7 @@ static void print_stalled_cycles_backend(struct perf_st= at_config *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -534,7 +534,7 @@ static void print_stalled_cycles_backend(struct perf_st= at_config *config, } =20 static void print_branch_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -542,7 +542,7 @@ static void print_branch_misses(struct perf_stat_config= *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_BRANCHES, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_BRANCHES, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -553,7 +553,7 @@ static void print_branch_misses(struct perf_stat_config= *config, } =20 static void print_l1_dcache_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -561,7 +561,7 @@ static void print_l1_dcache_misses(struct perf_stat_con= fig *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_L1_DCACHE, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_L1_DCACHE, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -572,7 +572,7 @@ static void print_l1_dcache_misses(struct perf_stat_con= fig *config, } =20 static void print_l1_icache_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct 
perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -580,7 +580,7 @@ static void print_l1_icache_misses(struct perf_stat_con= fig *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_L1_ICACHE, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_L1_ICACHE, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -590,7 +590,7 @@ static void print_l1_icache_misses(struct perf_stat_con= fig *config, } =20 static void print_dtlb_cache_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -598,7 +598,7 @@ static void print_dtlb_cache_misses(struct perf_stat_co= nfig *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_DTLB_CACHE, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_DTLB_CACHE, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -608,7 +608,7 @@ static void print_dtlb_cache_misses(struct perf_stat_co= nfig *config, } =20 static void print_itlb_cache_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -616,7 +616,7 @@ static void print_itlb_cache_misses(struct perf_stat_co= nfig *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_ITLB_CACHE, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_ITLB_CACHE, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -626,7 +626,7 @@ static void print_itlb_cache_misses(struct perf_stat_co= nfig *config, } =20 static void print_ll_cache_misses(struct perf_stat_config *config, - int cpu_map_idx, double avg, + int map_idx, double avg, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -634,7 +634,7 @@ static void print_ll_cache_misses(struct perf_stat_conf= ig *config, double total, ratio =3D 0.0; const char *color; =20 - total =3D runtime_stat_avg(st, STAT_LL_CACHE, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_LL_CACHE, map_idx, rsd); =20 if (total) ratio =3D avg / total * 100.0; @@ -692,61 +692,61 @@ static double sanitize_val(double x) return x; } =20 -static double td_total_slots(int cpu_map_idx, struct runtime_stat *st, +static double td_total_slots(int map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { - return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu_map_idx, rsd); + return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, map_idx, rsd); } =20 -static double td_bad_spec(int cpu_map_idx, struct runtime_stat *st, +static double td_bad_spec(int map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double bad_spec =3D 0; double total_slots; double total; =20 - total =3D runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu_map_idx, rs= d) - - runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu_map_idx, rsd) + - runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu_map_idx, rsd); + total =3D runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, map_idx, rsd) - + runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, map_idx, rsd) + + runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, map_idx, rsd); =20 - total_slots =3D td_total_slots(cpu_map_idx, st, rsd); + total_slots =3D td_total_slots(map_idx, st, rsd); if (total_slots) bad_spec =3D total / total_slots; return 
sanitize_val(bad_spec); } =20 -static double td_retiring(int cpu_map_idx, struct runtime_stat *st, +static double td_retiring(int map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double retiring =3D 0; - double total_slots =3D td_total_slots(cpu_map_idx, st, rsd); + double total_slots =3D td_total_slots(map_idx, st, rsd); double ret_slots =3D runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, - cpu_map_idx, rsd); + map_idx, rsd); =20 if (total_slots) retiring =3D ret_slots / total_slots; return retiring; } =20 -static double td_fe_bound(int cpu_map_idx, struct runtime_stat *st, +static double td_fe_bound(int map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { double fe_bound =3D 0; - double total_slots =3D td_total_slots(cpu_map_idx, st, rsd); + double total_slots =3D td_total_slots(map_idx, st, rsd); double fetch_bub =3D runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES, - cpu_map_idx, rsd); + map_idx, rsd); =20 if (total_slots) fe_bound =3D fetch_bub / total_slots; return fe_bound; } =20 -static double td_be_bound(int cpu_map_idx, struct runtime_stat *st, +static double td_be_bound(int map_idx, struct runtime_stat *st, struct runtime_stat_data *rsd) { - double sum =3D (td_fe_bound(cpu_map_idx, st, rsd) + - td_bad_spec(cpu_map_idx, st, rsd) + - td_retiring(cpu_map_idx, st, rsd)); + double sum =3D (td_fe_bound(map_idx, st, rsd) + + td_bad_spec(map_idx, st, rsd) + + td_retiring(map_idx, st, rsd)); if (sum =3D=3D 0) return 0; return sanitize_val(1.0 - sum); @@ -757,15 +757,15 @@ static double td_be_bound(int cpu_map_idx, struct run= time_stat *st, * the ratios we need to recreate the sum. */ =20 -static double td_metric_ratio(int cpu_map_idx, enum stat_type type, +static double td_metric_ratio(int map_idx, enum stat_type type, struct runtime_stat *stat, struct runtime_stat_data *rsd) { - double sum =3D runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx,= rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) + - runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd); - double d =3D runtime_stat_avg(stat, type, cpu_map_idx, rsd); + double sum =3D runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd= ) + + runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) + + runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) + + runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd); + double d =3D runtime_stat_avg(stat, type, map_idx, rsd); =20 if (sum) return d / sum; @@ -777,23 +777,23 @@ static double td_metric_ratio(int cpu_map_idx, enum s= tat_type type, * We allow two missing. 
*/ =20 -static bool full_td(int cpu_map_idx, struct runtime_stat *stat, +static bool full_td(int map_idx, struct runtime_stat *stat, struct runtime_stat_data *rsd) { int c =3D 0; =20 - if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu_map_idx, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu_map_idx, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu_map_idx, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, map_idx, rsd) > 0) c++; - if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu_map_idx, rsd) > 0) + if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, map_idx, rsd) > 0) c++; return c >=3D 2; } =20 -static void print_smi_cost(struct perf_stat_config *config, int cpu_map_id= x, +static void print_smi_cost(struct perf_stat_config *config, int map_idx, struct perf_stat_output_ctx *out, struct runtime_stat *st, struct runtime_stat_data *rsd) @@ -801,9 +801,9 @@ static void print_smi_cost(struct perf_stat_config *con= fig, int cpu_map_idx, double smi_num, aperf, cycles, cost =3D 0.0; const char *color =3D NULL; =20 - smi_num =3D runtime_stat_avg(st, STAT_SMI_NUM, cpu_map_idx, rsd); - aperf =3D runtime_stat_avg(st, STAT_APERF, cpu_map_idx, rsd); - cycles =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, rsd); + smi_num =3D runtime_stat_avg(st, STAT_SMI_NUM, map_idx, rsd); + aperf =3D runtime_stat_avg(st, STAT_APERF, map_idx, rsd); + cycles =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, rsd); =20 if ((cycles =3D=3D 0) || (aperf =3D=3D 0)) return; @@ -820,7 +820,7 @@ static void print_smi_cost(struct perf_stat_config *con= fig, int cpu_map_idx, static int prepare_metric(struct evsel **metric_events, struct metric_ref *metric_refs, struct expr_parse_ctx *pctx, - int cpu_map_idx, + int map_idx, struct runtime_stat *st) { double scale; @@ -859,7 +859,7 @@ static int prepare_metric(struct evsel **metric_events, abort(); } } else { - v =3D saved_value_lookup(metric_events[i], cpu_map_idx, false, + v =3D saved_value_lookup(metric_events[i], map_idx, false, STAT_NONE, 0, st, metric_events[i]->cgrp); if (!v) @@ -897,7 +897,7 @@ static void generic_metric(struct perf_stat_config *con= fig, const char *metric_name, const char *metric_unit, int runtime, - int cpu_map_idx, + int map_idx, struct perf_stat_output_ctx *out, struct runtime_stat *st) { @@ -915,7 +915,7 @@ static void generic_metric(struct perf_stat_config *con= fig, pctx->sctx.user_requested_cpu_list =3D strdup(config->user_requested_cpu= _list); pctx->sctx.runtime =3D runtime; pctx->sctx.system_wide =3D config->system_wide; - i =3D prepare_metric(metric_events, metric_refs, pctx, cpu_map_idx, st); + i =3D prepare_metric(metric_events, metric_refs, pctx, map_idx, st); if (i < 0) { expr__ctx_free(pctx); return; @@ -960,7 +960,7 @@ static void generic_metric(struct perf_stat_config *con= fig, expr__ctx_free(pctx); } =20 -double test_generic_metric(struct metric_expr *mexp, int cpu_map_idx, stru= ct runtime_stat *st) +double test_generic_metric(struct metric_expr *mexp, int map_idx, struct r= untime_stat *st) { struct expr_parse_ctx *pctx; double ratio =3D 0.0; @@ -969,7 +969,7 @@ double test_generic_metric(struct metric_expr *mexp, in= t cpu_map_idx, struct run if (!pctx) return NAN; =20 - if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, cpu_map_= idx, st) < 0) + if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, 
map_idx,= st) < 0) goto out; =20 if (expr__parse(&ratio, pctx, mexp->metric_expr)) @@ -982,7 +982,7 @@ double test_generic_metric(struct metric_expr *mexp, in= t cpu_map_idx, struct run =20 void perf_stat__print_shadow_stats(struct perf_stat_config *config, struct evsel *evsel, - double avg, int cpu_map_idx, + double avg, int map_idx, struct perf_stat_output_ctx *out, struct rblist *metric_events, struct runtime_stat *st) @@ -1001,7 +1001,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, if (config->iostat_run) { iostat_print_metric(config, evsel, out); } else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { - total =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd); =20 if (total) { ratio =3D avg / total; @@ -1011,11 +1011,11 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0); } =20 - total =3D runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu_map_idx, &= rsd); + total =3D runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, map_idx, &rsd); =20 total =3D max(total, runtime_stat_avg(st, STAT_STALLED_CYCLES_BACK, - cpu_map_idx, &rsd)); + map_idx, &rsd)); =20 if (total && avg) { out->new_line(config, ctxp); @@ -1025,8 +1025,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, ratio); } } else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { - if (runtime_stat_n(st, STAT_BRANCHES, cpu_map_idx, &rsd) !=3D 0) - print_branch_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_BRANCHES, map_idx, &rsd) !=3D 0) + print_branch_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all branches", 0); } else if ( @@ -1035,8 +1035,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { =20 - if (runtime_stat_n(st, STAT_L1_DCACHE, cpu_map_idx, &rsd) !=3D 0) - print_l1_dcache_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_L1_DCACHE, map_idx, &rsd) !=3D 0) + print_l1_dcache_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0); } else if ( @@ -1045,8 +1045,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { =20 - if (runtime_stat_n(st, STAT_L1_ICACHE, cpu_map_idx, &rsd) !=3D 0) - print_l1_icache_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_L1_ICACHE, map_idx, &rsd) !=3D 0) + print_l1_icache_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0); } else if ( @@ -1055,8 +1055,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { =20 - if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu_map_idx, &rsd) !=3D 0) - print_dtlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_DTLB_CACHE, map_idx, &rsd) !=3D 0) + print_dtlb_cache_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0); } else if ( @@ -1065,8 +1065,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | 
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { =20 - if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu_map_idx, &rsd) !=3D 0) - print_itlb_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_ITLB_CACHE, map_idx, &rsd) !=3D 0) + print_itlb_cache_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0); } else if ( @@ -1075,27 +1075,27 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) { =20 - if (runtime_stat_n(st, STAT_LL_CACHE, cpu_map_idx, &rsd) !=3D 0) - print_ll_cache_misses(config, cpu_map_idx, avg, out, st, &rsd); + if (runtime_stat_n(st, STAT_LL_CACHE, map_idx, &rsd) !=3D 0) + print_ll_cache_misses(config, map_idx, avg, out, st, &rsd); else print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0); } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) { - total =3D runtime_stat_avg(st, STAT_CACHEREFS, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CACHEREFS, map_idx, &rsd); =20 if (total) ratio =3D avg * 100 / total; =20 - if (runtime_stat_n(st, STAT_CACHEREFS, cpu_map_idx, &rsd) !=3D 0) + if (runtime_stat_n(st, STAT_CACHEREFS, map_idx, &rsd) !=3D 0) print_metric(config, ctxp, NULL, "%8.3f %%", "of all cache refs", ratio); else print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0); } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { - print_stalled_cycles_frontend(config, cpu_map_idx, avg, out, st, &rsd); + print_stalled_cycles_frontend(config, map_idx, avg, out, st, &rsd); } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { - print_stalled_cycles_backend(config, cpu_map_idx, avg, out, st, &rsd); + print_stalled_cycles_backend(config, map_idx, avg, out, st, &rsd); } else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { - total =3D runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd); =20 if (total) { ratio =3D avg / total; @@ -1104,7 +1104,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, print_metric(config, ctxp, NULL, NULL, "Ghz", 0); } } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) { - total =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd); =20 if (total) print_metric(config, ctxp, NULL, @@ -1114,8 +1114,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, print_metric(config, ctxp, NULL, NULL, "transactional cycles", 0); } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) { - total =3D runtime_stat_avg(st, STAT_CYCLES, cpu_map_idx, &rsd); - total2 =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES, map_idx, &rsd); + total2 =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd); =20 if (total2 < avg) total2 =3D avg; @@ -1125,19 +1125,19 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, else print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0); } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) { - total =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd); =20 if (avg) ratio =3D total / avg; =20 - if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd) !=3D 0) + if (runtime_stat_n(st, STAT_CYCLES_IN_TX, map_idx, &rsd) !=3D 0) print_metric(config, 
ctxp, NULL, "%8.0f", "cycles / transaction", ratio); else print_metric(config, ctxp, NULL, NULL, "cycles / transaction", 0); } else if (perf_stat_evsel__is(evsel, ELISION_START)) { - total =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_CYCLES_IN_TX, map_idx, &rsd); =20 if (avg) ratio =3D total / avg; @@ -1150,28 +1150,28 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, else print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) { - double fe_bound =3D td_fe_bound(cpu_map_idx, st, &rsd); + double fe_bound =3D td_fe_bound(map_idx, st, &rsd); =20 if (fe_bound > 0.2) color =3D PERF_COLOR_RED; print_metric(config, ctxp, color, "%8.1f%%", "frontend bound", fe_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) { - double retiring =3D td_retiring(cpu_map_idx, st, &rsd); + double retiring =3D td_retiring(map_idx, st, &rsd); =20 if (retiring > 0.7) color =3D PERF_COLOR_GREEN; print_metric(config, ctxp, color, "%8.1f%%", "retiring", retiring * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) { - double bad_spec =3D td_bad_spec(cpu_map_idx, st, &rsd); + double bad_spec =3D td_bad_spec(map_idx, st, &rsd); =20 if (bad_spec > 0.1) color =3D PERF_COLOR_RED; print_metric(config, ctxp, color, "%8.1f%%", "bad speculation", bad_spec * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) { - double be_bound =3D td_be_bound(cpu_map_idx, st, &rsd); + double be_bound =3D td_be_bound(map_idx, st, &rsd); const char *name =3D "backend bound"; static int have_recovery_bubbles =3D -1; =20 @@ -1184,14 +1184,14 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, =20 if (be_bound > 0.2) color =3D PERF_COLOR_RED; - if (td_total_slots(cpu_map_idx, st, &rsd) > 0) + if (td_total_slots(map_idx, st, &rsd) > 0) print_metric(config, ctxp, color, "%8.1f%%", name, be_bound * 100.); else print_metric(config, ctxp, NULL, NULL, name, 0); } else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) && - full_td(cpu_map_idx, st, &rsd)) { - double retiring =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd)) { + double retiring =3D td_metric_ratio(map_idx, STAT_TOPDOWN_RETIRING, st, &rsd); if (retiring > 0.7) @@ -1199,8 +1199,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, print_metric(config, ctxp, color, "%8.1f%%", "Retiring", retiring * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) && - full_td(cpu_map_idx, st, &rsd)) { - double fe_bound =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd)) { + double fe_bound =3D td_metric_ratio(map_idx, STAT_TOPDOWN_FE_BOUND, st, &rsd); if (fe_bound > 0.2) @@ -1208,8 +1208,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound", fe_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) && - full_td(cpu_map_idx, st, &rsd)) { - double be_bound =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd)) { + double be_bound =3D td_metric_ratio(map_idx, STAT_TOPDOWN_BE_BOUND, st, &rsd); if (be_bound > 0.2) @@ -1217,8 +1217,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_c= onfig *config, print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound", be_bound * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) && - full_td(cpu_map_idx, st, &rsd)) { - double bad_spec =3D td_metric_ratio(cpu_map_idx, + 
full_td(map_idx, st, &rsd)) { + double bad_spec =3D td_metric_ratio(map_idx, STAT_TOPDOWN_BAD_SPEC, st, &rsd); if (bad_spec > 0.1) @@ -1226,11 +1226,11 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation", bad_spec * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) && - full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { - double retiring =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) { + double retiring =3D td_metric_ratio(map_idx, STAT_TOPDOWN_RETIRING, st, &rsd); - double heavy_ops =3D td_metric_ratio(cpu_map_idx, + double heavy_ops =3D td_metric_ratio(map_idx, STAT_TOPDOWN_HEAVY_OPS, st, &rsd); double light_ops =3D retiring - heavy_ops; @@ -1246,11 +1246,11 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, print_metric(config, ctxp, color, "%8.1f%%", "Light Operations", light_ops * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) && - full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { - double bad_spec =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) { + double bad_spec =3D td_metric_ratio(map_idx, STAT_TOPDOWN_BAD_SPEC, st, &rsd); - double br_mis =3D td_metric_ratio(cpu_map_idx, + double br_mis =3D td_metric_ratio(map_idx, STAT_TOPDOWN_BR_MISPREDICT, st, &rsd); double m_clears =3D bad_spec - br_mis; @@ -1266,11 +1266,11 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears", m_clears * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) && - full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { - double fe_bound =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) { + double fe_bound =3D td_metric_ratio(map_idx, STAT_TOPDOWN_FE_BOUND, st, &rsd); - double fetch_lat =3D td_metric_ratio(cpu_map_idx, + double fetch_lat =3D td_metric_ratio(map_idx, STAT_TOPDOWN_FETCH_LAT, st, &rsd); double fetch_bw =3D fe_bound - fetch_lat; @@ -1286,11 +1286,11 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth", fetch_bw * 100.); } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) && - full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) { - double be_bound =3D td_metric_ratio(cpu_map_idx, + full_td(map_idx, st, &rsd) && (config->topdown_level > 1)) { + double be_bound =3D td_metric_ratio(map_idx, STAT_TOPDOWN_BE_BOUND, st, &rsd); - double mem_bound =3D td_metric_ratio(cpu_map_idx, + double mem_bound =3D td_metric_ratio(map_idx, STAT_TOPDOWN_MEM_BOUND, st, &rsd); double core_bound =3D be_bound - mem_bound; @@ -1308,12 +1308,12 @@ void perf_stat__print_shadow_stats(struct perf_stat= _config *config, } else if (evsel->metric_expr) { generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL, evsel->name, evsel->metric_name, NULL, 1, - cpu_map_idx, out, st); - } else if (runtime_stat_n(st, STAT_NSECS, cpu_map_idx, &rsd) !=3D 0) { + map_idx, out, st); + } else if (runtime_stat_n(st, STAT_NSECS, map_idx, &rsd) !=3D 0) { char unit =3D ' '; char unit_buf[10] =3D "/sec"; =20 - total =3D runtime_stat_avg(st, STAT_NSECS, cpu_map_idx, &rsd); + total =3D runtime_stat_avg(st, STAT_NSECS, map_idx, &rsd); if (total) ratio =3D convert_unit_double(1000000000.0 * avg / total, &unit); =20 @@ -1321,7 +1321,7 @@ void 
                                          perf_stat__print_shadow_stats(struct perf_stat_config *config,
                 snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                 print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
         } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
-                print_smi_cost(config, cpu_map_idx, out, st, &rsd);
+                print_smi_cost(config, map_idx, out, st, &rsd);
         } else {
                 num = 0;
         }
@@ -1335,7 +1335,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                         generic_metric(config, mexp->metric_expr, mexp->metric_events,
                                         mexp->metric_refs, evsel->name, mexp->metric_name,
                                         mexp->metric_unit, mexp->runtime,
-                                        cpu_map_idx, out, st);
+                                        map_idx, out, st);
                 }
         }
         if (num == 0)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 3eba38a1a149..93f6ca0d9761 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -234,7 +234,7 @@ void perf_stat__init_shadow_stats(void);
 void perf_stat__reset_shadow_stats(void);
 void perf_stat__reset_shadow_per_stat(struct runtime_stat *st);
 void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
-                                    int cpu_map_idx, struct runtime_stat *st);
+                                    int map_idx, struct runtime_stat *st);
 struct perf_stat_output_ctx {
         void *ctx;
         print_metric_t print_metric;
@@ -244,7 +244,7 @@ struct perf_stat_output_ctx {
 
 void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                    struct evsel *evsel,
-                                   double avg, int cpu,
+                                   double avg, int map_idx,
                                    struct perf_stat_output_ctx *out,
                                    struct rblist *metric_events,
                                    struct runtime_stat *st);
@@ -279,5 +279,5 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
                             struct target *_target, struct timespec *ts, int argc, const char **argv);
 
 struct metric_expr;
-double test_generic_metric(struct metric_expr *mexp, int cpu_map_idx, struct runtime_stat *st);
+double test_generic_metric(struct metric_expr *mexp, int map_idx, struct runtime_stat *st);
 #endif
-- 
2.38.0.rc1.362.ged0d419d3c-goog
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter, linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan, Zhengjun Xing, James Clark
Subject: [PATCH 4/7] perf stat: Use thread map index for shadow stat
Date: Fri, 30 Sep 2022 13:21:07 -0700
Message-Id: <20220930202110.845199-5-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

When AGGR_THREAD is active, the values are aggregated per thread.
Previously it used a cpu map index, which is invalid for AGGR_THREAD, so it
had to keep separate runtime stats accessed with index 0.  But it can simply
use rt_stat with the thread map index.  Rename first_shadow_cpu_map_idx() to
first_shadow_map_idx() and make it return the thread index in that case.

Reviewed-by: James Clark
Signed-off-by: Namhyung Kim
---
 tools/perf/util/stat-display.c | 20 +++++++++-----------
 tools/perf/util/stat-shadow.c  |  2 +-
 tools/perf/util/stat.c         |  8 ++------
 3 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 234491f43c36..570e2c04d47d 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -442,7 +442,7 @@ static void print_metric_header(struct perf_stat_config *config,
                 fprintf(os->fh, "%*s ", config->metric_only_len, unit);
 }
 
-static int first_shadow_cpu_map_idx(struct perf_stat_config *config,
+static int first_shadow_map_idx(struct perf_stat_config *config,
                                 struct evsel *evsel, const struct aggr_cpu_id *id)
 {
         struct perf_cpu_map *cpus = evsel__cpus(evsel);
@@ -452,6 +452,9 @@ static int first_shadow_cpu_map_idx(struct perf_stat_config *config,
         if (config->aggr_mode == AGGR_NONE)
                 return perf_cpu_map__idx(cpus, id->cpu);
 
+        if (config->aggr_mode == AGGR_THREAD)
+                return id->thread;
+
         if (!config->aggr_get_id)
                 return 0;
 
@@ -646,7 +649,7 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
         }
 
         perf_stat__print_shadow_stats(config, counter, uval,
-                                first_shadow_cpu_map_idx(config, counter, &id),
+                                first_shadow_map_idx(config, counter, &id),
                                 &out, &config->metric_events, st);
         if (!config->csv_output && !config->metric_only && !config->json_output) {
                 print_noise(config, counter, noise);
@@ -676,7 +679,7 @@ static void aggr_update_shadow(struct perf_stat_config *config,
                                 val += perf_counts(counter->counts, idx, 0)->val;
                         }
                         perf_stat__update_shadow_stats(counter, val,
-                                        first_shadow_cpu_map_idx(config, counter, &id),
+                                        first_shadow_map_idx(config, counter, &id),
                                         &rt_stat);
                 }
         }
@@ -979,14 +982,9 @@ static void print_aggr_thread(struct perf_stat_config *config,
                         fprintf(output, "%s", prefix);
 
                 id = buf[thread].id;
-                if (config->stats)
-                        printout(config, id, 0, buf[thread].counter, buf[thread].uval,
-                                 prefix, buf[thread].run, buf[thread].ena, 1.0,
-                                 &config->stats[id.thread]);
-                else
-                        printout(config, id, 0, buf[thread].counter, buf[thread].uval,
-                                 prefix, buf[thread].run, buf[thread].ena, 1.0,
-                                 &rt_stat);
+                printout(config, id, 0, buf[thread].counter, buf[thread].uval,
+                         prefix, buf[thread].run, buf[thread].ena, 1.0,
+                         &rt_stat);
                 fputc('\n', output);
         }
 
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 945c40c10423..0676ee9986ff 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -33,7 +33,7 @@ struct saved_value {
         struct evsel    *evsel;
         enum stat_type  type;
         int             ctx;
-        int             map_idx;       /* cpu map index */
+        int             map_idx;       /* cpu or thread map index */
         struct cgroup   *cgrp;
         struct runtime_stat *stat;
         struct stats    stats;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index e1d3152ce664..21137c9d5259 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -389,12 +389,8 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
                 }
 
                 if (config->aggr_mode == AGGR_THREAD) {
-                        if (config->stats)
-                                perf_stat__update_shadow_stats(evsel,
-                                        count->val, 0, &config->stats[thread]);
-                        else
-                                perf_stat__update_shadow_stats(evsel,
-                                        count->val, 0, &rt_stat);
+                        perf_stat__update_shadow_stats(evsel, count->val,
+                                                       thread, &rt_stat);
                 }
                 break;
         case AGGR_GLOBAL:
-- 
2.38.0.rc1.362.ged0d419d3c-goog
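As a toy picture of the indexing change (a model only; the real code keys a
saved_value rbtree inside rt_stat by event and map_idx), the same shared
table can serve both aggregation modes: the slot comes from the cpu map for
AGGR_NONE and from the thread map for AGGR_THREAD, so the separate per-thread
runtime_stat copies are no longer needed.

#include <stdio.h>

enum aggr_mode { AGGR_NONE, AGGR_THREAD };

#define NR_SLOTS 8

static double shadow[NR_SLOTS];         /* stands in for the shared rt_stat */

static int shadow_map_idx(enum aggr_mode mode, int cpu_map_idx, int thread_map_idx)
{
        return mode == AGGR_THREAD ? thread_map_idx : cpu_map_idx;
}

int main(void)
{
        /* Counts attributed to thread 1 from two different CPU slots: under
         * AGGR_THREAD both updates land in the same thread-indexed slot. */
        shadow[shadow_map_idx(AGGR_THREAD, 3, 1)] += 100.0;
        shadow[shadow_map_idx(AGGR_THREAD, 5, 1)] += 50.0;
        printf("thread 1 shadow value: %.0f\n", shadow[1]);
        return 0;
}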
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter,
    linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan,
    Zhengjun Xing, James Clark
Subject: [PATCH 5/7] perf stat: Kill unused per-thread runtime stats
Date: Fri, 30 Sep 2022 13:21:08 -0700
Message-Id: <20220930202110.845199-6-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

Now that AGGR_THREAD uses the global rt_stat, there is no need for the
per-thread runtime stats.  Let's get rid of them.

Reviewed-by: James Clark
Signed-off-by: Namhyung Kim
---
 tools/perf/builtin-stat.c | 54 ---------------------------------------
 tools/perf/util/stat.h    |  2 --
 2 files changed, 56 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e05fe72c1d87..b86ebb25a799 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -292,13 +292,8 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
 
 static void perf_stat__reset_stats(void)
 {
-	int i;
-
 	evlist__reset_stats(evsel_list);
 	perf_stat__reset_shadow_stats();
-
-	for (i = 0; i < stat_config.stats_num; i++)
-		perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
 }
 
 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
@@ -489,46 +484,6 @@ static void read_counters(struct timespec *rs)
 	}
 }
 
-static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
-{
-	int i;
-
-	config->stats = calloc(nthreads, sizeof(struct runtime_stat));
-	if (!config->stats)
-		return -1;
-
-	config->stats_num = nthreads;
-
-	for (i = 0; i < nthreads; i++)
-		runtime_stat__init(&config->stats[i]);
-
-	return 0;
-}
-
-static void runtime_stat_delete(struct perf_stat_config *config)
-{
-	int i;
-
-	if (!config->stats)
-		return;
-
-	for (i = 0; i < config->stats_num; i++)
-		runtime_stat__exit(&config->stats[i]);
-
-	zfree(&config->stats);
-}
-
-static void runtime_stat_reset(struct perf_stat_config *config)
-{
-	int i;
-
-	if (!config->stats)
-		return;
-
-	for (i = 0; i < config->stats_num; i++)
-		perf_stat__reset_shadow_per_stat(&config->stats[i]);
-}
-
 static void process_interval(void)
 {
 	struct timespec ts, rs;
@@ -537,7 +492,6 @@ static void process_interval(void)
 	diff_timespec(&rs, &ts, &ref_time);
 
 	perf_stat__reset_shadow_per_stat(&rt_stat);
-	runtime_stat_reset(&stat_config);
 	read_counters(&rs);
 
 	if (STAT_RECORD) {
@@ -1018,7 +972,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 
 			evlist__copy_prev_raw_counts(evsel_list);
 			evlist__reset_prev_raw_counts(evsel_list);
-			runtime_stat_reset(&stat_config);
 			perf_stat__reset_shadow_per_stat(&rt_stat);
 		} else {
 			update_stats(&walltime_nsecs_stats, t1 - t0);
@@ -2514,12 +2467,6 @@ int cmd_stat(int argc, const char **argv)
 	 */
 	if (stat_config.aggr_mode == AGGR_THREAD) {
 		thread_map__read_comms(evsel_list->core.threads);
-		if (target.system_wide) {
-			if (runtime_stat_new(&stat_config,
-				perf_thread_map__nr(evsel_list->core.threads))) {
-				goto out;
-			}
-		}
 	}
 
 	if (stat_config.aggr_mode == AGGR_NODE)
@@ -2660,7 +2607,6 @@ int cmd_stat(int argc, const char **argv)
 	evlist__delete(evsel_list);
 
 	metricgroup__rblist_exit(&stat_config.metric_events);
-	runtime_stat_delete(&stat_config);
 	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
 
 	return status;
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 93f6ca0d9761..b0899c6e002f 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -153,8 +153,6 @@ struct perf_stat_config {
 	int run_count;
 	int print_free_counters_hint;
 	int print_mixed_hw_group_error;
-	struct runtime_stat *stats;
-	int stats_num;
 	const char *csv_sep;
 	struct stats *walltime_nsecs_stats;
 	struct rusage ru_data;
-- 
2.38.0.rc1.362.ged0d419d3c-goog
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter,
    linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan,
    Zhengjun Xing, James Clark
Subject: [PATCH 6/7] perf stat: Don't compare runtime stat for shadow stats
Date: Fri, 30 Sep 2022 13:21:09 -0700
Message-Id: <20220930202110.845199-7-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

Now it always uses the global rt_stat, so get rid of the stat field
from struct saved_value.  When both evsels are NULL, the remaining
comparison returns 0 anyway, so the special-case block in
saved_value_cmp() can be removed as well.
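For reference, the ordering that remains is sketched below (abridged,
hypothetical helper name; the type and ctx comparisons are elided).  Two
lookup keys with NULL evsels now fall through and compare equal, which
is exactly what the removed block returned:

	/* Abridged sketch of the remaining saved_value ordering. */
	static int saved_value_cmp_sketch(const struct saved_value *a,
					  const struct saved_value *b)
	{
		if (a->map_idx != b->map_idx)
			return a->map_idx - b->map_idx;
		/* ... type, ctx and cgrp are compared the same way ... */
		if (a->evsel == b->evsel)
			return 0;	/* includes the both-NULL case */
		return (char *)a->evsel < (char *)b->evsel ? -1 : +1;
	}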

Reviewed-by: James Clark
Signed-off-by: Namhyung Kim
---
 tools/perf/util/stat-shadow.c | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 0676ee9986ff..f08291e55f1b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -35,7 +35,6 @@ struct saved_value {
 	int ctx;
 	int map_idx;	/* cpu or thread map index */
 	struct cgroup *cgrp;
-	struct runtime_stat *stat;
 	struct stats stats;
 	u64 metric_total;
 	int metric_other;
@@ -67,16 +66,6 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
 	if (a->cgrp != b->cgrp)
 		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
 
-	if (a->evsel == NULL && b->evsel == NULL) {
-		if (a->stat == b->stat)
-			return 0;
-
-		if ((char *)a->stat < (char *)b->stat)
-			return -1;
-
-		return 1;
-	}
-
 	if (a->evsel == b->evsel)
 		return 0;
 	if ((char *)a->evsel < (char *)b->evsel)
@@ -120,7 +109,6 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
 		.evsel = evsel,
 		.type = type,
 		.ctx = ctx,
-		.stat = st,
 		.cgrp = cgrp,
 	};
 
-- 
2.38.0.rc1.362.ged0d419d3c-goog
From nobody Mon Apr 29 14:23:11 2024
From: Namhyung Kim
To: Arnaldo Carvalho de Melo, Jiri Olsa
Cc: Ingo Molnar, Peter Zijlstra, LKML, Ian Rogers, Adrian Hunter,
    linux-perf-users@vger.kernel.org, Andi Kleen, Kan Liang, Leo Yan,
    Zhengjun Xing, James Clark
Subject: [PATCH 7/7] perf stat: Rename to aggr_cpu_id.thread_idx
Date: Fri, 30 Sep 2022 13:21:10 -0700
Message-Id: <20220930202110.845199-8-namhyung@kernel.org>
In-Reply-To: <20220930202110.845199-1-namhyung@kernel.org>
References: <20220930202110.845199-1-namhyung@kernel.org>

The aggr_cpu_id has a 'thread' value, but it is actually an index into
the thread map rather than a thread id.  To reduce possible confusion,
rename it to 'thread_idx'.
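To illustrate the distinction (hypothetical snippet, not part of the
patch): the stored value is only meaningful as a position in the thread
map and has to go through the thread map accessors to get a comm or tid,
as the existing callers in stat-display.c already do:

	/* Hypothetical usage: thread_idx is a thread map position, not a tid. */
	const char *comm = perf_thread_map__comm(evsel->core.threads, id.thread_idx);
	pid_t tid = perf_thread_map__pid(evsel->core.threads, id.thread_idx);
	fprintf(config->output, "%s-%d", comm, tid);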

Suggested-by: Ian Rogers
Signed-off-by: Namhyung Kim
---
 tools/perf/tests/topology.c    | 10 +++++-----
 tools/perf/util/cpumap.c       |  8 ++++----
 tools/perf/util/cpumap.h       |  2 +-
 tools/perf/util/stat-display.c | 12 ++++++------
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 0b4f61b6cc6b..c4630cfc80ea 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -147,7 +147,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 		TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
 			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
-		TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1);
+		TEST_ASSERT_VAL("Cpu map - Thread IDX is set", id.thread_idx == -1);
 	}
 
 	// Test that core ID contains socket, die and core
@@ -163,7 +163,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 		TEST_ASSERT_VAL("Core map - Die ID doesn't match",
 			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
-		TEST_ASSERT_VAL("Core map - Thread is set", id.thread == -1);
+		TEST_ASSERT_VAL("Core map - Thread IDX is set", id.thread_idx == -1);
 	}
 
 	// Test that die ID contains socket and die
@@ -179,7 +179,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 		TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
 		TEST_ASSERT_VAL("Die map - CPU is set", id.cpu.cpu == -1);
-		TEST_ASSERT_VAL("Die map - Thread is set", id.thread == -1);
+		TEST_ASSERT_VAL("Die map - Thread IDX is set", id.thread_idx == -1);
 	}
 
 	// Test that socket ID contains only socket
@@ -193,7 +193,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 		TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
 		TEST_ASSERT_VAL("Socket map - Core is set", id.core == -1);
 		TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu.cpu == -1);
-		TEST_ASSERT_VAL("Socket map - Thread is set", id.thread == -1);
+		TEST_ASSERT_VAL("Socket map - Thread IDX is set", id.thread_idx == -1);
 	}
 
 	// Test that node ID contains only node
@@ -205,7 +205,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 		TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
 		TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
 		TEST_ASSERT_VAL("Node map - CPU is set", id.cpu.cpu == -1);
-		TEST_ASSERT_VAL("Node map - Thread is set", id.thread == -1);
+		TEST_ASSERT_VAL("Node map - Thread IDX is set", id.thread_idx == -1);
 	}
 	perf_session__delete(session);
 
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 2389bd3e19b8..8486ca3bec75 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -229,7 +229,7 @@ static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
 	else if (a->core != b->core)
 		return a->core - b->core;
 	else
-		return a->thread - b->thread;
+		return a->thread_idx - b->thread_idx;
 }
 
 struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
@@ -667,7 +667,7 @@ const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
 
 bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
 {
-	return a->thread == b->thread &&
+	return a->thread_idx == b->thread_idx &&
 		a->node == b->node &&
 		a->socket == b->socket &&
 		a->die == b->die &&
@@ -677,7 +677,7 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b
 
 bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
 {
-	return a->thread == -1 &&
+	return a->thread_idx == -1 &&
 		a->node == -1 &&
 		a->socket == -1 &&
 		a->die == -1 &&
@@ -688,7 +688,7 @@ bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
 struct aggr_cpu_id aggr_cpu_id__empty(void)
 {
 	struct aggr_cpu_id ret = {
-		.thread = -1,
+		.thread_idx = -1,
 		.node = -1,
 		.socket = -1,
 		.die = -1,
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index fa8a5acdcae1..4a6d029576ee 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -10,7 +10,7 @@
 /** Identify where counts are aggregated, -1 implies not to aggregate. */
 struct aggr_cpu_id {
 	/** A value in the range 0 to number of threads. */
-	int thread;
+	int thread_idx;
 	/** The numa node X as read from /sys/devices/system/node/nodeX. */
 	int node;
 	/**
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 570e2c04d47d..df26fb5eb072 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -189,14 +189,14 @@ static void aggr_printout(struct perf_stat_config *config,
 	case AGGR_THREAD:
 		if (config->json_output) {
 			fprintf(config->output, "\"thread\" : \"%s-%d\", ",
-				perf_thread_map__comm(evsel->core.threads, id.thread),
-				perf_thread_map__pid(evsel->core.threads, id.thread));
+				perf_thread_map__comm(evsel->core.threads, id.thread_idx),
+				perf_thread_map__pid(evsel->core.threads, id.thread_idx));
 		} else {
 			fprintf(config->output, "%*s-%*d%s",
 				config->csv_output ? 0 : 16,
-				perf_thread_map__comm(evsel->core.threads, id.thread),
+				perf_thread_map__comm(evsel->core.threads, id.thread_idx),
 				config->csv_output ? 0 : -8,
-				perf_thread_map__pid(evsel->core.threads, id.thread),
+				perf_thread_map__pid(evsel->core.threads, id.thread_idx),
 				config->csv_sep);
 		}
 		break;
@@ -453,7 +453,7 @@ static int first_shadow_map_idx(struct perf_stat_config *config,
 		return perf_cpu_map__idx(cpus, id->cpu);
 
 	if (config->aggr_mode == AGGR_THREAD)
-		return id->thread;
+		return id->thread_idx;
 
 	if (!config->aggr_get_id)
 		return 0;
@@ -946,7 +946,7 @@ static struct perf_aggr_thread_value *sort_aggr_thread(
 
 		buf[i].counter = counter;
 		buf[i].id = aggr_cpu_id__empty();
-		buf[i].id.thread = thread;
+		buf[i].id.thread_idx = thread;
 		buf[i].uval = uval;
 		buf[i].val = val;
 		buf[i].run = run;
-- 
2.38.0.rc1.362.ged0d419d3c-goog