Date: Mon, 27 Nov 2023 14:08:41 -0800
In-Reply-To: <20231127220902.1315692-1-irogers@google.com>
Message-Id: <20231127220902.1315692-30-irogers@google.com>
References: <20231127220902.1315692-1-irogers@google.com>
Subject: [PATCH v5 29/50] perf maps: Hide maps internals
From: Ian Rogers
To: Peter Zijlstra, Ingo Molnar, Arnaldo Carvalho de Melo, Mark Rutland,
    Alexander Shishkin, Jiri Olsa, Namhyung Kim, Ian Rogers, Adrian Hunter,
    Nick Terrell, Kan Liang, Andi Kleen, Kajol Jain, Athira Rajeev,
    Huacai Chen, Masami Hiramatsu, Vincent Whitchurch,
Gunderson" , Liam Howlett , Miguel Ojeda , Colin Ian King , Dmitrii Dolgov <9erthalion6@gmail.com>, Yang Jihong , Ming Wang , James Clark , K Prateek Nayak , Sean Christopherson , Leo Yan , Ravi Bangoria , German Gomez , Changbin Du , Paolo Bonzini , Li Dong , Sandipan Das , liuwenyu , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Guilherme Amadio Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Move the struct into the C file. Add maps__equal to work around exposing the struct for reference count checking. Add accessors for the unwind_libunwind_ops. Move maps_list_node to its only use in symbol.c. Signed-off-by: Ian Rogers --- tools/perf/tests/thread-maps-share.c | 8 +- tools/perf/util/callchain.c | 2 +- tools/perf/util/maps.c | 96 +++++++++++++++++++++++ tools/perf/util/maps.h | 97 +++--------------------- tools/perf/util/symbol.c | 10 +++ tools/perf/util/thread.c | 2 +- tools/perf/util/unwind-libunwind-local.c | 2 +- tools/perf/util/unwind-libunwind.c | 7 +- 8 files changed, 123 insertions(+), 101 deletions(-) diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread= -maps-share.c index 7fa6f7c568e2..e9ecd30a5c05 100644 --- a/tools/perf/tests/thread-maps-share.c +++ b/tools/perf/tests/thread-maps-share.c @@ -46,9 +46,9 @@ static int test__thread_maps_share(struct test_suite *tes= t __maybe_unused, int s TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4); =20 /* test the maps pointer is shared */ - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1))); - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2))); - TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2))); + TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3))); =20 /* * Verify the other leader was created by previous call. 
@@ -73,7 +73,7 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
 	other_maps = thread__maps(other);
 	TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
 
-	TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader)));
+	TEST_ASSERT_VAL("maps don't match", maps__equal(other_maps, thread__maps(other_leader)));
 
 	/* release thread group */
 	thread__put(t3);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 8262f69118db..7517d16c02ec 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1157,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
 		if (al->map == NULL)
 			goto out;
 	}
-	if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) {
+	if (maps__equal(al->maps, machine__kernel_maps(machine))) {
 		if (machine__is_host(machine)) {
 			al->cpumode = PERF_RECORD_MISC_KERNEL;
 			al->level = 'k';
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index b3937e734cbf..41e9e39b1b4c 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -6,9 +6,63 @@
 #include "dso.h"
 #include "map.h"
 #include "maps.h"
+#include "rwsem.h"
 #include "thread.h"
 #include "ui/ui.h"
 #include "unwind.h"
+#include 
+
+/*
+ * Locking/sorting note:
+ *
+ * Sorting is done with the write lock, iteration and binary searching happens
+ * under the read lock requiring being sorted. There is a race between sorting
+ * releasing the write lock and acquiring the read lock for iteration/searching
+ * where another thread could insert and break the sorting of the maps. In
+ * practice inserting maps should be rare meaning that the race shouldn't lead
+ * to live lock. Removal of maps doesn't break being sorted.
+ */
+
+DECLARE_RC_STRUCT(maps) {
+	struct rw_semaphore lock;
+	/**
+	 * @maps_by_address: array of maps sorted by their starting address if
+	 * maps_by_address_sorted is true.
+	 */
+	struct map **maps_by_address;
+	/**
+	 * @maps_by_name: optional array of maps sorted by their dso name if
+	 * maps_by_name_sorted is true.
+	 */
+	struct map **maps_by_name;
+	struct machine *machine;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+	void *addr_space;
+	const struct unwind_libunwind_ops *unwind_libunwind_ops;
+#endif
+	refcount_t refcnt;
+	/**
+	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
+	 * entries that contain maps.
+	 */
+	unsigned int nr_maps;
+	/**
+	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
+	 * maps_by_name.
+	 */
+	unsigned int nr_maps_allocated;
+	/**
+	 * @last_search_by_name_idx: cache of last found by name entry's index
+	 * as frequent searches for the same dso name are common.
+	 */
+	unsigned int last_search_by_name_idx;
+	/** @maps_by_address_sorted: is maps_by_address sorted. */
+	bool maps_by_address_sorted;
+	/** @maps_by_name_sorted: is maps_by_name sorted. */
+	bool maps_by_name_sorted;
+	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
+	bool ends_broken;
+};
 
 static void check_invariants(const struct maps *maps __maybe_unused)
 {
@@ -103,6 +157,43 @@ static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
 	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
 }
 
+struct machine *maps__machine(const struct maps *maps)
+{
+	return RC_CHK_ACCESS(maps)->machine;
+}
+
+unsigned int maps__nr_maps(const struct maps *maps)
+{
+	return RC_CHK_ACCESS(maps)->nr_maps;
+}
+
+refcount_t *maps__refcnt(struct maps *maps)
+{
+	return &RC_CHK_ACCESS(maps)->refcnt;
+}
+
+#ifdef HAVE_LIBUNWIND_SUPPORT
+void *maps__addr_space(const struct maps *maps)
+{
+	return RC_CHK_ACCESS(maps)->addr_space;
+}
+
+void maps__set_addr_space(struct maps *maps, void *addr_space)
+{
+	RC_CHK_ACCESS(maps)->addr_space = addr_space;
+}
+
+const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
+{
+	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
+}
+
+void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
+{
+	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
+}
+#endif
+
 static struct rw_semaphore *maps__lock(struct maps *maps)
 {
 	/*
@@ -440,6 +531,11 @@ bool maps__empty(struct maps *maps)
 	return maps__nr_maps(maps) == 0;
 }
 
+bool maps__equal(struct maps *a, struct maps *b)
+{
+	return RC_CHK_EQUAL(a, b);
+}
+
 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
 {
 	bool done = false;
diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h
index df9dd5a0e3c0..4bcba136ffe5 100644
--- a/tools/perf/util/maps.h
+++ b/tools/perf/util/maps.h
@@ -3,80 +3,15 @@
 #define __PERF_MAPS_H
 
 #include 
-#include 
 #include 
 #include 
 #include 
-#include "rwsem.h"
-#include 
 
 struct ref_reloc_sym;
 struct machine;
 struct map;
 struct maps;
 
-struct map_list_node {
-	struct list_head node;
-	struct map *map;
-};
-
-static inline struct map_list_node *map_list_node__new(void)
-{
-	return malloc(sizeof(struct map_list_node));
-}
-
-/*
- * Locking/sorting note:
- *
- * Sorting is done with the write lock, iteration and binary searching happens
- * under the read lock requiring being sorted. There is a race between sorting
- * releasing the write lock and acquiring the read lock for iteration/searching
- * where another thread could insert and break the sorting of the maps. In
- * practice inserting maps should be rare meaning that the race shouldn't lead
- * to live lock. Removal of maps doesn't break being sorted.
- */
-
-DECLARE_RC_STRUCT(maps) {
-	struct rw_semaphore lock;
-	/**
-	 * @maps_by_address: array of maps sorted by their starting address if
-	 * maps_by_address_sorted is true.
-	 */
-	struct map **maps_by_address;
-	/**
-	 * @maps_by_name: optional array of maps sorted by their dso name if
-	 * maps_by_name_sorted is true.
-	 */
-	struct map **maps_by_name;
-	struct machine *machine;
-#ifdef HAVE_LIBUNWIND_SUPPORT
-	void *addr_space;
-	const struct unwind_libunwind_ops *unwind_libunwind_ops;
-#endif
-	refcount_t refcnt;
-	/**
-	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
-	 * entries that contain maps.
-	 */
-	unsigned int nr_maps;
-	/**
-	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
-	 * maps_by_name.
-	 */
-	unsigned int nr_maps_allocated;
-	/**
-	 * @last_search_by_name_idx: cache of last found by name entry's index
-	 * as frequent searches for the same dso name are common.
-	 */
-	unsigned int last_search_by_name_idx;
-	/** @maps_by_address_sorted: is maps_by_address sorted. */
-	bool maps_by_address_sorted;
-	/** @maps_by_name_sorted: is maps_by_name sorted. */
-	bool maps_by_name_sorted;
-	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
-	bool ends_broken;
-};
-
 #define KMAP_NAME_LEN 256
 
 struct kmap {
@@ -100,36 +35,22 @@ static inline void __maps__zput(struct maps **map)
 
 #define maps__zput(map) __maps__zput(&map)
 
+bool maps__equal(struct maps *a, struct maps *b);
+
 /* Iterate over map calling cb for each entry. */
 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
 /* Iterate over map removing an entry if cb returns true. */
 void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
 
-static inline struct machine *maps__machine(struct maps *maps)
-{
-	return RC_CHK_ACCESS(maps)->machine;
-}
-
-static inline unsigned int maps__nr_maps(const struct maps *maps)
-{
-	return RC_CHK_ACCESS(maps)->nr_maps;
-}
-
-static inline refcount_t *maps__refcnt(struct maps *maps)
-{
-	return &RC_CHK_ACCESS(maps)->refcnt;
-}
+struct machine *maps__machine(const struct maps *maps);
+unsigned int maps__nr_maps(const struct maps *maps);
+refcount_t *maps__refcnt(struct maps *maps);
 
 #ifdef HAVE_LIBUNWIND_SUPPORT
-static inline void *maps__addr_space(struct maps *maps)
-{
-	return RC_CHK_ACCESS(maps)->addr_space;
-}
-
-static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
-{
-	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
-}
+void *maps__addr_space(const struct maps *maps);
+void maps__set_addr_space(struct maps *maps, void *addr_space);
+const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps);
+void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops);
 #endif
 
 size_t maps__fprintf(struct maps *maps, FILE *fp);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 0785a54e832e..35975189999b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -63,6 +63,16 @@ struct symbol_conf symbol_conf = {
 	.res_sample = 0,
 };
 
+struct map_list_node {
+	struct list_head node;
+	struct map *map;
+};
+
+static struct map_list_node *map_list_node__new(void)
+{
+	return malloc(sizeof(struct map_list_node));
+}
+
 static enum dso_binary_type binary_type_symtab[] = {
 	DSO_BINARY_TYPE__KALLSYMS,
 	DSO_BINARY_TYPE__GUEST_KALLSYMS,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 89c47a5098e2..c59ab4d79163 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -383,7 +383,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool
 	if (thread__pid(thread) == thread__pid(parent))
 		return thread__prepare_access(thread);
 
-	if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) {
+	if (maps__equal(thread__maps(thread), thread__maps(parent))) {
 		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
 			 thread__pid(thread), thread__tid(thread),
 			 thread__pid(parent), thread__tid(parent));
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 228f1565bd0b..b69dc3a447db 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -706,7 +706,7 @@ static int _unwind__prepare_access(struct maps *maps)
 {
 	void *addr_space = unw_create_addr_space(&accessors, 0);
 
-	RC_CHK_ACCESS(maps)->addr_space = addr_space;
+	maps__set_addr_space(maps, addr_space);
 	if (!addr_space) {
 		pr_err("unwind: Can't create unwind address space.\n");
 		return -ENOMEM;
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 76cd63de80a8..2728eb4f13ea 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -12,11 +12,6 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
 
-static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
-{
-	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
-}
-
 int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
 {
 	const char *arch;
@@ -60,7 +55,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
 		return 0;
 	}
 out_register:
-	unwind__register_ops(maps, ops);
+	maps__set_unwind_libunwind_ops(maps, ops);
 
 	err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
 	if (initialized)
-- 
2.43.0.rc1.413.gea7ed67945-goog
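
For reference, a minimal sketch of how a caller outside maps.c interacts
with struct maps once the internals are hidden. The helper below is
illustrative only and not part of the patch; its name and the pr_debug
reporting are made up, and it simply mirrors the call-site conversions
above (maps__equal() instead of RC_CHK_EQUAL(), state read through the
accessors):

#include <linux/refcount.h>
#include "util/debug.h"
#include "util/machine.h"
#include "util/maps.h"
#include "util/thread.h"

/* Hypothetical helper: report how a thread's maps relate to the kernel maps. */
static void describe_thread_maps(struct machine *machine, struct thread *thread)
{
	struct maps *maps = thread__maps(thread);

	/* Identity comparison goes through the accessor, not RC_CHK_EQUAL(). */
	if (maps__equal(maps, machine__kernel_maps(machine)))
		pr_debug("thread %d uses the kernel maps\n", thread__tid(thread));

	/* Simple state is likewise read through accessors, never struct fields. */
	pr_debug("%u maps, refcnt %u\n", maps__nr_maps(maps),
		 refcount_read(maps__refcnt(maps)));
}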