From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 1/6] timers/migration: Convert "while" loops to use "for"
Date: Fri, 24 Oct 2025 15:25:31 +0200
Message-ID: <20251024132536.39841-2-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

Both the "do while" and "while" loops in tmigr_setup_groups() eventually
mimic the behaviour of "for" loops. Simplify accordingly.
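To illustrate the shape of the conversion before reading the diff, here is a
minimal stand-alone sketch (plain C, made-up get_group()/LEVELS names, not the
kernel code): a "do while" loop that advances its index by hand becomes a plain
"for" loop, which also makes the error-path index adjustment explicit.

	#include <stdio.h>

	#define LEVELS 4

	/* Stand-in for tmigr_get_group(): pretend allocation fails at level 3. */
	static int get_group(int lvl)
	{
		return lvl < 3 ? lvl : -1;
	}

	int main(void)
	{
		int stack[LEVELS];
		int i, err = 0;

		/*
		 * Before: do { ... stack[i++] = group; ... } while (i < LEVELS);
		 * After: the index is owned by the for loop itself.
		 */
		for (i = 0; i < LEVELS; i++) {
			int group = get_group(i);

			if (group < 0) {
				err = group;
				i--;	/* stack[i] was never written, step back */
				break;
			}
			stack[i] = group;
		}

		if (i >= LEVELS)	/* mirrors the WARN_ON_ONCE() guard in the patch */
			return 1;

		/* Unwind what was set up, highest level first. */
		for (; i >= 0; i--)
			printf("level %d: group %d (err=%d)\n", i, stack[i], err);

		return 0;
	}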
Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index c0c54dc5314c..1e371f1fdc86 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1642,22 +1642,23 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 {
 	struct tmigr_group *group, *child, **stack;
-	int top = 0, err = 0, i = 0;
+	int i, top = 0, err = 0;
 	struct list_head *lvllist;
 
 	stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
 	if (!stack)
 		return -ENOMEM;
 
-	do {
+	for (i = 0; i < tmigr_hierarchy_levels; i++) {
 		group = tmigr_get_group(cpu, node, i);
 		if (IS_ERR(group)) {
 			err = PTR_ERR(group);
+			i--;
 			break;
 		}
 
 		top = i;
-		stack[i++] = group;
+		stack[i] = group;
 
 		/*
 		 * When booting only less CPUs of a system than CPUs are
@@ -1667,16 +1668,18 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 		 * be different from tmigr_hierarchy_levels, contains only a
 		 * single group.
 		 */
-		if (group->parent || list_is_singular(&tmigr_level_list[i - 1]))
+		if (group->parent || list_is_singular(&tmigr_level_list[i]))
 			break;
+	}
 
-	} while (i < tmigr_hierarchy_levels);
+	/* Assert single root without parent */
+	if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels))
+		return -EINVAL;
+	if (WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top])))
+		return -EINVAL;
 
-	/* Assert single root */
-	WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top]));
-
-	while (i > 0) {
-		group = stack[--i];
+	for (; i >= 0; i--) {
+		group = stack[i];
 
 		if (err < 0) {
 			list_del(&group->list);
-- 
2.51.0
From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 2/6] timers/migration: Remove locking on group connection
Date: Fri, 24 Oct 2025 15:25:32 +0200
Message-ID: <20251024132536.39841-3-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

Initializing the tmc's group, the group's number of children and the
group's parent can all be done without locking because:

1) Reading the group's parent and its groupmask is done locklessly.

2) The connections prepared for a given CPU hierarchy are visible to
   the target CPU once online, thanks to the CPU hotplug enforced
   memory ordering.

3) In case of a newly created upper level, the new root and its
   connections/initializations are made visible by the CPU which made
   the connections. When that CPU goes idle in the future, the new link
   is published by tmigr_inactive_up() through the atomic RmW on
   ->migr_state.

4) If CPUs were still walking up the active hierarchy, they could
   observe the new root earlier. In this case the ordering is enforced
   by an early initialization of the group mask and by barriers that
   maintain address dependency as explained in:

   b729cc1ec21a ("timers/migration: Fix another race between hotplug and idle entry/exit")
   de3ced72a792 ("timers/migration: Enforce group initialization visibility to tree walkers")

5) Timers are propagated by a chain of group locking from the bottom to
   the top. And while doing so, the tree also propagates group links
   and initializations. Therefore remote expiration, which also relies
   on group locking, will observe those links and initializations while
   holding the root lock before walking the tree remotely and updating
   remote timers. This is especially important for migrators in the
   active hierarchy that may observe the new root early.

Therefore the locking is unnecessary at initialization. If anything, it
just brings confusion. Remove it.
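Points 3) and 4) lean on a publish/subscribe pattern: the writer fully
initializes the new parent group, then publishes the pointer with release
semantics; a reader that observes the pointer also observes the initialized
fields. Outside the kernel the same idea can be sketched with C11 atomics
(hypothetical two-field group, acquire standing in for the kernel's
address-dependency ordering; this is an illustration, not the tmigr code):

	#include <stdatomic.h>
	#include <stdio.h>

	struct group {
		unsigned int groupmask;		/* initialized before publication */
		struct group *_Atomic parent;	/* published pointer */
	};

	static struct group top, child = { .parent = NULL };

	static void connect(void)
	{
		/* Writer: initialize everything readers may dereference... */
		top.groupmask = 1;
		/* ...then publish with release semantics (kernel: smp_store_release()). */
		atomic_store_explicit(&child.parent, &top, memory_order_release);
	}

	static void walk_up(void)
	{
		/* Reader: acquire load (kernel: address dependency + READ_ONCE()). */
		struct group *parent = atomic_load_explicit(&child.parent, memory_order_acquire);

		if (parent)	/* either NULL (not yet published) or fully initialized */
			printf("parent groupmask: %u\n", parent->groupmask);
	}

	int main(void)
	{
		connect();
		walk_up();
		return 0;
	}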
Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 1e371f1fdc86..5f8aef94ca0f 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1573,9 +1573,6 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 {
 	struct tmigr_walk data;
 
-	raw_spin_lock_irq(&child->lock);
-	raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
-
 	if (activate) {
 		/*
 		 * @child is the old top and @parent the new one. In this
@@ -1596,9 +1593,6 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 	 */
 	smp_store_release(&child->parent, parent);
 
-	raw_spin_unlock(&parent->lock);
-	raw_spin_unlock_irq(&child->lock);
-
 	trace_tmigr_connect_child_parent(child);
 
 	if (!activate)
@@ -1695,13 +1689,9 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 		if (i == 0) {
 			struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
 
-			raw_spin_lock_irq(&group->lock);
-
 			tmc->tmgroup = group;
 			tmc->groupmask = BIT(group->num_children++);
 
-			raw_spin_unlock_irq(&group->lock);
-
 			trace_tmigr_connect_cpu_parent(tmc);
 
 			/* There are no children that need to be connected */
-- 
2.51.0

From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 3/6] timers/migration: Fix imbalanced NUMA trees
Date: Fri, 24 Oct 2025 15:25:33 +0200
Message-ID: <20251024132536.39841-4-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

When a CPU from a new node boots, the old root may happen to be
connected to the new root even if their nodes mismatch, as depicted in
the following scenario:
1) CPU 0 boots and creates the first group for node 0.

                  [GRP0:0]
                   node 0
                      |
                    CPU 0

2) CPU 1 from node 1 boots and creates a new top that corresponds to
   node 1, but it also connects the old root from node 0 to the new
   root from node 1 by mistake.

                  [GRP1:0]
                   node 1
                   /    \
                  /      \
           [GRP0:0]    [GRP0:1]
            node 0      node 1
               |           |
             CPU 0       CPU 1

3) This eventually leads to an imbalanced tree where some node 0 CPUs
   migrate node 1 timers (and vice versa) way before reaching the
   crossnode groups, resulting in more frequent remote memory accesses
   than expected.

                       [GRP2:0]
                     NUMA_NO_NODE
                      /        \
               [GRP1:0]        [GRP1:1]
                node 1          node 0
                /    \             |
               /      \            |
           [...]   [GRP0:0]    [GRP0:1]
                    node 0      node 1
                       |           |
                    CPU 0...    CPU 1...

A balanced tree should only contain groups having children that belong
to the same node:

                       [GRP2:0]
                     NUMA_NO_NODE
                      /        \
               [GRP1:0]        [GRP1:1]
                node 0          node 1
                /    \          /    \
               /      \        /      \
        [GRP0:0]    [...]   [...]   [GRP0:1]
         node 0                       node 1
            |                            |
         CPU 0...                    CPU 1...

In order to fix this, the hierarchy must be unfolded up to the
crossnode level as soon as a node mismatch is detected. For example the
stage 2 above should lead to this layout:

                       [GRP2:0]
                     NUMA_NO_NODE
                      /        \
               [GRP1:0]        [GRP1:1]
                node 0          node 1
                 /                  \
                /                    \
           [GRP0:0]              [GRP0:1]
            node 0                node 1
               |                     |
             CPU 0                 CPU 1

This means that not only GRP1:0 must be created but also GRP1:1 and
GRP2:0 in order to prepare a balanced tree for next CPUs to boot.

Fixes: 7ee988770326 ("timers: Implement the hierarchical pull model")
Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 231 +++++++++++++++++++---------------
 1 file changed, 127 insertions(+), 104 deletions(-)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 5f8aef94ca0f..49635a2b7ee2 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -420,6 +420,8 @@ static struct list_head *tmigr_level_list __read_mostly;
 static unsigned int tmigr_hierarchy_levels __read_mostly;
 static unsigned int tmigr_crossnode_level __read_mostly;
 
+static struct tmigr_group *tmigr_root;
+
 static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
 
 #define TMIGR_NONE 0xFF
@@ -522,11 +524,9 @@ struct tmigr_walk {
 
 typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
 
-static void __walk_groups(up_f up, struct tmigr_walk *data,
-			  struct tmigr_cpu *tmc)
+static void __walk_groups_from(up_f up, struct tmigr_walk *data,
+			       struct tmigr_group *child, struct tmigr_group *group)
 {
-	struct tmigr_group *child = NULL, *group = tmc->tmgroup;
-
 	do {
 		WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
 
@@ -544,6 +544,12 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
 	} while (group);
 }
 
+static void __walk_groups(up_f up, struct tmigr_walk *data,
+			  struct tmigr_cpu *tmc)
+{
+	__walk_groups_from(up, data, NULL, tmc->tmgroup);
+}
+
 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
 {
 	lockdep_assert_held(&tmc->lock);
@@ -1498,21 +1504,6 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
 	s.seq = 0;
 	atomic_set(&group->migr_state, s.state);
 
-	/*
-	 * If this is a new top-level, prepare its groupmask in advance.
-	 * This avoids accidents where yet another new top-level is
-	 * created in the future and made visible before the current groupmask.
-	 */
-	if (list_empty(&tmigr_level_list[lvl])) {
-		group->groupmask = BIT(0);
-		/*
-		 * The previous top level has prepared its groupmask already,
-		 * simply account it as the first child.
-		 */
-		if (lvl > 0)
-			group->num_children = 1;
-	}
-
 	timerqueue_init_head(&group->events);
 	timerqueue_init(&group->groupevt.nextevt);
 	group->groupevt.nextevt.expires = KTIME_MAX;
@@ -1567,22 +1558,51 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
 	return group;
 }
 
+static bool tmigr_init_root(struct tmigr_group *group, bool activate)
+{
+	if (!group->parent && group != tmigr_root) {
+		/*
+		 * This is the new top-level, prepare its groupmask in advance
+		 * to avoid accidents where yet another new top-level is
+		 * created in the future and made visible before this groupmask.
+		 */
+		group->groupmask = BIT(0);
+		WARN_ON_ONCE(activate);
+
+		return true;
+	}
+
+	return false;
+
+}
+
 static void tmigr_connect_child_parent(struct tmigr_group *child,
 				       struct tmigr_group *parent,
 				       bool activate)
 {
-	struct tmigr_walk data;
+	if (tmigr_init_root(parent, activate)) {
+		/*
+		 * The previous top level had prepared its groupmask already,
+		 * simply account it in advance as the first child. If some groups
+		 * have been created between the old and new root due to node
+		 * mismatch, the new root's child will be intialized accordingly.
+		 */
+		parent->num_children = 1;
+	}
 
-	if (activate) {
+	/* Connecting old root to new root ? */
+	if (!parent->parent && activate) {
 		/*
-		 * @child is the old top and @parent the new one. In this
-		 * case groupmask is pre-initialized and @child already
-		 * accounted, along with its new sibling corresponding to the
-		 * CPU going up.
+		 * @child is the old top, or in case of node mismatch, some
+		 * intermediate group between the old top and the new one in
+		 * @parent. In this case the @child must be pre-accounted above
+		 * as the first child. Its new inactive sibling corresponding
+		 * to the CPU going up has been accounted as the second child.
 		 */
-		WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2);
+		WARN_ON_ONCE(parent->num_children != 2);
+		child->groupmask = BIT(0);
 	} else {
-		/* Adding @child for the CPU going up to @parent. */
+		/* Common case adding @child for the CPU going up to @parent. */
 		child->groupmask = BIT(parent->num_children++);
 	}
 
@@ -1594,56 +1614,28 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 	smp_store_release(&child->parent, parent);
 
 	trace_tmigr_connect_child_parent(child);
-
-	if (!activate)
-		return;
-
-	/*
-	 * To prevent inconsistent states, active children need to be active in
-	 * the new parent as well. Inactive children are already marked inactive
-	 * in the parent group:
-	 *
-	 * * When new groups were created by tmigr_setup_groups() starting from
-	 *   the lowest level (and not higher then one level below the current
-	 *   top level), then they are not active. They will be set active when
-	 *   the new online CPU comes active.
-	 *
-	 * * But if a new group above the current top level is required, it is
-	 *   mandatory to propagate the active state of the already existing
-	 *   child to the new parent. So tmigr_connect_child_parent() is
-	 *   executed with the formerly top level group (child) and the newly
-	 *   created group (parent).
-	 *
-	 * * It is ensured that the child is active, as this setup path is
-	 *   executed in hotplug prepare callback. This is exectued by an
-	 *   already connected and !idle CPU. Even if all other CPUs go idle,
-	 *   the CPU executing the setup will be responsible up to current top
-	 *   level group. And the next time it goes inactive, it will release
-	 *   the new childmask and parent to subsequent walkers through this
-	 *   @child. Therefore propagate active state unconditionally.
-	 */
-	data.childmask = child->groupmask;
-
-	/*
-	 * There is only one new level per time (which is protected by
-	 * tmigr_mutex). When connecting the child and the parent and set the
-	 * child active when the parent is inactive, the parent needs to be the
-	 * uppermost level. Otherwise there went something wrong!
-	 */
-	WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
 }
 
-static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
+			      struct tmigr_group *start, bool activate)
 {
 	struct tmigr_group *group, *child, **stack;
-	int i, top = 0, err = 0;
-	struct list_head *lvllist;
+	int i, top = 0, err = 0, start_lvl = 0;
+	bool root_mismatch = false;
 
 	stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
 	if (!stack)
 		return -ENOMEM;
 
-	for (i = 0; i < tmigr_hierarchy_levels; i++) {
+	if (start) {
+		stack[start->level] = start;
+		start_lvl = start->level + 1;
+	}
+
+	if (tmigr_root)
+		root_mismatch = tmigr_root->numa_node != node;
+
+	for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
 		group = tmigr_get_group(cpu, node, i);
 		if (IS_ERR(group)) {
 			err = PTR_ERR(group);
@@ -1656,23 +1648,25 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 
 		/*
 		 * When booting only less CPUs of a system than CPUs are
-		 * available, not all calculated hierarchy levels are required.
+		 * available, not all calculated hierarchy levels are required,
+		 * unless a node mismatch is detected.
 		 *
 		 * The loop is aborted as soon as the highest level, which might
 		 * be different from tmigr_hierarchy_levels, contains only a
-		 * single group.
+		 * single group, unless the nodes mismatch below tmigr_crossnode_level
 		 */
-		if (group->parent || list_is_singular(&tmigr_level_list[i]))
+		if (group->parent)
+			break;
+		if ((!root_mismatch || i >= tmigr_crossnode_level) &&
+		    list_is_singular(&tmigr_level_list[i]))
 			break;
 	}
 
 	/* Assert single root without parent */
 	if (WARN_ON_ONCE(i >= tmigr_hierarchy_levels))
 		return -EINVAL;
-	if (WARN_ON_ONCE(!err && !group->parent && !list_is_singular(&tmigr_level_list[top])))
-		return -EINVAL;
 
-	for (; i >= 0; i--) {
+	for (; i >= start_lvl; i--) {
 		group = stack[i];
 
 		if (err < 0) {
@@ -1692,48 +1686,63 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 			tmc->tmgroup = group;
 			tmc->groupmask = BIT(group->num_children++);
 
+			tmigr_init_root(group, activate);
+
 			trace_tmigr_connect_cpu_parent(tmc);
 
 			/* There are no children that need to be connected */
 			continue;
 		} else {
 			child = stack[i - 1];
-			/* Will be activated at online time */
-			tmigr_connect_child_parent(child, group, false);
+			tmigr_connect_child_parent(child, group, activate);
 		}
+	}
 
-		/* check if uppermost level was newly created */
-		if (top != i)
-			continue;
+	if (err < 0)
+		goto out;
 
-		WARN_ON_ONCE(top == 0);
-
-		lvllist = &tmigr_level_list[top];
+	if (activate) {
+		struct tmigr_walk data;
 
 		/*
-		 * Newly created root level should have accounted the upcoming
-		 * CPU's child group and pre-accounted the old root.
+		 * To prevent inconsistent states, active children need to be active in
+		 * the new parent as well. Inactive children are already marked inactive
+		 * in the parent group:
+		 *
+		 * * When new groups were created by tmigr_setup_groups() starting from
+		 *   the lowest level, then they are not active. They will be set active
+		 *   when the new online CPU comes active.
+		 *
+		 * * But if new groups above the current top level are required, it is
+		 *   mandatory to propagate the active state of the already existing
+		 *   child to the new parents. So tmigr_active_up() activates the
+		 *   new parents while walking up from the old root to the new.
+		 *
+		 * * It is ensured that @start is active, as this setup path is
+		 *   executed in hotplug prepare callback. This is executed by an
+		 *   already connected and !idle CPU. Even if all other CPUs go idle,
+		 *   the CPU executing the setup will be responsible up to current top
+		 *   level group. And the next time it goes inactive, it will release
+		 *   the new childmask and parent to subsequent walkers through this
+		 *   @child. Therefore propagate active state unconditionally.
 		 */
-		if (group->num_children == 2 && list_is_singular(lvllist)) {
-			/*
-			 * The target CPU must never do the prepare work, except
-			 * on early boot when the boot CPU is the target. Otherwise
-			 * it may spuriously activate the old top level group inside
-			 * the new one (nevertheless whether old top level group is
-			 * active or not) and/or release an uninitialized childmask.
-			 */
-			WARN_ON_ONCE(cpu == raw_smp_processor_id());
+		WARN_ON_ONCE(!start->parent);
+		data.childmask = start->groupmask;
+		__walk_groups_from(tmigr_active_up, &data, start, start->parent);
+	}
 
-			lvllist = &tmigr_level_list[top - 1];
-			list_for_each_entry(child, lvllist, list) {
-				if (child->parent)
-					continue;
-
-				tmigr_connect_child_parent(child, group, true);
-			}
+	/* Root update */
+	if (list_is_singular(&tmigr_level_list[top])) {
+		group = list_first_entry(&tmigr_level_list[top],
+					 typeof(*group), list);
+		WARN_ON_ONCE(group->parent);
+		if (tmigr_root) {
+			/* Old root should be the same or below */
+			WARN_ON_ONCE(tmigr_root->level > top);
 		}
+		tmigr_root = group;
 	}
-
+out:
 	kfree(stack);
 
 	return err;
@@ -1741,12 +1750,26 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 
 static int tmigr_add_cpu(unsigned int cpu)
 {
+	struct tmigr_group *old_root = tmigr_root;
 	int node = cpu_to_node(cpu);
 	int ret;
 
-	mutex_lock(&tmigr_mutex);
-	ret = tmigr_setup_groups(cpu, node);
-	mutex_unlock(&tmigr_mutex);
+	guard(mutex)(&tmigr_mutex);
+
+	ret = tmigr_setup_groups(cpu, node, NULL, false);
+
+	/* Root has changed? Connect the old one to the new */
+	if (ret >= 0 && old_root && old_root != tmigr_root) {
+		/*
+		 * The target CPU must never do the prepare work, except
+		 * on early boot when the boot CPU is the target. Otherwise
+		 * it may spuriously activate the old top level group inside
+		 * the new one (nevertheless whether old top level group is
+		 * active or not) and/or release an uninitialized childmask.
+		 */
+		WARN_ON_ONCE(cpu == raw_smp_processor_id());
+		ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
+	}
 
 	return ret;
 }
-- 
2.51.0

From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 4/6] timers/migration: Assert that hotplug preparing CPU is part of stable active hierarchy
Date: Fri, 24 Oct 2025 15:25:34 +0200
Message-ID: <20251024132536.39841-5-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

The CPU doing the prepare work for a remote target must be online from
the tree point of view and its hierarchy must be active, otherwise
propagating its active state up to the new root branch would be either
incorrect or racy. Assert those conditions with more sanity checks.
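The "hierarchy must be active" condition boils down to a lockless read of the
group's packed state word followed by a one-shot warning. As a rough user-space
analogy (hypothetical field layout loosely modeled on union tmigr_state, not
the kernel code):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Packed group state, read and updated atomically as one word. */
	union state {
		uint32_t word;
		struct {
			uint8_t active;		/* bitmask of active children */
			uint8_t migrator;	/* child currently acting as migrator */
			uint16_t seq;		/* update sequence counter */
		};
	};

	static _Atomic uint32_t group_state;

	static int hierarchy_is_active(void)
	{
		union state s = { .word = atomic_load(&group_state) };

		return s.active != 0;	/* at least one active child */
	}

	int main(void)
	{
		union state s = { 0 };

		s.active = 0x1;
		s.migrator = 0x1;
		atomic_store(&group_state, s.word);

		/* The prepare path only proceeds from an active hierarchy. */
		assert(hierarchy_is_active());
		printf("hierarchy active, safe to propagate state upward\n");
		return 0;
	}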
Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 49635a2b7ee2..bddd816faaeb 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1703,6 +1703,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
 
 	if (activate) {
 		struct tmigr_walk data;
+		union tmigr_state state;
 
 		/*
 		 * To prevent inconsistent states, active children need to be active in
@@ -1726,6 +1727,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
 		 *   the new childmask and parent to subsequent walkers through this
 		 *   @child. Therefore propagate active state unconditionally.
 		 */
+		state.state = atomic_read(&start->migr_state);
+		WARN_ON_ONCE(!state.active);
 		WARN_ON_ONCE(!start->parent);
 		data.childmask = start->groupmask;
 		__walk_groups_from(tmigr_active_up, &data, start, start->parent);
@@ -1768,6 +1771,11 @@ static int tmigr_add_cpu(unsigned int cpu)
 		 * active or not) and/or release an uninitialized childmask.
 		 */
 		WARN_ON_ONCE(cpu == raw_smp_processor_id());
+		/*
+		 * The (likely) current CPU is expected to be online in the hierarchy,
+		 * otherwise the old root may not be active as expected.
+		 */
+		WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->online);
 		ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
 	}
 
-- 
2.51.0

From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 5/6] timers/migration: Remove unused "cpu" parameter from tmigr_get_group()
Date: Fri, 24 Oct 2025 15:25:35 +0200
Message-ID: <20251024132536.39841-6-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index bddd816faaeb..73d9b0648116 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1511,8 +1511,7 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
 	group->groupevt.ignore = true;
 }
 
-static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
-					   unsigned int lvl)
+static struct tmigr_group *tmigr_get_group(int node, unsigned int lvl)
 {
 	struct tmigr_group *tmp, *group = NULL;
 
@@ -1636,7 +1635,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
 		root_mismatch = tmigr_root->numa_node != node;
 
 	for (i = start_lvl; i < tmigr_hierarchy_levels; i++) {
-		group = tmigr_get_group(cpu, node, i);
+		group = tmigr_get_group(node, i);
 		if (IS_ERR(group)) {
 			err = PTR_ERR(group);
 			i--;
-- 
2.51.0
From: Frederic Weisbecker
To: Thomas Gleixner
Cc: LKML, Frederic Weisbecker, Anna-Maria Behnsen
Subject: [PATCH 6/6] timers/migration: Remove dead code handling idle CPU checking for remote timers
Date: Fri, 24 Oct 2025 15:25:36 +0200
Message-ID: <20251024132536.39841-7-frederic@kernel.org>
In-Reply-To: <20251024132536.39841-1-frederic@kernel.org>
References: <20251024132536.39841-1-frederic@kernel.org>

Idle migrators don't walk the whole tree in order to find out if there
are timers to migrate because they recorded the next deadline to be
verified within a single check in tmigr_requires_handle_remote().
Remove the related dead code and data.

Signed-off-by: Frederic Weisbecker
---
 kernel/time/timer_migration.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 73d9b0648116..19ddfa96b9df 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -504,11 +504,6 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
  * @now:	timer base monotonic
  * @check:	is set if there is the need to handle remote timers;
  *		required in tmigr_requires_handle_remote() only
- * @tmc_active:	this flag indicates, whether the CPU which triggers
- *		the hierarchy walk is !idle in the timer migration
- *		hierarchy. When the CPU is idle and the whole hierarchy is
- *		idle, only the first event of the top level has to be
- *		considered.
  */
 struct tmigr_walk {
 	u64			nextexp;
@@ -519,7 +514,6 @@ struct tmigr_walk {
 	unsigned long		basej;
 	u64			now;
 	bool			check;
-	bool			tmc_active;
 };
 
 typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
@@ -1119,15 +1113,6 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
 	 */
 	if (!tmigr_check_migrator(group, childmask))
 		return true;
-
-	/*
-	 * When there is a parent group and the CPU which triggered the
-	 * hierarchy walk is not active, proceed the walk to reach the top level
-	 * group before reading the next_expiry value.
-	 */
-	if (group->parent && !data->tmc_active)
-		return false;
-
 	/*
 	 * The lock is required on 32bit architectures to read the variable
 	 * consistently with a concurrent writer. On 64bit the lock is not
@@ -1172,7 +1157,6 @@ bool tmigr_requires_handle_remote(void)
 	data.now = get_jiffies_update(&jif);
 	data.childmask = tmc->groupmask;
 	data.firstexp = KTIME_MAX;
-	data.tmc_active = !tmc->idle;
 	data.check = false;
 
 	/*
-- 
2.51.0