From: Akihiko Odaki <odaki@rsg.ci.i.u-tokyo.ac.jp>
Date: Sat, 13 Dec 2025 14:41:24 +0900
Subject: [PATCH v2 5/6] rcu: Wake the RCU thread when draining
Message-Id: <20251213-force_rcu-v2-5-1de1ca84c6d6@rsg.ci.i.u-tokyo.ac.jp>
In-Reply-To: <20251213-force_rcu-v2-0-1de1ca84c6d6@rsg.ci.i.u-tokyo.ac.jp>
References: <20251213-force_rcu-v2-0-1de1ca84c6d6@rsg.ci.i.u-tokyo.ac.jp>
To: qemu-devel@nongnu.org, Dmitry Osipenko
Cc: Paolo Bonzini, Michael S. Tsirkin, Alex Bennée, Akihiko Odaki

drain_call_rcu() triggers a forced quiescent state, but the request can
be delayed if the RCU thread is sleeping. Ensure the forced quiescent
state is triggered immediately by waking the RCU thread up.

The logic that triggers the forced quiescent state is split out into
force_rcu() so that it can also be used independently.

Signed-off-by: Akihiko Odaki
---
 include/qemu/rcu.h |  1 +
 util/rcu.c         | 51 +++++++++++++++++++++++++++++++--------------------
 2 files changed, 32 insertions(+), 20 deletions(-)

diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 020dbe4d8b77..d6aa4e5854d3 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -118,6 +118,7 @@ static inline void rcu_read_unlock(void)
     }
 }
 
+void force_rcu(void);
 void synchronize_rcu(void);
 
 /*
diff --git a/util/rcu.c b/util/rcu.c
index 3c4af9d213c8..2f58f3627f74 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -45,12 +45,14 @@
 
 
 #define RCU_CALL_MIN_SIZE 30
+#define RCU_CALL_STATE_FORCED ((uint32_t)1 << 31)
+#define RCU_CALL_STATE_COUNT (~RCU_CALL_STATE_FORCED)
 
 unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
 
 QemuEvent rcu_gp_event;
-static int in_drain_call_rcu;
-static int rcu_call_count;
+static QemuEvent rcu_call_force_event;
+static uint32_t rcu_call_state;
 static QemuMutex rcu_registry_lock;
 
 /*
@@ -74,28 +76,35 @@ QEMU_DEFINE_CO_TLS(struct rcu_reader_data, rcu_reader)
 typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
 static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);
 
+void force_rcu(void)
+{
+    qatomic_or(&rcu_call_state, RCU_CALL_STATE_FORCED);
+    qemu_event_set(&rcu_call_force_event);
+}
+
 /* Wait for previous parity/grace period to be empty of readers. */
-static void wait_for_readers(void)
+static void wait_for_readers(bool force)
 {
     ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
     struct rcu_reader_data *index, *tmp;
-    int sleeps = 0;
+    int sleeps = force ? 0 : 5;
     bool forced = false;
 
+    qemu_event_reset(&rcu_call_force_event);
+
     for (;;) {
         /*
          * Force the grace period to end and wait for it if any of the
          * following heuristical conditions are satisfied:
          * - A decent number of callbacks piled up.
+         * - force_rcu() was called.
          * - It timed out.
-         * - It is in a drain_call_rcu() call.
          *
          * Otherwise, periodically poll the grace period, hoping it ends
         * promptly.
          */
         if (!forced &&
-            (qatomic_read(&rcu_call_count) >= RCU_CALL_MIN_SIZE ||
-             sleeps >= 5 || qatomic_read(&in_drain_call_rcu))) {
+            (qatomic_read(&rcu_call_state) >= RCU_CALL_MIN_SIZE || !sleeps)) {
             forced = true;
 
             QLIST_FOREACH(index, &registry, node) {
@@ -159,8 +168,8 @@ static void wait_for_readers(void)
              */
             qemu_event_reset(&rcu_gp_event);
         } else {
-            g_usleep(10000);
-            sleeps++;
+            qemu_event_timedwait(&rcu_call_force_event, 10);
+            sleeps--;
         }
 
         qemu_mutex_lock(&rcu_registry_lock);
@@ -170,7 +179,7 @@
     QLIST_SWAP(&registry, &qsreaders, node);
 }
 
-static void enter_qs(void)
+static void enter_qs(bool force)
 {
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
@@ -189,14 +198,14 @@ static void enter_qs(void)
              * Switch parity: 0 -> 1, 1 -> 0.
              */
             qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
-            wait_for_readers();
+            wait_for_readers(force);
             qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
         } else {
             /* Increment current grace period. */
             qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
         }
 
-        wait_for_readers();
+        wait_for_readers(force);
     }
 }
 
@@ -282,15 +291,17 @@ static void *call_rcu_thread(void *opaque)
     rcu_register_thread();
 
     for (;;) {
-        int n;
+        uint32_t state;
+        uint32_t n;
 
         /*
-         * Fetch rcu_call_count now, we only must process elements that were
+         * Fetch rcu_call_state now, we only must process elements that were
          * added before enter_qs() starts.
          */
        for (;;) {
            qemu_event_reset(&rcu_call_ready_event);
-            n = qatomic_read(&rcu_call_count);
+            state = qatomic_fetch_and(&rcu_call_state, RCU_CALL_STATE_COUNT);
+            n = state & RCU_CALL_STATE_COUNT;
            if (n) {
                break;
            }
@@ -301,8 +312,8 @@ static void *call_rcu_thread(void *opaque)
            qemu_event_wait(&rcu_call_ready_event);
        }
 
-        enter_qs();
-        qatomic_sub(&rcu_call_count, n);
+        enter_qs(state & RCU_CALL_STATE_FORCED);
+        qatomic_sub(&rcu_call_state, n);
        bql_lock();
        while (n > 0) {
            node = try_dequeue();
@@ -329,7 +340,7 @@ void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
 {
     node->func = func;
     enqueue(node);
-    qatomic_inc(&rcu_call_count);
+    qatomic_inc(&rcu_call_state);
     qemu_event_set(&rcu_call_ready_event);
 }
 
@@ -388,10 +399,9 @@ void drain_call_rcu(void)
      * assumed.
      */
 
-    qatomic_inc(&in_drain_call_rcu);
     call_rcu(&sync, sync_rcu_callback, rcu);
+    force_rcu();
     qemu_event_wait(&sync.complete_event);
-    qatomic_dec(&in_drain_call_rcu);
 
     if (locked) {
         bql_lock();
@@ -435,6 +445,7 @@ static void rcu_init_complete(void)
     qemu_mutex_init(&rcu_registry_lock);
     qemu_event_init(&rcu_gp_event, true);
 
+    qemu_event_init(&rcu_call_force_event, false);
     qemu_event_init(&rcu_call_ready_event, false);
 
     /* The caller is assumed to have BQL, so the call_rcu thread
-- 
2.52.0
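P.S. For anyone who wants to see the wake-up pattern in isolation, here is a
standalone sketch. It is not QEMU code: plain C11 atomics and pthread
primitives stand in for qatomic_*(), QemuEvent and qemu_event_timedwait(),
and the names (STATE_FORCED, force(), poller(), wake) are made up for the
illustration. The point it shows is the same one force_rcu() relies on:
publish the request in the shared state word first, then signal the event,
so a poller that normally sleeps in 10 ms steps reacts immediately instead
of waiting out its timeout.

/* Standalone illustration only; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define STATE_FORCED ((uint32_t)1 << 31)     /* top bit: force requested */

static _Atomic uint32_t state;               /* low bits could count work */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool wake;                            /* protected by lock */

static void force(void)
{
    atomic_fetch_or(&state, STATE_FORCED);   /* publish the request first */
    pthread_mutex_lock(&lock);
    wake = true;                             /* then wake the poller now */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

static void *poller(void *arg)
{
    (void)arg;
    for (;;) {
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 10 * 1000 * 1000;        /* 10 ms poll step */
        if (deadline.tv_nsec >= 1000000000) {
            deadline.tv_sec += 1;
            deadline.tv_nsec -= 1000000000;
        }

        pthread_mutex_lock(&lock);
        while (!wake) {
            if (pthread_cond_timedwait(&cond, &lock, &deadline)) {
                break;                               /* timed out: plain poll */
            }
        }
        wake = false;
        pthread_mutex_unlock(&lock);

        /* Consume the force request, analogous to the call_rcu thread. */
        if (atomic_fetch_and(&state, ~STATE_FORCED) & STATE_FORCED) {
            printf("forced: finish the grace period right away\n");
        } else {
            printf("poll tick\n");
        }
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, poller, NULL);
    usleep(100 * 1000);
    force();                                 /* cuts the current sleep short */
    usleep(100 * 1000);
    return 0;
}

Nothing above is meant to match QEMU's internals line by line; it only
demonstrates why setting the bit before signalling the event makes the
forced pass impossible to miss.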