From: Waiman Long
To: Peter Zijlstra, Ingo Molnar, Will Deacon, Boqun Feng
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH v2 6/8] locking/rwsem: Unify wait loop
Date: Mon, 27 Mar 2023 16:24:11 -0400
Message-Id: <20230327202413.1955856-7-longman@redhat.com>
In-Reply-To: <20230327202413.1955856-1-longman@redhat.com>
References: <20230327202413.1955856-1-longman@redhat.com>

From: Peter Zijlstra

Now that the reader and writer wait loops are identical, share the code.

Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/locking/rwsem.c | 121 +++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 70 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 0bc262dc77fd..ee8861effcc2 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -650,13 +650,11 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
  * optionally wake up waiters before it returns.
  */
 static inline void
-rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
-		      struct wake_q_head *wake_q)
+rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
 	__releases(&sem->wait_lock)
 {
 	bool first = rwsem_first_waiter(sem) == waiter;
-
-	wake_q_init(wake_q);
+	DEFINE_WAKE_Q(wake_q);
 
 	/*
 	 * If the wait_list isn't empty and the waiter to be deleted is
@@ -664,10 +662,10 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 	 * be eligible to acquire or spin on the lock.
 	 */
 	if (rwsem_del_waiter(sem, waiter) && first)
-		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
+		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 	raw_spin_unlock_irq(&sem->wait_lock);
-	if (!wake_q_empty(wake_q))
-		wake_up_q(wake_q);
+	if (!wake_q_empty(&wake_q))
+		wake_up_q(&wake_q);
 }
 
 /*
@@ -997,6 +995,50 @@ static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
 	rwsem_mark_wake(sem, wake_type, wake_q);
 }
 
+#define lockevent_rw_inc(rd, evr, evw) do {	\
+	lockevent_cond_inc(evr, (rd));		\
+	lockevent_cond_inc(evw, !(rd));		\
+} while (0)
+
+static __always_inline struct rw_semaphore *
+rwsem_waiter_wait(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
+		  int state, bool reader)
+{
+	trace_contention_begin(sem, reader ? LCB_F_READ : LCB_F_WRITE);
+
+	/* wait to be given the lock */
+	for (;;) {
+		set_current_state(state);
+		if (!smp_load_acquire(&waiter->task)) {
+			/* Matches rwsem_waiter_wake()'s smp_store_release(). */
+			break;
+		}
+		if (signal_pending_state(state, current)) {
+			raw_spin_lock_irq(&sem->wait_lock);
+			if (waiter->task)
+				goto out_nolock;
+			raw_spin_unlock_irq(&sem->wait_lock);
+			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
+			break;
+		}
+		schedule_preempt_disabled();
+		lockevent_rw_inc(reader, rwsem_sleep_reader, rwsem_sleep_writer);
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	lockevent_rw_inc(reader, rwsem_rlock, rwsem_wlock);
+	trace_contention_end(sem, 0);
+	return sem;
+
+out_nolock:
+	rwsem_del_wake_waiter(sem, waiter);
+	__set_current_state(TASK_RUNNING);
+	lockevent_rw_inc(reader, rwsem_rlock_fail, rwsem_wlock_fail);
+	trace_contention_end(sem, -EINTR);
+	return ERR_PTR(-EINTR);
+}
+
 /*
  * Wait for the read lock to be granted
  */
@@ -1074,38 +1116,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 	if (!wake_q_empty(&wake_q))
 		wake_up_q(&wake_q);
 
-	trace_contention_begin(sem, LCB_F_READ);
-
-	/* wait to be given the lock */
-	for (;;) {
-		set_current_state(state);
-		if (!smp_load_acquire(&waiter.task)) {
-			/* Matches rwsem_waiter_wake()'s smp_store_release(). */
-			break;
-		}
-		if (signal_pending_state(state, current)) {
-			raw_spin_lock_irq(&sem->wait_lock);
-			if (waiter.task)
-				goto out_nolock;
-			raw_spin_unlock_irq(&sem->wait_lock);
-			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
-			break;
-		}
-		schedule_preempt_disabled();
-		lockevent_inc(rwsem_sleep_reader);
-	}
-
-	__set_current_state(TASK_RUNNING);
-	lockevent_inc(rwsem_rlock);
-	trace_contention_end(sem, 0);
-	return sem;
-
-out_nolock:
-	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
-	__set_current_state(TASK_RUNNING);
-	lockevent_inc(rwsem_rlock_fail);
-	trace_contention_end(sem, -EINTR);
-	return ERR_PTR(-EINTR);
+	return rwsem_waiter_wait(sem, &waiter, state, true);
 }
 
 /*
@@ -1155,37 +1166,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 	}
 	raw_spin_unlock_irq(&sem->wait_lock);
 
-	/* wait until we successfully acquire the lock */
-	trace_contention_begin(sem, LCB_F_WRITE);
-
-	for (;;) {
-		set_current_state(state);
-		if (!smp_load_acquire(&waiter.task)) {
-			/* Matches rwsem_waiter_wake()'s smp_store_release(). */
-			break;
-		}
-		if (signal_pending_state(state, current)) {
-			raw_spin_lock_irq(&sem->wait_lock);
-			if (waiter.task)
-				goto out_nolock;
-			raw_spin_unlock_irq(&sem->wait_lock);
-			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
-			break;
-		}
-		schedule_preempt_disabled();
-		lockevent_inc(rwsem_sleep_writer);
-	}
-	__set_current_state(TASK_RUNNING);
-	lockevent_inc(rwsem_wlock);
-	trace_contention_end(sem, 0);
-	return sem;
-
-out_nolock:
-	rwsem_del_wake_waiter(sem, &waiter, &wake_q);
-	__set_current_state(TASK_RUNNING);
-	lockevent_inc(rwsem_wlock_fail);
-	trace_contention_end(sem, -EINTR);
-	return ERR_PTR(-EINTR);
+	return rwsem_waiter_wait(sem, &waiter, state, false);
 }
 
 /*
-- 
2.31.1
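
Illustration, not part of the patch: a minimal, self-contained userspace sketch of the unification pattern the diff applies. All toy_* names are made up for the example; only the shape matches the kernel change, i.e. the two slow paths become thin wrappers around one shared wait routine, and a small macro picks the reader or writer counter, mirroring the role of lockevent_rw_inc() above.

/*
 * Toy userspace analogue -- NOT kernel code.  The reader and writer
 * "slow paths" share one wait routine parameterized by a reader flag;
 * the only per-class difference left is which statistic gets bumped.
 */
#include <stdbool.h>
#include <stdio.h>

static int stat_rlock, stat_wlock;

/* Analogue of lockevent_rw_inc(): bump the reader or writer counter. */
#define toy_stat_rw_inc(rd, ctr_r, ctr_w) do {	\
	if (rd)					\
		(ctr_r)++;			\
	else					\
		(ctr_w)++;			\
} while (0)

struct toy_sem {
	int granted;	/* set by a (pretend) waker */
};

/* Shared wait loop: identical for readers and writers. */
static struct toy_sem *toy_waiter_wait(struct toy_sem *sem, bool reader)
{
	while (!sem->granted)
		;	/* the real code sleeps here via schedule_preempt_disabled() */

	toy_stat_rw_inc(reader, stat_rlock, stat_wlock);
	return sem;
}

/* The two slow paths reduce to tail calls, as in the patch. */
static struct toy_sem *toy_down_read_slowpath(struct toy_sem *sem)
{
	return toy_waiter_wait(sem, true);
}

static struct toy_sem *toy_down_write_slowpath(struct toy_sem *sem)
{
	return toy_waiter_wait(sem, false);
}

int main(void)
{
	struct toy_sem sem = { .granted = 1 };	/* pre-granted so the demo exits */

	toy_down_read_slowpath(&sem);
	toy_down_write_slowpath(&sem);
	printf("rlock=%d wlock=%d\n", stat_rlock, stat_wlock);
	return 0;
}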