From: remy.noel@blade-group.com
To: qemu-devel@nongnu.org
Cc: Stefan Weil, Remy Noel, Fam Zheng, "open list:Block I/O path",
    Stefan Hajnoczi
Date: Fri, 16 Nov 2018 20:02:09 +0100
Message-Id: <20181116190211.17622-3-remy.noel@blade-group.com>
In-Reply-To: <20181116190211.17622-1-remy.noel@blade-group.com>
References: <20181116190211.17622-1-remy.noel@blade-group.com>
Subject: [Qemu-devel] util/aio-posix: Use RCU for handler insertion.

From: Remy Noel <remy.noel@blade-group.com>

Get rid of the "deleted" attribute: handlers are now unlinked from the
handler list with QLIST_REMOVE_RCU() and freed after a grace period with
g_free_rcu(), so concurrent walkers either still see a valid node or do
not see it at all, and no longer have to skip and reap deleted entries.

We still need to get rid of the context list lock.

Signed-off-by: Remy Noel <remy.noel@blade-group.com>
---
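For readers less familiar with the pattern, the lifecycle this change
relies on looks roughly like the sketch below, built on QEMU's "qemu/rcu.h"
and "qemu/rcu_queue.h" primitives. The ExampleHandler type and the
example_*() functions are illustrative stand-ins for AioHandler and the aio
code, not part of this patch:

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "qemu/rcu_queue.h"

    typedef struct ExampleHandler {
        struct rcu_head rcu;        /* storage used by g_free_rcu() */
        int fd;
        QLIST_ENTRY(ExampleHandler) node;
    } ExampleHandler;

    static QLIST_HEAD(, ExampleHandler) handlers =
        QLIST_HEAD_INITIALIZER(handlers);

    /* Reader: may run concurrently with a removal. */
    static void example_walk(void)
    {
        ExampleHandler *h;

        rcu_read_lock();
        QLIST_FOREACH_RCU(h, &handlers, node) {
            /* h cannot be freed before rcu_read_unlock(), even if a
             * writer unlinks it while we are standing on it. */
        }
        rcu_read_unlock();
    }

    /* Writer (serialized externally, here by ctx->list_lock): unlink
     * immediately, free only after a grace period. */
    static void example_remove(ExampleHandler *h)
    {
        QLIST_REMOVE_RCU(h, node);  /* new walkers no longer find h */
        g_free_rcu(h, rcu);         /* g_free(h) after readers drain */
    }

A walker racing with example_remove() either still sees the node, in which
case the memory is still valid, or does not see it at all. That is what
makes the per-node "deleted" flag and the deferred reaping in
aio_dispatch_handlers() unnecessary. Writers stay serialized by
ctx->list_lock, hence the note above that the context list lock still has
to go.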
 util/aio-posix.c | 75 ++++++++++++++++++++++++++++++++++-----------------------------------------
 util/aio-win32.c | 44 ++++++++++++++++----------------------------
 2 files changed, 50 insertions(+), 69 deletions(-)

diff --git a/util/aio-posix.c b/util/aio-posix.c
index b34d97292a..83db3f65f4 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -16,6 +16,7 @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "block/block.h"
+#include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/sockets.h"
 #include "qemu/cutils.h"
@@ -26,13 +27,14 @@
 
 struct AioHandler {
+    struct rcu_head rcu;
+
     GPollFD pfd;
     IOHandler *io_read;
     IOHandler *io_write;
     AioPollFn *io_poll;
     IOHandler *io_poll_begin;
     IOHandler *io_poll_end;
-    int deleted;
     void *opaque;
     bool is_external;
     QLIST_ENTRY(AioHandler) node;
@@ -65,19 +67,25 @@ static bool aio_epoll_try_enable(AioContext *ctx)
 {
     AioHandler *node;
     struct epoll_event event;
+    int r = 0;
+
 
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        int r;
-        if (node->deleted || !node->pfd.events) {
+        if (!node->pfd.events) {
             continue;
         }
         event.events = epoll_events_from_pfd(node->pfd.events);
         event.data.ptr = node;
         r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
         if (r) {
-            return false;
+            break;
         }
     }
+    rcu_read_unlock();
+    if (r) {
+        return false;
+    }
     ctx->epoll_enabled = true;
     return true;
 }
@@ -193,14 +201,13 @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
 
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         if (node->pfd.fd == fd)
-            if (!node->deleted)
-                return node;
+            return node;
     }
 
     return NULL;
 }
 
-static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
+static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 {
     /* If the GSource is in the process of being destroyed then
      * g_source_remove_poll() causes an assertion failure.  Skip
@@ -210,19 +217,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
     if (!g_source_is_destroyed(&ctx->source)) {
         g_source_remove_poll(&ctx->source, &node->pfd);
     }
-
-    /* If a read is in progress, just mark the node as deleted */
-    if (qemu_lockcnt_count(&ctx->list_lock)) {
-        node->deleted = 1;
-        node->pfd.revents = 0;
-        return false;
-    }
-    /* Otherwise, delete it for real.  We can't just mark it as
-     * deleted because deleted nodes are only cleaned up while
-     * no one is walking the handlers list.
-     */
-    QLIST_REMOVE(node, node);
-    return true;
+    QLIST_REMOVE_RCU(node, node);
 }
 
 void aio_set_fd_handler(AioContext *ctx,
@@ -249,7 +244,8 @@ void aio_set_fd_handler(AioContext *ctx,
             qemu_lockcnt_unlock(&ctx->list_lock);
             return;
         }
-        deleted = aio_remove_fd_handler(ctx, node);
+        aio_remove_fd_handler(ctx, node);
+        deleted = true;
         poll_disable_change = -!node->io_poll;
     } else {
         poll_disable_change = !io_poll - (node && !node->io_poll);
@@ -269,7 +265,8 @@ void aio_set_fd_handler(AioContext *ctx,
         if (is_new) {
             new_node->pfd.fd = fd;
         } else {
-            deleted = aio_remove_fd_handler(ctx, node);
+            aio_remove_fd_handler(ctx, node);
+            deleted = true;
             new_node->pfd = node->pfd;
         }
         g_source_add_poll(&ctx->source, &new_node->pfd);
@@ -296,7 +293,7 @@ void aio_set_fd_handler(AioContext *ctx,
     aio_notify(ctx);
 
     if (deleted) {
-        g_free(node);
+        g_free_rcu(node, rcu);
     }
 }
 
@@ -345,13 +342,10 @@ static void poll_set_started(AioContext *ctx, bool started)
     ctx->poll_started = started;
 
     qemu_lockcnt_inc(&ctx->list_lock);
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         IOHandler *fn;
 
-        if (node->deleted) {
-            continue;
-        }
-
         if (started) {
             fn = node->io_poll_begin;
         } else {
@@ -362,6 +356,7 @@ static void poll_set_started(AioContext *ctx, bool started)
             fn(node->opaque);
         }
     }
+    rcu_read_unlock();
     qemu_lockcnt_dec(&ctx->list_lock);
 }
 
@@ -385,6 +380,7 @@ bool aio_pending(AioContext *ctx)
      */
    qemu_lockcnt_inc(&ctx->list_lock);
 
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         int revents;
 
@@ -400,6 +396,7 @@ bool aio_pending(AioContext *ctx)
             break;
         }
     }
+    rcu_read_unlock();
     qemu_lockcnt_dec(&ctx->list_lock);
 
     return result;
@@ -410,14 +407,14 @@ static bool aio_dispatch_handlers(AioContext *ctx)
     AioHandler *node, *tmp;
     bool progress = false;
 
+    rcu_read_lock();
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
         revents = node->pfd.revents & node->pfd.events;
         node->pfd.revents = 0;
 
-        if (!node->deleted &&
-            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
+        if ((revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_read) {
             node->io_read(node->opaque);
@@ -427,22 +424,14 @@ static bool aio_dispatch_handlers(AioContext *ctx)
                 progress = true;
             }
         }
-        if (!node->deleted &&
-            (revents & (G_IO_OUT | G_IO_ERR)) &&
+        if ((revents & (G_IO_OUT | G_IO_ERR)) &&
             aio_node_check(ctx, node->is_external) &&
             node->io_write) {
             node->io_write(node->opaque);
             progress = true;
         }
-
-        if (node->deleted) {
-            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
-                QLIST_REMOVE(node, node);
-                g_free(node);
-                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
-            }
-        }
     }
+    rcu_read_unlock();
 
     return progress;
 }
@@ -508,8 +497,9 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
     bool progress = false;
     AioHandler *node;
 
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_poll &&
+        if (node->io_poll &&
             aio_node_check(ctx, node->is_external) &&
             node->io_poll(node->opaque)) {
             *timeout = 0;
@@ -520,6 +510,7 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
         /* Caller handles freeing deleted nodes.  Don't do it here.
          */
     }
+    rcu_read_unlock();
 
     return progress;
 }
@@ -637,12 +628,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* fill pollfds */
 
     if (!aio_epoll_enabled(ctx)) {
+        rcu_read_lock();
         QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-            if (!node->deleted && node->pfd.events
+            if (node->pfd.events
                 && aio_node_check(ctx, node->is_external)) {
                 add_pollfd(node);
             }
         }
+        rcu_read_unlock();
     }
 
     /* wait until next event */
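One shape in the aio_epoll_try_enable() hunk above is worth a note: the
early "return false" inside the walk became a "break" plus a check after
the loop, so the function never leaves the RCU read-side critical section
without calling rcu_read_unlock(). The same shape in miniature, reusing
the illustrative ExampleHandler list from the sketch above
(example_register_one() is a hypothetical helper, not a QEMU API):

    static int example_register_one(ExampleHandler *h);  /* hypothetical */

    static bool example_register_all(void)
    {
        ExampleHandler *h;
        int r = 0;

        rcu_read_lock();
        QLIST_FOREACH_RCU(h, &handlers, node) {
            r = example_register_one(h);
            if (r) {
                break;          /* do not return with the read lock held */
            }
        }
        rcu_read_unlock();      /* must pair with rcu_read_lock() */

        return r == 0;
    }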
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 00e38cdd9f..d7c694e5ac 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -29,7 +29,7 @@ struct AioHandler {
     IOHandler *io_write;
     EventNotifierHandler *io_notify;
     GPollFD pfd;
-    int deleted;
+    struct rcu_head rcu;
     void *opaque;
     bool is_external;
     QLIST_ENTRY(AioHandler) node;
@@ -37,18 +37,8 @@ struct AioHandler {
 
 static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 {
-    /* If aio_poll is in progress, just mark the node as deleted */
-    if (qemu_lockcnt_count(&ctx->list_lock)) {
-        node->deleted = 1;
-        node->pfd.revents = 0;
-    } else {
-        /* Otherwise, delete it for real.  We can't just mark it as
-         * deleted because deleted nodes are only cleaned up after
-         * releasing the list_lock.
-         */
-        QLIST_REMOVE(node, node);
-        g_free(node);
-    }
+    QLIST_REMOVE_RCU(node, node);
+    g_free_rcu(node, rcu);
 }
 
 void aio_set_fd_handler(AioContext *ctx,
@@ -64,7 +54,7 @@ void aio_set_fd_handler(AioContext *ctx,
 
     qemu_lockcnt_lock(&ctx->list_lock);
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->pfd.fd == fd && !node->deleted) {
+        if (node->pfd.fd == fd) {
             break;
         }
     }
@@ -136,7 +126,7 @@ void aio_set_event_notifier(AioContext *ctx,
 
     qemu_lockcnt_lock(&ctx->list_lock);
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        if (node->e == e && !node->deleted) {
+        if (node->e == e) {
             break;
         }
     }
@@ -188,6 +178,7 @@ bool aio_prepare(AioContext *ctx)
      * called while we're walking.
      */
     qemu_lockcnt_inc(&ctx->list_lock);
+    rcu_read_lock();
 
     /* fill fd sets */
     FD_ZERO(&rfds);
@@ -215,7 +206,7 @@ bool aio_prepare(AioContext *ctx)
             }
         }
     }
-
+    rcu_read_unlock();
     qemu_lockcnt_dec(&ctx->list_lock);
     return have_select_revents;
 }
@@ -230,6 +221,7 @@ bool aio_pending(AioContext *ctx)
      * called while we're walking.
      */
     qemu_lockcnt_inc(&ctx->list_lock);
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
         if (node->pfd.revents && node->io_notify) {
             result = true;
@@ -246,6 +238,7 @@ bool aio_pending(AioContext *ctx)
         }
     }
 
+    rcu_read_unlock();
     qemu_lockcnt_dec(&ctx->list_lock);
     return result;
 }
@@ -256,6 +249,7 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     bool progress = false;
     AioHandler *tmp;
 
+    rcu_read_lock();
     /*
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
@@ -263,8 +257,7 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents = node->pfd.revents;
 
-        if (!node->deleted &&
-            (revents || event_notifier_get_handle(node->e) == event) &&
+        if ((revents || event_notifier_get_handle(node->e) == event) &&
             node->io_notify) {
             node->pfd.revents = 0;
             node->io_notify(node->e);
@@ -275,8 +268,7 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
             }
         }
 
-        if (!node->deleted &&
-            (node->io_read || node->io_write)) {
+        if ((node->io_read || node->io_write)) {
             node->pfd.revents = 0;
             if ((revents & G_IO_IN) && node->io_read) {
                 node->io_read(node->opaque);
@@ -297,14 +289,8 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
             }
         }
 
-        if (node->deleted) {
-            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
-                QLIST_REMOVE(node, node);
-                g_free(node);
-                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
-            }
-        }
     }
+    rcu_read_unlock();
 
     return progress;
 }
@@ -344,12 +330,14 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* fill fd sets */
     count = 0;
+    rcu_read_lock();
     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
-        if (!node->deleted && node->io_notify
+        if (node->io_notify
             && aio_node_check(ctx, node->is_external)) {
             events[count++] = event_notifier_get_handle(node->e);
         }
     }
+    rcu_read_unlock();
 
     first = true;
 
-- 
2.19.1