From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 01/11] qemu: drop support for agent connections on PTYs
Date: Fri, 14 Feb 2020 12:51:59 +0000
Message-Id: <20200214125209.1152894-2-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

Libvirt has never implicitly configured the QEMU agent to run on a PTY.
In theory an end user may have written such an XML config, but this is
reasonably unlikely, since when a bare <channel> is provided libvirt will
auto-expand it to a UNIX socket backend.

With this change a user who has used the PTY backend will have to switch
to the UNIX backend if they wish to use the libvirt APIs for interacting
with the agent. This will not have any guest ABI impact.

Signed-off-by: Daniel P. Berrangé
Reviewed-by: Michal Privoznik
---
 src/qemu/qemu_agent.c | 36 ++----------------------------------
 1 file changed, 2 insertions(+), 34 deletions(-)

diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c
index 7d01d21a11..7ca5975a76 100644
--- a/src/qemu/qemu_agent.c
+++ b/src/qemu/qemu_agent.c
@@ -223,30 +223,6 @@ qemuAgentOpenUnix(const char *monitor)
     return -1;
 }
 
-static int
-qemuAgentOpenPty(const char *monitor)
-{
-    int monfd;
-
-    if ((monfd = open(monitor, O_RDWR | O_NONBLOCK)) < 0) {
-        virReportSystemError(errno,
-                             _("Unable to open monitor path %s"), monitor);
-        return -1;
-    }
-
-    if (virSetCloseExec(monfd) < 0) {
-        virReportSystemError(errno, "%s",
-                             _("Unable to set monitor close-on-exec flag"));
-        goto error;
-    }
-
-    return monfd;
-
- error:
-    VIR_FORCE_CLOSE(monfd);
-    return -1;
-}
-
 
 static int
 qemuAgentIOProcessEvent(qemuAgentPtr mon,
@@ -705,22 +681,14 @@ qemuAgentOpen(virDomainObjPtr vm,
     mon->vm = vm;
     mon->cb = cb;
 
-    switch (config->type) {
-    case VIR_DOMAIN_CHR_TYPE_UNIX:
-        mon->fd = qemuAgentOpenUnix(config->data.nix.path);
-        break;
-
-    case VIR_DOMAIN_CHR_TYPE_PTY:
-        mon->fd = qemuAgentOpenPty(config->data.file.path);
-        break;
-
-    default:
+    if (config->type != VIR_DOMAIN_CHR_TYPE_UNIX) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unable to handle monitor type: %s"),
                        virDomainChrTypeToString(config->type));
         goto cleanup;
     }
 
+    mon->fd = qemuAgentOpenUnix(config->data.nix.path);
     if (mon->fd == -1)
         goto cleanup;
 
-- 
2.24.1

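A note on the surviving code path: the full body of qemuAgentOpenUnix() is
not visible in the hunk above. As a rough, self-contained sketch (not taken
from libvirt; the helper name is invented), connecting to the agent's UNIX
socket with close-on-exec semantics looks roughly like this:

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Hypothetical stand-in for the kind of work qemuAgentOpenUnix() does:
 * SOCK_CLOEXEC covers the close-on-exec handling that the removed PTY
 * helper had to set up by hand with virSetCloseExec(). */
static int
demoOpenAgentSocket(const char *path)
{
    struct sockaddr_un addr;
    int fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);

    if (fd < 0)
        return -1;

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}
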
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 02/11] qemu: drop ability to open monitor from FD
Date: Fri, 14 Feb 2020 12:52:00 +0000
Message-Id: <20200214125209.1152894-3-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

The qemuMonitorOpenFD method has not been used since it was first
introduced.

Signed-off-by: Daniel P. Berrangé
Reviewed-by: Michal Privoznik
---
 src/qemu/qemu_monitor.c | 10 ----------
 src/qemu/qemu_monitor.h |  5 -----
 2 files changed, 15 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 008d4a0e75..bf53962872 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -824,16 +824,6 @@ qemuMonitorOpen(virDomainObjPtr vm,
 }
 
 
-qemuMonitorPtr
-qemuMonitorOpenFD(virDomainObjPtr vm,
-                  int sockfd,
-                  qemuMonitorCallbacksPtr cb,
-                  void *opaque)
-{
-    return qemuMonitorOpenInternal(vm, sockfd, cb, opaque);
-}
-
-
 /**
  * qemuMonitorRegister:
  * @mon: QEMU monitor
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 8cf9e11899..c84cd425df 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -394,11 +394,6 @@ qemuMonitorPtr qemuMonitorOpen(virDomainObjPtr vm,
                                qemuMonitorCallbacksPtr cb,
                                void *opaque)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5);
-qemuMonitorPtr qemuMonitorOpenFD(virDomainObjPtr vm,
-                                 int sockfd,
-                                 qemuMonitorCallbacksPtr cb,
-                                 void *opaque)
-    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(4);
 
 bool qemuMonitorRegister(qemuMonitorPtr mon)
     ATTRIBUTE_NONNULL(1);
-- 
2.24.1

From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 03/11] src: set the OS level thread name
Date: Fri, 14 Feb 2020 12:52:01 +0000
Message-Id: <20200214125209.1152894-4-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

Setting the thread name makes it easier to debug libvirtd when many
threads are running.

Signed-off-by: Daniel P. Berrangé
Reviewed-by: Michal Privoznik
---
 src/libvirt_private.syms |  1 +
 src/util/virthread.c     | 44 +++++++++++++++++++++++++++++++++++-----
 src/util/virthread.h     |  4 +++-
 3 files changed, 43 insertions(+), 6 deletions(-)

diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index dc0449d1d8..375e6ea000 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -3258,6 +3258,7 @@ virThreadCreateFull;
 virThreadID;
 virThreadIsSelf;
 virThreadJoin;
+virThreadMaxName;
 virThreadSelf;
 virThreadSelfID;
 
diff --git a/src/util/virthread.c b/src/util/virthread.c
index cdc5cab604..750e8d5655 100644
--- a/src/util/virthread.c
+++ b/src/util/virthread.c
@@ -175,23 +175,57 @@ void virCondBroadcast(virCondPtr c)
 
 struct virThreadArgs {
     virThreadFunc func;
-    const char *funcName;
+    char *name;
     bool worker;
     void *opaque;
 };
 
+size_t virThreadMaxName(void)
+{
+#if defined(__FreeBSD__) || defined(__APPLE__)
+    return 63;
+#else
+# ifdef __linux__
+    return 15;
+# else
+    return 0; /* unlimited */
+# endif
+#endif
+}
+
 static void *virThreadHelper(void *data)
 {
     struct virThreadArgs *args = data;
     struct virThreadArgs local = *args;
+    g_autofree char *thname = NULL;
+    size_t maxname = virThreadMaxName();
 
     /* Free args early, rather than tying it up during the entire thread. */
     VIR_FREE(args);
 
     if (local.worker)
-        virThreadJobSetWorker(local.funcName);
+        virThreadJobSetWorker(local.name);
     else
-        virThreadJobSet(local.funcName);
+        virThreadJobSet(local.name);
+
+    if (maxname) {
+        thname = g_strndup(local.name, maxname);
+    } else {
+        thname = g_strdup(local.name);
+    }
+    g_free(local.name);
+
+#if defined(__linux__) || defined(WIN32)
+    pthread_setname_np(pthread_self(), thname);
+#else
+# ifdef __FreeBSD__
+    pthread_set_name_np(pthread_self(), thname);
+# else
+#  ifdef __APPLE__
+    pthread_setname_np(thname);
+#  endif
+# endif
+#endif
 
     local.func(local.opaque);
 
@@ -204,7 +238,7 @@ static void *virThreadHelper(void *data)
 int virThreadCreateFull(virThreadPtr thread,
                         bool joinable,
                         virThreadFunc func,
-                        const char *funcName,
+                        const char *name,
                         bool worker,
                         void *opaque)
 {
@@ -221,7 +255,7 @@ int virThreadCreateFull(virThreadPtr thread,
     }
 
     args->func = func;
-    args->funcName = funcName;
+    args->name = g_strdup(name);
     args->worker = worker;
     args->opaque = opaque;
 
diff --git a/src/util/virthread.h b/src/util/virthread.h
index a7960e444a..c227951ddd 100644
--- a/src/util/virthread.h
+++ b/src/util/virthread.h
@@ -90,13 +90,15 @@ typedef void (*virThreadFunc)(void *opaque);
 int virThreadCreateFull(virThreadPtr thread,
                         bool joinable,
                         virThreadFunc func,
-                        const char *funcName,
+                        const char *name,
                         bool worker,
                         void *opaque) G_GNUC_WARN_UNUSED_RESULT;
 void virThreadSelf(virThreadPtr thread);
 bool virThreadIsSelf(virThreadPtr thread);
 void virThreadJoin(virThreadPtr thread);
 
+size_t virThreadMaxName(void);
+
 /* This API is *NOT* for general use. It exists solely as a stub
  * for integration with libselinux AVC callbacks */
 void virThreadCancel(virThreadPtr thread);
-- 
2.24.1

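The 15 byte limit encoded in virThreadMaxName() comes from the kernel's
thread "comm" field, which is 16 bytes including the trailing NUL. A
minimal standalone illustration of the primitive this patch wraps,
assuming Linux with glibc (this program is not part of the patch):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *opaque)
{
    /* On Linux the name must fit in 15 bytes plus a trailing NUL;
     * longer names make pthread_setname_np() fail with ERANGE. */
    if (pthread_setname_np(pthread_self(), "demo-worker") != 0)
        fprintf(stderr, "failed to set thread name\n");
    sleep(5); /* long enough to inspect with e.g. "ps -L -o pid,comm" */
    return NULL;
}

int main(void)
{
    pthread_t th;

    if (pthread_create(&th, NULL, worker, NULL) != 0)
        return 1;
    pthread_join(th, NULL);
    return 0;
}
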
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 04/11] src: improve thread naming with human targetted names
Date: Fri, 14 Feb 2020 12:52:02 +0000
Message-Id: <20200214125209.1152894-5-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

Historically threads have been given a name based on the C function that
runs them, and that name was only used inside libvirt. With OS-level
thread naming the name is now visible to debuggers, but it also has to
fit in 15 characters on Linux, so function names are too long in some
cases.

Signed-off-by: Daniel P. Berrangé
Reviewed-by: Michal Privoznik
---
 src/libxl/libxl_domain.c            | 10 ++++++----
 src/libxl/libxl_migration.c         | 23 ++++++++++++++++++-----
 src/lxc/lxc_fuse.c                  |  4 ++--
 src/node_device/node_device_udev.c  |  7 ++++---
 src/nwfilter/nwfilter_dhcpsnoop.c   | 11 ++++++-----
 src/nwfilter/nwfilter_learnipaddr.c | 10 ++++++----
 src/qemu/qemu_driver.c              |  3 ++-
 src/qemu/qemu_migration.c           |  8 +++++---
 src/qemu/qemu_process.c             | 17 ++++++++++++-----
 src/remote/remote_daemon.c          |  9 ++++++---
 src/rpc/virnetserver.c              |  9 +++++----
 src/storage/storage_backend_scsi.c  |  4 ++--
 src/storage/storage_driver.c        |  4 ++--
 src/util/vircommand.c               |  5 +++--
 src/util/virfdstream.c              | 10 ++++++----
 src/util/virnodesuspend.c           |  8 +++++---
 src/util/virthreadpool.c            | 14 ++++++++++----
 src/util/virthreadpool.h            |  2 +-
 18 files changed, 101 insertions(+), 57 deletions(-)

diff --git a/src/libxl/libxl_domain.c b/src/libxl/libxl_domain.c
index c8b68665af..e3da9f777d 100644
--- a/src/libxl/libxl_domain.c
+++ b/src/libxl/libxl_domain.c
@@ -664,6 +664,7 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
     virThread thread;
     g_autoptr(libxlDriverConfig) cfg = NULL;
     int ret = -1;
+    g_autofree char *name = NULL;
 
     if (event->type != LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN &&
         event->type != LIBXL_EVENT_TYPE_DOMAIN_DEATH) {
@@ -687,12 +688,13 @@ libxlDomainEventHandler(void *data, VIR_LIBXL_EVENT_CONST libxl_event *event)
 
     shutdown_info->driver = driver;
     shutdown_info->event = (libxl_event *)event;
+    name = g_strdup_printf("ev-%d", event->domid);
     if (event->type == LIBXL_EVENT_TYPE_DOMAIN_SHUTDOWN)
-        ret = virThreadCreate(&thread, false, libxlDomainShutdownThread,
-                              shutdown_info);
+        ret = virThreadCreateFull(&thread, false, libxlDomainShutdownThread,
+                                  name, false, shutdown_info);
     else if (event->type == LIBXL_EVENT_TYPE_DOMAIN_DEATH)
-        ret = virThreadCreate(&thread, false, libxlDomainDeathThread,
-                              shutdown_info);
+        ret = virThreadCreateFull(&thread, false, libxlDomainDeathThread,
+                                  name, false, shutdown_info);
 
     if (ret < 0) {
         /*
diff --git a/src/libxl/libxl_migration.c b/src/libxl/libxl_migration.c
index 873b2b3e01..e5f39cfc40 100644
--- a/src/libxl/libxl_migration.c
+++ b/src/libxl/libxl_migration.c
@@ -293,6 +293,7 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
     virNetSocketPtr client_sock;
     int recvfd = -1;
     size_t i;
+    g_autofree char *name = NULL;
 
     /* Accept migration connection */
     if (virNetSocketAccept(sock, &client_sock) < 0 || !client_sock) {
@@ -313,8 +314,13 @@ libxlMigrateDstReceive(virNetSocketPtr sock,
     VIR_FREE(priv->migrationDstReceiveThr);
     if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
         goto fail;
-    if (virThreadCreate(priv->migrationDstReceiveThr, true,
-                        libxlDoMigrateDstReceive, args) < 0) {
+
+    name = g_strdup_printf("mig-%s", args->vm->def->name);
+    if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+                            libxlDoMigrateDstReceive,
+                            name,
+                            false,
+                            args) < 0) {
         virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Failed to create thread for receiving migration data"));
         goto fail;
@@ -553,6 +559,7 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
     char *xmlout = NULL;
     int dataFD[2] = { -1, -1 };
     int ret = -1;
+    g_autofree char *name = NULL;
 
     if (libxlDomainMigrationPrepareAny(dconn, def, cookiein, cookieinlen,
                                        &mig, &xmlout, &taint_hook) < 0)
@@ -610,7 +617,10 @@ libxlDomainMigrationDstPrepareTunnel3(virConnectPtr dconn,
     VIR_FREE(priv->migrationDstReceiveThr);
     if (VIR_ALLOC(priv->migrationDstReceiveThr) < 0)
         goto error;
-    if (virThreadCreate(priv->migrationDstReceiveThr, true, libxlDoMigrateDstReceive, args) < 0) {
+    name = g_strdup_printf("mig-%s", args->vm->def->name);
+    if (virThreadCreateFull(priv->migrationDstReceiveThr, true,
+                            libxlDoMigrateDstReceive,
+                            name, false, args) < 0) {
         virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Failed to create thread for receiving migration data"));
         goto endjob;
@@ -909,6 +919,7 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
     struct libxlTunnelControl *tc = NULL;
     libxlTunnelMigrationThread *arg = NULL;
     int ret = -1;
+    g_autofree char *name = NULL;
 
     if (VIR_ALLOC(tc) < 0)
         goto out;
@@ -924,8 +935,10 @@ libxlMigrationSrcStartTunnel(libxlDriverPrivatePtr driver,
     arg->srcFD = tc->dataFD[0];
     /* Write to dest stream */
     arg->st = st;
-    if (virThreadCreate(&tc->thread, true,
-                        libxlTunnel3MigrationSrcFunc, arg) < 0) {
+    name = g_strdup_printf("mig-%s", vm->def->name);
+    if (virThreadCreateFull(&tc->thread, true,
+                            libxlTunnel3MigrationSrcFunc,
+                            name, false, arg) < 0) {
         virReportError(errno, "%s",
                        _("Unable to create tunnel migration thread"));
         goto out;
diff --git a/src/lxc/lxc_fuse.c b/src/lxc/lxc_fuse.c
index 44f240a0b5..5003771a24 100644
--- a/src/lxc/lxc_fuse.c
+++ b/src/lxc/lxc_fuse.c
@@ -342,8 +342,8 @@ int lxcSetupFuse(virLXCFusePtr *f, virDomainDefPtr def)
 
 int lxcStartFuse(virLXCFusePtr fuse)
 {
-    if (virThreadCreate(&fuse->thread, false, lxcFuseRun,
-                        (void *)fuse) < 0) {
+    if (virThreadCreateFull(&fuse->thread, false, lxcFuseRun,
+                            "lxc-fuse", false, (void *)fuse) < 0) {
         lxcFuseDestroy(fuse);
         return -1;
     }
diff --git a/src/node_device/node_device_udev.c b/src/node_device/node_device_udev.c
index 396763fa29..2210b457ee 100644
--- a/src/node_device/node_device_udev.c
+++ b/src/node_device/node_device_udev.c
@@ -1862,7 +1862,8 @@ nodeStateInitialize(bool privileged,
     udev_monitor_set_receive_buffer_size(priv->udev_monitor,
                                          128 * 1024 * 1024);
 
-    if (virThreadCreate(&priv->th, true, udevEventHandleThread, NULL) < 0) {
+    if (virThreadCreateFull(&priv->th, true, udevEventHandleThread,
+                            "udev-event", false, NULL) < 0) {
         virReportSystemError(errno, "%s",
                              _("failed to create udev handler thread"));
         goto unlock;
@@ -1888,8 +1889,8 @@ nodeStateInitialize(bool privileged,
     if (udevSetupSystemDev() != 0)
         goto cleanup;
 
-    if (virThreadCreate(&enumThread, false, nodeStateInitializeEnumerate,
-                        udev) < 0) {
+    if (virThreadCreateFull(&enumThread, false, nodeStateInitializeEnumerate,
+                            "nodedev-init", false, udev) < 0) {
         virReportSystemError(errno, "%s",
                              _("failed to create udev enumerate thread"));
         goto cleanup;
diff --git a/src/nwfilter/nwfilter_dhcpsnoop.c b/src/nwfilter/nwfilter_dhcpsnoop.c
index e7f5b511ae..953d8936a4 100644
--- a/src/nwfilter/nwfilter_dhcpsnoop.c
+++ b/src/nwfilter/nwfilter_dhcpsnoop.c
@@ -1366,9 +1366,10 @@ virNWFilterDHCPSnoopThread(void *req0)
         }
         tmp = virNetDevGetIndex(req->binding->portdevname, &ifindex);
         threadkey = g_strdup(req->threadkey);
-        worker = virThreadPoolNew(1, 1, 0,
-                                  virNWFilterDHCPDecodeWorker,
-                                  req);
+        worker = virThreadPoolNewFull(1, 1, 0,
+                                      virNWFilterDHCPDecodeWorker,
+                                      "dhcp-decode",
+                                      req);
     }
 
     /* let creator know how well we initialized */
@@ -1638,8 +1639,8 @@ virNWFilterDHCPSnoopReq(virNWFilterTechDriverPtr techdriver,
     /* prevent thread from holding req */
     virNWFilterSnoopReqLock(req);
 
-    if (virThreadCreate(&thread, false, virNWFilterDHCPSnoopThread,
-                        req) != 0) {
+    if (virThreadCreateFull(&thread, false, virNWFilterDHCPSnoopThread,
+                            "dhcp-snoop", false, req) != 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("virNWFilterDHCPSnoopReq virThreadCreate "
                          "failed on interface '%s'"), binding->portdevname);
diff --git a/src/nwfilter/nwfilter_learnipaddr.c b/src/nwfilter/nwfilter_learnipaddr.c
index f2d5e60d43..4ce8d5ba03 100644
--- a/src/nwfilter/nwfilter_learnipaddr.c
+++ b/src/nwfilter/nwfilter_learnipaddr.c
@@ -734,10 +734,12 @@ virNWFilterLearnIPAddress(virNWFilterTechDriverPtr techdriver,
     if (rc < 0)
         goto err_free_req;
 
-    if (virThreadCreate(&thread,
-                        false,
-                        learnIPAddressThread,
-                        req) != 0)
+    if (virThreadCreateFull(&thread,
+                            false,
+                            learnIPAddressThread,
+                            "ip-learn",
+                            false,
+                            req) != 0)
         goto err_dereg_req;
 
     return 0;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2813f084cd..0aab2f683d 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -996,7 +996,8 @@ qemuStateInitialize(bool privileged,
     /* must be initialized before trying to reconnect to all the
     * running domains since there might occur some QEMU monitor
     * events that will be dispatched to the worker pool */
-    qemu_driver->workerPool = virThreadPoolNew(0, 1, 0, qemuProcessEventHandler, qemu_driver);
+    qemu_driver->workerPool = virThreadPoolNewFull(0, 1, 0, qemuProcessEventHandler,
+                                                   "qemu-event", qemu_driver);
     if (!qemu_driver->workerPool)
         goto error;
 
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index ceac81c960..76634add2f 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3298,9 +3298,11 @@ qemuMigrationSrcStartTunnel(virStreamPtr st,
     io->wakeupRecvFD = wakeupFD[0];
     io->wakeupSendFD = wakeupFD[1];
 
-    if (virThreadCreate(&io->thread, true,
-                        qemuMigrationSrcIOFunc,
-                        io) < 0) {
+    if (virThreadCreateFull(&io->thread, true,
+                            qemuMigrationSrcIOFunc,
+                            "qemu-mig-tunnel",
+                            false,
+                            io) < 0) {
         virReportSystemError(errno, "%s",
                              _("Unable to create migration thread"));
         goto error;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index bf987a3bc3..e36d1dd7c7 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -515,13 +515,16 @@ qemuProcessShutdownOrReboot(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
     if (priv->fakeReboot) {
+        g_autofree char *name = g_strdup_printf("reboot-%s", vm->def->name);
         qemuDomainSetFakeReboot(driver, vm, false);
         virObjectRef(vm);
         virThread th;
-        if (virThreadCreate(&th,
-                            false,
-                            qemuProcessFakeReboot,
-                            vm) < 0) {
+        if (virThreadCreateFull(&th,
+                                false,
+                                qemuProcessFakeReboot,
+                                name,
+                                false,
+                                vm) < 0) {
             VIR_ERROR(_("Failed to create reboot thread, killing domain"));
             ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
             priv->pausedShutdown = false;
@@ -8227,6 +8230,7 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
     virThread thread;
     struct qemuProcessReconnectData *src = opaque;
     struct qemuProcessReconnectData *data;
+    g_autofree char *name = NULL;
 
     /* If the VM was inactive, we don't need to reconnect */
     if (!obj->pid)
@@ -8246,7 +8250,10 @@ qemuProcessReconnectHelper(virDomainObjPtr obj,
     virObjectLock(obj);
     virObjectRef(obj);
 
-    if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
+    name = g_strdup_printf("init-%s", obj->def->name);
+
+    if (virThreadCreateFull(&thread, false, qemuProcessReconnect,
+                            name, false, data) < 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Could not create thread. QEMU initialization "
                          "might be incomplete"));
diff --git a/src/remote/remote_daemon.c b/src/remote/remote_daemon.c
index 7082460bae..37e53a87cc 100644
--- a/src/remote/remote_daemon.c
+++ b/src/remote/remote_daemon.c
@@ -713,7 +713,8 @@ static void daemonReloadHandler(virNetDaemonPtr dmn G_GNUC_UNUSED,
         return;
     }
 
-    if (virThreadCreate(&thr, false, daemonReloadHandlerThread, NULL) < 0) {
+    if (virThreadCreateFull(&thr, false, daemonReloadHandlerThread,
+                            "daemon-reload", false, NULL) < 0) {
         /*
          * Not much we can do on error here except log it.
          */
@@ -770,7 +771,8 @@ static void daemonStop(virNetDaemonPtr dmn)
 {
     virThread thr;
     virObjectRef(dmn);
-    if (virThreadCreate(&thr, false, daemonStopWorker, dmn) < 0)
+    if (virThreadCreateFull(&thr, false, daemonStopWorker,
+                            "daemon-stop", false, dmn) < 0)
         virObjectUnref(dmn);
 }
 
@@ -876,7 +878,8 @@ static int daemonStateInit(virNetDaemonPtr dmn)
 {
     virThread thr;
     virObjectRef(dmn);
-    if (virThreadCreate(&thr, false, daemonRunStateInit, dmn) < 0) {
+    if (virThreadCreateFull(&thr, false, daemonRunStateInit,
+                            "daemon-init", false, dmn) < 0) {
         virObjectUnref(dmn);
         return -1;
     }
diff --git a/src/rpc/virnetserver.c b/src/rpc/virnetserver.c
index c87dade1a8..2031e1f3bd 100644
--- a/src/rpc/virnetserver.c
+++ b/src/rpc/virnetserver.c
@@ -367,10 +367,11 @@ virNetServerPtr virNetServerNew(const char *name,
     if (!(srv = virObjectLockableNew(virNetServerClass)))
         return NULL;
 
-    if (!(srv->workers = virThreadPoolNew(min_workers, max_workers,
-                                          priority_workers,
-                                          virNetServerHandleJob,
-                                          srv)))
+    if (!(srv->workers = virThreadPoolNewFull(min_workers, max_workers,
+                                              priority_workers,
+                                              virNetServerHandleJob,
+                                              "rpc-worker",
+                                              srv)))
         goto error;
 
     srv->name = g_strdup(name);
diff --git a/src/storage/storage_backend_scsi.c b/src/storage/storage_backend_scsi.c
index 9c0f041616..37ae4325ca 100644
--- a/src/storage/storage_backend_scsi.c
+++ b/src/storage/storage_backend_scsi.c
@@ -334,8 +334,8 @@ createVport(virStoragePoolDefPtr def,
         memcpy(cbdata->pool_uuid, def->uuid, VIR_UUID_BUFLEN);
         cbdata->fchost_name = g_steal_pointer(&name);
 
-        if (virThreadCreate(&thread, false, virStoragePoolFCRefreshThread,
-                            cbdata) < 0) {
+        if (virThreadCreateFull(&thread, false, virStoragePoolFCRefreshThread,
+                                "scsi-refresh", false, cbdata) < 0) {
            /* Oh well - at least someone can still refresh afterwards */
            VIR_DEBUG("Failed to create FC Pool Refresh Thread");
            virStoragePoolFCRefreshDataFree(cbdata);
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
index 2dd093a9da..7a266f97c8 100644
--- a/src/storage/storage_driver.c
+++ b/src/storage/storage_driver.c
@@ -2366,8 +2366,8 @@ virStorageVolFDStreamCloseCb(virStreamPtr st G_GNUC_UNUSED,
 {
     virThread thread;
 
-    if (virThreadCreate(&thread, false, virStorageVolPoolRefreshThread,
-                        opaque) < 0) {
+    if (virThreadCreateFull(&thread, false, virStorageVolPoolRefreshThread,
+                            "vol-refresh", false, opaque) < 0) {
         /* Not much else can be done */
         VIR_ERROR(_("Failed to create thread to handle pool refresh"));
         goto error;
diff --git a/src/util/vircommand.c b/src/util/vircommand.c
index 4a87f7c281..c150d99452 100644
--- a/src/util/vircommand.c
+++ b/src/util/vircommand.c
@@ -2620,8 +2620,9 @@ virCommandRunAsync(virCommandPtr cmd, pid_t *pid)
         /* clear any error so we can catch if the helper thread reports one */
         cmd->has_error = 0;
         if (VIR_ALLOC(cmd->asyncioThread) < 0 ||
-            virThreadCreate(cmd->asyncioThread, true,
-                            virCommandDoAsyncIOHelper, cmd) < 0) {
+            virThreadCreateFull(cmd->asyncioThread, true,
+                                virCommandDoAsyncIOHelper,
+                                "cmd-async-io", false, cmd) < 0) {
             virReportSystemError(errno, "%s",
                                  _("Unable to create thread "
                                    "to process command's IO"));
diff --git a/src/util/virfdstream.c b/src/util/virfdstream.c
index 3337fc2060..111e451f8c 100644
--- a/src/util/virfdstream.c
+++ b/src/util/virfdstream.c
@@ -1134,10 +1134,12 @@ static int virFDStreamOpenInternal(virStreamPtr st,
             goto error;
         }
 
-        if (virThreadCreate(fdst->thread,
-                            true,
-                            virFDStreamThread,
-                            threadData) < 0)
+        if (virThreadCreateFull(fdst->thread,
+                                true,
+                                virFDStreamThread,
+                                "fd-stream",
+                                false,
+                                threadData) < 0)
             goto error;
     }
 
diff --git a/src/util/virnodesuspend.c b/src/util/virnodesuspend.c
index f81ea1ce02..544a29783c 100644
--- a/src/util/virnodesuspend.c
+++ b/src/util/virnodesuspend.c
@@ -220,9 +220,11 @@ int virNodeSuspend(unsigned int target,
     if (virNodeSuspendSetNodeWakeup(duration) < 0)
         goto cleanup;
 
-    if (virThreadCreate(&thread, false,
-                        virNodeSuspendHelper,
-                        (void *)cmdString) < 0) {
+    if (virThreadCreateFull(&thread, false,
+                            virNodeSuspendHelper,
+                            "node-suspend",
+                            false,
+                            (void *)cmdString) < 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Failed to create thread to suspend the host"));
         goto cleanup;
diff --git a/src/util/virthreadpool.c b/src/util/virthreadpool.c
index ff5f34a946..379d2369ad 100644
--- a/src/util/virthreadpool.c
+++ b/src/util/virthreadpool.c
@@ -54,7 +54,7 @@ struct _virThreadPool {
     bool quit;
 
     virThreadPoolJobFunc jobFunc;
-    const char *jobFuncName;
+    const char *jobName;
     void *jobOpaque;
     virThreadPoolJobList jobList;
     size_t jobQueueDepth;
@@ -187,6 +187,7 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
         return -1;
 
     for (i = 0; i < gain; i++) {
+        g_autofree char *name = NULL;
        if (VIR_ALLOC(data) < 0)
            goto error;
 
@@ -194,10 +195,15 @@ virThreadPoolExpand(virThreadPoolPtr pool, size_t gain, bool priority)
        data->cond = priority ? &pool->prioCond : &pool->cond;
        data->priority = priority;
 
+        if (priority)
+            name = g_strdup_printf("prio-%s", pool->jobName);
+        else
+            name = g_strdup(pool->jobName);
+
        if (virThreadCreateFull(&(*workers)[i],
                                false,
                                virThreadPoolWorker,
-                               pool->jobFuncName,
+                               name,
                                true,
                                data) < 0) {
            VIR_FREE(data);
@@ -218,7 +224,7 @@ virThreadPoolNewFull(size_t minWorkers,
                      size_t maxWorkers,
                      size_t prioWorkers,
                      virThreadPoolJobFunc func,
-                     const char *funcName,
+                     const char *name,
                      void *opaque)
 {
     virThreadPoolPtr pool;
@@ -232,7 +238,7 @@ virThreadPoolNewFull(size_t minWorkers,
     pool->jobList.tail = pool->jobList.head = NULL;
 
     pool->jobFunc = func;
-    pool->jobFuncName = funcName;
+    pool->jobName = name;
     pool->jobOpaque = opaque;
 
     if (virMutexInit(&pool->mutex) < 0)
diff --git a/src/util/virthreadpool.h b/src/util/virthreadpool.h
index 5a55e22489..c97d9b3919 100644
--- a/src/util/virthreadpool.h
+++ b/src/util/virthreadpool.h
@@ -35,7 +35,7 @@ virThreadPoolPtr virThreadPoolNewFull(size_t minWorkers,
                                       size_t maxWorkers,
                                       size_t prioWorkers,
                                       virThreadPoolJobFunc func,
-                                      const char *funcName,
+                                      const char *name,
                                       void *opaque) ATTRIBUTE_NONNULL(4);
 
 size_t virThreadPoolGetMinWorkers(virThreadPoolPtr pool);
-- 
2.24.1

From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 05/11] src: introduce an abstraction for running event loops
Date: Fri, 14 Feb 2020 12:52:03 +0000
Message-Id: <20200214125209.1152894-6-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

We want a way to easily run a private GMainContext in a thread, with
correct synchronization between startup and shutdown of the thread.

Signed-off-by: Daniel P. Berrangé
---
 po/POTFILES.in            |   1 +
 src/libvirt_private.syms  |   5 ++
 src/util/Makefile.inc.am  |   2 +
 src/util/vireventthread.c | 175 ++++++++++++++++++++++++++++++++++++++
 src/util/vireventthread.h |  31 +++++++
 5 files changed, 214 insertions(+)
 create mode 100644 src/util/vireventthread.c
 create mode 100644 src/util/vireventthread.h

diff --git a/po/POTFILES.in b/po/POTFILES.in
index dba0d3a12e..d49c10407a 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -238,6 +238,7 @@
 @SRCDIR@/src/util/virerror.c
 @SRCDIR@/src/util/virerror.h
 @SRCDIR@/src/util/virevent.c
+@SRCDIR@/src/util/vireventthread.c
 @SRCDIR@/src/util/virfcp.c
 @SRCDIR@/src/util/virfdstream.c
 @SRCDIR@/src/util/virfile.c
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 375e6ea000..361c9d6c13 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1938,6 +1938,11 @@ virEventGLibRegister;
 virEventGLibRunOnce;
 
 
+# util/vireventthread.h
+virEventThreadGetContext;
+virEventThreadNew;
+
+
 # util/virfcp.h
 virFCIsCapableRport;
 virFCReadRportValue;
diff --git a/src/util/Makefile.inc.am b/src/util/Makefile.inc.am
index fbe67090d3..35629808c9 100644
--- a/src/util/Makefile.inc.am
+++ b/src/util/Makefile.inc.am
@@ -65,6 +65,8 @@ UTIL_SOURCES = \
 	util/vireventglib.h \
 	util/vireventglibwatch.c \
 	util/vireventglibwatch.h \
+	util/vireventthread.c \
+	util/vireventthread.h \
 	util/virfcp.c \
 	util/virfcp.h \
 	util/virfdstream.c \
diff --git a/src/util/vireventthread.c b/src/util/vireventthread.c
new file mode 100644
index 0000000000..aed376bc7c
--- /dev/null
+++ b/src/util/vireventthread.c
@@ -0,0 +1,175 @@
+/*
+ * vireventthread.c: thread running a dedicated GMainLoop
+ *
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#include "vireventthread.h"
+#include "virthread.h"
+#include "virerror.h"
+
+struct _virEventThread {
+    GObject parent;
+
+    GCond cond;
+    GMutex lock;
+    bool running;
+
+    GThread *thread;
+    GMainContext *context;
+    GMainLoop *loop;
+};
+
+G_DEFINE_TYPE(virEventThread, vir_event_thread, G_TYPE_OBJECT)
+
+#define VIR_FROM_THIS VIR_FROM_EVENT
+
+static void
+vir_event_thread_finalize(GObject *object)
+{
+    virEventThread *evt = VIR_EVENT_THREAD(object);
+
+    if (evt->thread) {
+        g_main_loop_quit(evt->loop);
+        g_thread_unref(evt->thread);
+    }
+
+    g_main_loop_unref(evt->loop);
+    g_main_context_unref(evt->context);
+
+    g_mutex_clear(&evt->lock);
+    g_cond_clear(&evt->cond);
+
+    G_OBJECT_CLASS(vir_event_thread_parent_class)->finalize(object);
+}
+
+
+static void
+vir_event_thread_init(virEventThread *evt)
+{
+    g_cond_init(&evt->cond);
+    g_mutex_init(&evt->lock);
+    evt->running = false;
+    evt->context = g_main_context_new();
+    evt->loop = g_main_loop_new(evt->context, FALSE);
+}
+
+
+static void
+vir_event_thread_class_init(virEventThreadClass *klass)
+{
+    GObjectClass *obj = G_OBJECT_CLASS(klass);
+
+    obj->finalize = vir_event_thread_finalize;
+}
+
+
+static gboolean
+virEventThreadNotify(void *opaque)
+{
+    virEventThread *evt = opaque;
+
+    g_mutex_lock(&evt->lock);
+    evt->running = TRUE;
+    g_mutex_unlock(&evt->lock);
+    g_cond_signal(&evt->cond);
+
+    return G_SOURCE_REMOVE;
+}
+
+
+static void *
+virEventThreadWorker(void *opaque)
+{
+    virEventThread *evt = opaque;
+    g_autoptr(GSource) running = g_idle_source_new();
+
+    g_source_set_callback(running, virEventThreadNotify, evt, NULL);
+
+    g_source_attach(running, evt->context);
+
+    g_main_loop_run(evt->loop);
+
+    g_main_loop_unref(evt->loop);
+    g_main_context_unref(evt->context);
+
+    return NULL;
+}
+
+
+static int
+virEventThreadStart(virEventThread *evt, const char *name)
+{
+    g_autoptr(GError) gerr = NULL;
+    g_autofree char *thname = NULL;
+    size_t maxname = virThreadMaxName();
+
+    if (maxname)
+        thname = g_strndup(name, maxname);
+    else
+        thname = g_strdup(name);
+
+    if (evt->thread) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("Event thread is already running"));
+        return -1;
+    }
+
+    g_main_loop_ref(evt->loop);
+    g_main_context_ref(evt->context);
+
+    evt->thread = g_thread_try_new(thname,
+                                   virEventThreadWorker,
+                                   evt,
+                                   &gerr);
+    if (!evt->thread) {
+        g_main_loop_unref(evt->loop);
+        g_main_context_unref(evt->context);
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("Unable to start event thread: %s"),
+                       gerr->message);
+        return -1;
+    }
+
+    g_mutex_lock(&evt->lock);
+    while (!evt->running)
+        g_cond_wait(&evt->cond, &evt->lock);
+    g_mutex_unlock(&evt->lock);
+
+    return 0;
+}
+
+
+virEventThread *
+virEventThreadNew(const char *name)
+{
+    g_autoptr(virEventThread) evt = VIR_EVENT_THREAD(g_object_new(VIR_TYPE_EVENT_THREAD, NULL));
+
+    if (virEventThreadStart(evt, name) < 0)
+        return NULL;
+
+    return g_steal_pointer(&evt);
+}
+
+
+GMainContext *
+virEventThreadGetContext(virEventThread *evt)
+{
+    return evt->context;
+}
diff --git a/src/util/vireventthread.h b/src/util/vireventthread.h
new file mode 100644
index 0000000000..5826c25cf4
--- /dev/null
+++ b/src/util/vireventthread.h
@@ -0,0 +1,31 @@
+/*
+ * vireventthread.h: thread running a dedicated GMainLoop
+ *
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "internal.h"
+#include <glib-object.h>
+
+#define VIR_TYPE_EVENT_THREAD vir_event_thread_get_type()
+G_DECLARE_FINAL_TYPE(virEventThread, vir_event_thread, VIR, EVENT_THREAD, GObject);
+
+virEventThread *virEventThreadNew(const char *name);
+
+GMainContext *virEventThreadGetContext(virEventThread *evt);
-- 
2.24.1

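A rough usage sketch of the new API (illustrative only; the callback and
function names below are invented): create the event thread, then attach
GLib sources to its context so they are dispatched by that thread.

#include "vireventthread.h"

static gboolean
demoTick(gpointer opaque G_GNUC_UNUSED)
{
    g_print("tick on the private event loop\n");
    return G_SOURCE_CONTINUE;
}

static virEventThread *
demoStart(void)
{
    virEventThread *evt = virEventThreadNew("demo-loop");
    g_autoptr(GSource) src = NULL;

    if (!evt)
        return NULL;

    src = g_timeout_source_new(1000);
    g_source_set_callback(src, demoTick, NULL, NULL);
    /* Sources attached to this context run on the worker thread spawned
     * inside virEventThreadNew(); dropping the last reference on evt
     * with g_object_unref() quits the loop. */
    g_source_attach(src, virEventThreadGetContext(evt));

    return evt; /* caller keeps the reference until shutdown */
}
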
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 06/11] qemu: start/stop an event loop thread for domains
Date: Fri, 14 Feb 2020 12:52:04 +0000
Message-Id: <20200214125209.1152894-7-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

The event loop thread will be responsible for handling any per-domain I/O
operations, most notably the QEMU monitor and agent sockets.

We start this event loop when launching QEMU, but stopping the event loop
is a little more complicated. The obvious idea is to stop it in
qemuProcessStop(), but if we do that we risk losing the final events from
the QEMU monitor, as they might not have been read by the event thread at
the time we tell the thread to stop.

The solution is to delay shutdown of the event thread until we have seen
EOF from the QEMU monitor, and thus know there are no further events to
process. Note that this assumes that we don't have events to process from
the QEMU agent.

Signed-off-by: Daniel P. Berrangé
Berrang=C3=A9 --- src/qemu/qemu_domain.c | 33 +++++++++++++++++++++++++++++++++ src/qemu/qemu_domain.h | 6 ++++++ src/qemu/qemu_process.c | 21 +++++++++++++++++++++ 3 files changed, 60 insertions(+) diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 6fc0bd4e68..916a21b5f2 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -2146,6 +2146,33 @@ dbusVMStateHashFree(void *opaque) } =20 =20 +int +qemuDomainObjStartWorker(virDomainObjPtr dom) +{ + qemuDomainObjPrivatePtr priv =3D dom->privateData; + + if (!priv->eventThread) { + g_autofree char *threadName =3D g_strdup_printf("vm-%s", dom->def-= >name); + if (!(priv->eventThread =3D virEventThreadNew(threadName))) + return -1; + } + + return 0; +} + + +void +qemuDomainObjStopWorker(virDomainObjPtr dom) +{ + qemuDomainObjPrivatePtr priv =3D dom->privateData; + + if (priv->eventThread) { + g_object_unref(priv->eventThread); + priv->eventThread =3D NULL; + } +} + + static void * qemuDomainObjPrivateAlloc(void *opaque) { @@ -2285,6 +2312,12 @@ qemuDomainObjPrivateFree(void *data) virHashFree(priv->blockjobs); virHashFree(priv->dbusVMStates); =20 + /* This should never be non-NULL if we get here, but just in case... */ + if (priv->eventThread) { + VIR_ERROR(_("Unexpected event thread still active during domain de= letion")); + g_object_unref(priv->eventThread); + } + VIR_FREE(priv); } =20 diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index f8fb48f2ff..9ce27fbdda 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -40,6 +40,7 @@ #include "logging/log_manager.h" #include "virdomainmomentobjlist.h" #include "virenum.h" +#include "vireventthread.h" =20 #define QEMU_DOMAIN_FORMAT_LIVE_FLAGS \ (VIR_DOMAIN_XML_SECURE) @@ -300,6 +301,8 @@ struct _qemuDomainObjPrivate { =20 virBitmapPtr namespaces; =20 + virEventThread *eventThread; + qemuMonitorPtr mon; virDomainChrSourceDefPtr monConfig; bool monError; @@ -630,6 +633,9 @@ struct _qemuDomainXmlNsDef { char **capsdel; }; =20 +int qemuDomainObjStartWorker(virDomainObjPtr dom); +void qemuDomainObjStopWorker(virDomainObjPtr dom); + virDomainObjPtr qemuDomainObjFromDomain(virDomainPtr domain); =20 qemuDomainSaveCookiePtr qemuDomainSaveCookieNew(virDomainObjPtr vm); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index e36d1dd7c7..73158e29e6 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -319,6 +319,9 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon, qemuDomainDestroyNamespace(driver, vm); =20 cleanup: + /* Now we got EOF we're not expecting more I/O, so we + * can finally kill the event thread */ + qemuDomainObjStopWorker(vm); virObjectUnlock(vm); } =20 @@ -6912,6 +6915,9 @@ qemuProcessLaunch(virConnectPtr conn, if (rv =3D=3D -1) /* The VM failed to start */ goto cleanup; =20 + if (qemuDomainObjStartWorker(vm) < 0) + goto cleanup; + VIR_DEBUG("Waiting for monitor to show up"); if (qemuProcessWaitForMonitor(driver, vm, asyncJob, logCtxt) < 0) goto cleanup; @@ -7394,6 +7400,18 @@ void qemuProcessStop(virQEMUDriverPtr driver, priv->monConfig =3D NULL; } =20 + /* + * We cannot stop the event thread at this time. When + * we are in this code, we may not yet have processed the + * STOP event or EOF from the monitor. So the event loop + * may have pending input that we need to process still. + * The qemuProcessHandleMonitorEOF method will kill + * the event thread because at that point we don't + * expect any more I/O from the QEMU monitor. 
We are + assuming we don't need to get any more events from the + QEMU agent at that time. + */ + /* Remove the master key */ qemuDomainMasterKeyRemove(priv); =20 @@ -7985,6 +8003,9 @@ qemuProcessReconnect(void *opaque) virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CHARDEV_FD_PASS)) retry =3D false; =20 + if (qemuDomainObjStartWorker(obj) < 0) + goto error; + VIR_DEBUG("Reconnect monitor to def=3D%p name=3D'%s' retry=3D%d", obj, obj->def->name, retry); =20 --=20 2.24.1
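The lifecycle described above maps onto plain GLib primitives: a dedicated thread runs a private GMainContext, and the thread is only torn down once no further events are expected. What follows is a minimal, self-contained sketch of that pattern, not libvirt's virEventThread implementation; the demo_* names are purely illustrative.

#include <glib.h>

typedef struct {
    GMainContext *ctx;
    GMainLoop *loop;
    GThread *thread;
} DemoEventThread;

static gpointer
demo_event_thread_run(gpointer opaque)
{
    DemoEventThread *et = opaque;

    /* Sources attached to et->ctx are dispatched by this thread only. */
    g_main_context_push_thread_default(et->ctx);
    g_main_loop_run(et->loop);
    g_main_context_pop_thread_default(et->ctx);
    return NULL;
}

static DemoEventThread *
demo_event_thread_new(const char *name)
{
    DemoEventThread *et = g_new0(DemoEventThread, 1);

    et->ctx = g_main_context_new();
    et->loop = g_main_loop_new(et->ctx, FALSE);
    et->thread = g_thread_new(name, demo_event_thread_run, et);
    return et;
}

static gboolean
demo_quit_cb(gpointer opaque)
{
    g_main_loop_quit(opaque);
    return G_SOURCE_REMOVE;
}

/* Stop the loop from inside its own thread, then reap it.  Callers are
 * expected to do this only once no further I/O events can arrive. */
static void
demo_event_thread_free(DemoEventThread *et)
{
    GSource *quit;

    if (!et)
        return;

    quit = g_idle_source_new();
    g_source_set_callback(quit, demo_quit_cb, et->loop, NULL);
    g_source_attach(quit, et->ctx);
    g_source_unref(quit);

    g_thread_join(et->thread);
    g_main_loop_unref(et->loop);
    g_main_context_unref(et->ctx);
    g_free(et);
}

int
main(void)
{
    DemoEventThread *et = demo_event_thread_new("vm-demo");

    /* ... attach monitor/agent I/O sources to et->ctx here ... */

    demo_event_thread_free(et);
    return 0;
}

Quitting the loop via an idle source attached to the same context means the quit request is processed by the loop thread itself, so the join in demo_event_thread_free() only happens after every pending source has been dispatched, which mirrors why the worker is stopped from the monitor EOF handler rather than from qemuProcessStop().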
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 07/11] qemu: start/stop an event thread for QMP probing
Date: Fri, 14 Feb 2020 12:52:05 +0000
Message-Id: <20200214125209.1152894-8-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

In common with regular QEMU guests, the QMP probing will need an event
loop for handling monitor I/O operations.

Signed-off-by: Daniel P. Berrangé
--- src/qemu/qemu_process.c | 16 ++++++++++++++++ src/qemu/qemu_process.h | 2 ++ 2 files changed, 18 insertions(+) diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 73158e29e6..7475813e9f 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -8377,6 +8377,9 @@ qemuProcessQMPFree(qemuProcessQMPPtr proc) return; =20 qemuProcessQMPStop(proc); + + g_object_unref(proc->eventThread); + VIR_FREE(proc->binary); VIR_FREE(proc->libDir); VIR_FREE(proc->uniqDir); @@ -8408,6 +8411,9 @@ qemuProcessQMPNew(const char *binary, { qemuProcessQMPPtr ret =3D NULL; qemuProcessQMPPtr proc =3D NULL; + g_autoptr(GError) gerr =3D NULL; + const char *threadSuffix; + g_autofree char *threadName =3D NULL; =20 VIR_DEBUG("exec=3D%s, libDir=3D%s, runUid=3D%u, runGid=3D%u, forceTCG= =3D%d", binary, libDir, runUid, runGid, forceTCG); @@ -8422,6 +8428,16 @@ qemuProcessQMPNew(const char *binary, proc->runGid =3D runGid; proc->forceTCG =3D forceTCG; =20 + threadSuffix =3D strrchr(binary, '-'); + if (threadSuffix) + threadSuffix++; + else + threadSuffix =3D binary; + threadName =3D g_strdup_printf("qmp-%s", threadSuffix); + + if (!(proc->eventThread =3D virEventThreadNew(threadName))) + goto cleanup; + ret =3D g_steal_pointer(&proc); =20 cleanup: diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 9af9f967fd..3077d3ef9e 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -24,6 +24,7 @@ #include "qemu_conf.h" #include "qemu_domain.h" #include "virstoragefile.h" +#include "vireventthread.h" =20 int qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig, const char *domainDir); @@ -217,6 +218,7 @@ struct _qemuProcessQMP { char *monpath; char *pidfile; char *uniqDir; + virEventThread *eventThread; virCommandPtr cmd; qemuMonitorPtr mon; pid_t pid; --=20 2.24.1
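For illustration, here is a standalone version of the thread-name derivation used above: everything after the last '-' in the emulator binary name is kept, so a long binary path still produces a short, readable thread name. The demo_* names are hypothetical, not part of the patch.

#include <glib.h>
#include <string.h>

/* "/usr/bin/qemu-system-x86_64" -> "qmp-x86_64" */
static char *
demo_qmp_thread_name(const char *binary)
{
    const char *suffix = strrchr(binary, '-');

    suffix = suffix ? suffix + 1 : binary;
    return g_strdup_printf("qmp-%s", suffix);
}

int
main(void)
{
    g_autofree char *name = demo_qmp_thread_name("/usr/bin/qemu-system-x86_64");

    g_print("%s\n", name);
    return 0;
}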
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 08/11] tests: start/stop an event thread for QEMU monitor/agent tests
Date: Fri, 14 Feb 2020 12:52:06 +0000
Message-Id: <20200214125209.1152894-9-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

Tests that use the QEMU monitor / agent need to have an event thread
running a private GMainContext. There is already a thread running the
main libvirt event loop, but this can't be eliminated yet as it is used
for more than just the monitor client I/O.

Signed-off-by: Daniel P. Berrangé
--- tests/qemumonitortestutils.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c index b29e5d8cd2..a1641050ea 100644 --- a/tests/qemumonitortestutils.c +++ b/tests/qemumonitortestutils.c @@ -36,6 +36,7 @@ #include "virlog.h" #include "virerror.h" #include "virstring.h" +#include "vireventthread.h" =20 #define VIR_FROM_THIS VIR_FROM_NONE =20 @@ -66,6 +67,8 @@ struct _qemuMonitorTest { virNetSocketPtr server; virNetSocketPtr client; =20 + virEventThread *eventThread; + qemuMonitorPtr mon; qemuAgentPtr agent; =20 @@ -389,6 +392,8 @@ qemuMonitorTestFree(qemuMonitorTestPtr test) qemuAgentClose(test->agent); } =20 + g_object_unref(test->eventThread); + virObjectUnref(test->vm); =20 if (test->started) @@ -1142,6 +1147,7 @@ qemuMonitorCommonTestInit(qemuMonitorTestPtr test) "}" /* We skip the normal handshake reply of "{\"execute\":\"qmp_capabilities\= "}" */ =20 + qemuMonitorTestPtr qemuMonitorTestNew(virDomainXMLOptionPtr xmlopt, virDomainObjPtr vm, @@ -1157,6 +1163,9 @@ qemuMonitorTestNew(virDomainXMLOptionPtr xmlopt, if (!(test =3D qemuMonitorCommonTestNew(xmlopt, vm, &src))) goto error; =20 + if (!(test->eventThread =3D virEventThreadNew("mon-test"))) + goto error; + test->qapischema =3D schema; if (!(test->mon =3D qemuMonitorOpen(test->vm, &src, @@ -1389,12 +1398,16 @@ qemuMonitorTestNewAgent(virDomainXMLOptionPtr xmlop= t) { qemuMonitorTestPtr test =3D NULL; virDomainChrSourceDef src; + g_autofree char *threadName =3D NULL; =20 memset(&src, 0, sizeof(src)); =20 if (!(test =3D qemuMonitorCommonTestNew(xmlopt, NULL, &src))) goto error; =20 + if (!(test->eventThread =3D virEventThreadNew("agent-test"))) + goto error; + if (!(test->agent =3D qemuAgentOpen(test->vm, &src, &qemuMonitorTestAgentCallbacks))) --=20 2.24.1
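The reason the tests need their own event thread is that GLib sources are dispatched only by whichever thread iterates the GMainContext they are attached to; the pre-existing global libvirt event loop never services a private context. A minimal sketch of that behaviour, with hypothetical demo_* names rather than the real test harness:

#include <glib.h>

static gboolean
demo_fired(gpointer opaque)
{
    gboolean *seen = opaque;

    *seen = TRUE;
    return G_SOURCE_REMOVE;
}

int
main(void)
{
    GMainContext *ctx = g_main_context_new();   /* stands in for the test's private context */
    GSource *src = g_idle_source_new();
    gboolean seen = FALSE;

    g_source_set_callback(src, demo_fired, &seen, NULL);
    g_source_attach(src, ctx);
    g_source_unref(src);

    /* Iterating the default context does not dispatch sources attached
     * to the private one... */
    g_main_context_iteration(NULL, FALSE);
    g_assert(!seen);

    /* ...only iterating the private context does. */
    while (!seen)
        g_main_context_iteration(ctx, TRUE);

    g_main_context_unref(ctx);
    return 0;
}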
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 09/11] qemu: convert monitor to use the per-VM event loop
Date: Fri, 14 Feb 2020 12:52:07 +0000
Message-Id: <20200214125209.1152894-10-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

This converts the QEMU monitor APIs to use the per-VM event loop, which
involves switching from the virEvent APIs to the GMainContext / GSource
APIs. A GSocket is used as a convenient way to create a GSource for a
socket, but is not yet used for actual I/O.

Signed-off-by: Daniel P.
Berrang=C3=A9 --- src/qemu/qemu_monitor.c | 145 ++++++++++++++++------------------- src/qemu/qemu_monitor.h | 3 +- src/qemu/qemu_process.c | 6 +- tests/qemumonitortestutils.c | 1 + 4 files changed, 71 insertions(+), 84 deletions(-) diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index bf53962872..d969853963 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -24,6 +24,7 @@ #include #include #include +#include =20 #include "qemu_monitor.h" #include "qemu_monitor_text.h" @@ -71,12 +72,9 @@ struct _qemuMonitor { =20 int fd; =20 - /* Represents the watch number to be used for updating and - * unregistering the monitor @fd for events in the event loop: - * > 0: valid watch number - * =3D 0: not registered - * < 0: an error occurred during the registration of @fd */ - int watch; + GMainContext *context; + GSocket *socket; + GSource *watch; =20 virDomainObjPtr vm; =20 @@ -226,6 +224,7 @@ qemuMonitorDispose(void *obj) (mon->cb->destroy)(mon, mon->vm, mon->callbackOpaque); virObjectUnref(mon->vm); =20 + g_main_context_unref(mon->context); virResetError(&mon->lastError); virCondDestroy(&mon->notify); VIR_FREE(mon->buffer); @@ -509,27 +508,16 @@ qemuMonitorIORead(qemuMonitorPtr mon) static void qemuMonitorUpdateWatch(qemuMonitorPtr mon) { - int events =3D - VIR_EVENT_HANDLE_HANGUP | - VIR_EVENT_HANDLE_ERROR; - - if (!mon->watch) - return; - - if (mon->lastError.code =3D=3D VIR_ERR_OK) { - events |=3D VIR_EVENT_HANDLE_READABLE; - - if ((mon->msg && mon->msg->txOffset < mon->msg->txLength) && - !mon->waitGreeting) - events |=3D VIR_EVENT_HANDLE_WRITABLE; - } - - virEventUpdateHandle(mon->watch, events); + qemuMonitorUnregister(mon); + if (mon->socket) + qemuMonitorRegister(mon); } =20 =20 -static void -qemuMonitorIO(int watch, int fd, int events, void *opaque) +static gboolean +qemuMonitorIO(GSocket *socket G_GNUC_UNUSED, + GIOCondition cond, + gpointer opaque) { qemuMonitorPtr mon =3D opaque; bool error =3D false; @@ -541,39 +529,29 @@ qemuMonitorIO(int watch, int fd, int events, void *op= aque) /* lock access to the monitor and protect fd */ virObjectLock(mon); #if DEBUG_IO - VIR_DEBUG("Monitor %p I/O on watch %d fd %d events %d", mon, watch, fd= , events); + VIR_DEBUG("Monitor %p I/O on socket %p cond %d", mon, socket, cond); #endif - if (mon->fd =3D=3D -1 || mon->watch =3D=3D 0) { + if (mon->fd =3D=3D -1 || !mon->watch) { virObjectUnlock(mon); virObjectUnref(mon); - return; + return G_SOURCE_REMOVE; } =20 - if (mon->fd !=3D fd || mon->watch !=3D watch) { - if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR)) - eof =3D true; - virReportError(VIR_ERR_INTERNAL_ERROR, - _("event from unexpected fd %d!=3D%d / watch %d!=3D= %d"), - mon->fd, fd, mon->watch, watch); - error =3D true; - } else if (mon->lastError.code !=3D VIR_ERR_OK) { - if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR)) + if (mon->lastError.code !=3D VIR_ERR_OK) { + if (cond & (G_IO_HUP | G_IO_ERR)) eof =3D true; error =3D true; } else { - if (events & VIR_EVENT_HANDLE_WRITABLE) { + if (cond & G_IO_OUT) { if (qemuMonitorIOWrite(mon) < 0) { error =3D true; if (errno =3D=3D ECONNRESET) hangup =3D true; } - events &=3D ~VIR_EVENT_HANDLE_WRITABLE; } =20 - if (!error && - events & VIR_EVENT_HANDLE_READABLE) { + if (!error && cond & G_IO_IN) { int got =3D qemuMonitorIORead(mon); - events &=3D ~VIR_EVENT_HANDLE_READABLE; if (got < 0) { error =3D true; if (errno =3D=3D ECONNRESET) @@ -581,37 +559,29 @@ qemuMonitorIO(int watch, int fd, int events, void *op= aque) } else if (got =3D=3D 0) { 
eof =3D true; } else { - /* Ignore hangup/error events if we read some data, to + /* Ignore hangup/error cond if we read some data, to * give time for that data to be consumed */ - events =3D 0; + cond =3D 0; =20 if (qemuMonitorIOProcess(mon) < 0) error =3D true; } } =20 - if (events & VIR_EVENT_HANDLE_HANGUP) { + if (cond & G_IO_HUP) { hangup =3D true; if (!error) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("End of file from qemu monitor")); eof =3D true; - events &=3D ~VIR_EVENT_HANDLE_HANGUP; } } =20 if (!error && !eof && - events & VIR_EVENT_HANDLE_ERROR) { + cond & G_IO_ERR) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid file descriptor while waiting for mo= nitor")); eof =3D true; - events &=3D ~VIR_EVENT_HANDLE_ERROR; - } - if (!error && events) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unhandled event %d for monitor fd %d"), - events, mon->fd); - error =3D true; } } =20 @@ -679,16 +649,20 @@ qemuMonitorIO(int watch, int fd, int events, void *op= aque) virObjectUnlock(mon); virObjectUnref(mon); } + + return G_SOURCE_REMOVE; } =20 =20 static qemuMonitorPtr qemuMonitorOpenInternal(virDomainObjPtr vm, int fd, + GMainContext *context, qemuMonitorCallbacksPtr cb, void *opaque) { qemuMonitorPtr mon; + g_autoptr(GError) gerr =3D NULL; =20 if (!cb->eofNotify) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -713,6 +687,7 @@ qemuMonitorOpenInternal(virDomainObjPtr vm, goto cleanup; } mon->fd =3D fd; + mon->context =3D g_main_context_ref(context); mon->vm =3D virObjectRef(vm); mon->waitGreeting =3D true; mon->cb =3D cb; @@ -723,20 +698,17 @@ qemuMonitorOpenInternal(virDomainObjPtr vm, "%s", _("Unable to set monitor close-on-exec flag")= ); goto cleanup; } - if (virSetNonBlock(mon->fd) < 0) { + + mon->socket =3D g_socket_new_from_fd(fd, &gerr); + if (!mon->socket) { virReportError(VIR_ERR_INTERNAL_ERROR, - "%s", _("Unable to put monitor into non-blocking mo= de")); + _("Unable to create socket object: %s"), + gerr->message); goto cleanup; } =20 - virObjectLock(mon); - if (!qemuMonitorRegister(mon)) { - virObjectUnlock(mon); - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("unable to register monitor events")); - goto cleanup; - } + qemuMonitorRegister(mon); =20 PROBE(QEMU_MONITOR_NEW, "mon=3D%p refs=3D%d fd=3D%d", @@ -782,6 +754,7 @@ qemuMonitorOpen(virDomainObjPtr vm, virDomainChrSourceDefPtr config, bool retry, unsigned long long timeout, + GMainContext *context, qemuMonitorCallbacksPtr cb, void *opaque) { @@ -815,7 +788,7 @@ qemuMonitorOpen(virDomainObjPtr vm, goto cleanup; } =20 - ret =3D qemuMonitorOpenInternal(vm, fd, cb, opaque); + ret =3D qemuMonitorOpenInternal(vm, fd, context, cb, opaque); cleanup: if (!ret) VIR_FORCE_CLOSE(fd); @@ -830,25 +803,32 @@ qemuMonitorOpen(virDomainObjPtr vm, * * Registers the monitor in the event loop. The caller has to hold the * lock for @mon. 
- * - * Returns true in case of success, false otherwise */ -bool +void qemuMonitorRegister(qemuMonitorPtr mon) { - virObjectRef(mon); - if ((mon->watch =3D virEventAddHandle(mon->fd, - VIR_EVENT_HANDLE_HANGUP | - VIR_EVENT_HANDLE_ERROR | - VIR_EVENT_HANDLE_READABLE, - qemuMonitorIO, - mon, - virObjectFreeCallback)) < 0) { - virObjectUnref(mon); - return false; + GIOCondition cond =3D 0; + + if (mon->lastError.code =3D=3D VIR_ERR_OK) { + cond |=3D G_IO_IN; + + if ((mon->msg && mon->msg->txOffset < mon->msg->txLength) && + !mon->waitGreeting) + cond |=3D G_IO_OUT; } =20 - return true; + mon->watch =3D g_socket_create_source(mon->socket, + cond, + NULL); + + virObjectRef(mon); + g_source_set_callback(mon->watch, + (GSourceFunc)qemuMonitorIO, + mon, + NULL); + + g_source_attach(mon->watch, + mon->context); } =20 =20 @@ -856,8 +836,9 @@ void qemuMonitorUnregister(qemuMonitorPtr mon) { if (mon->watch) { - virEventRemoveHandle(mon->watch); - mon->watch =3D 0; + g_source_destroy(mon->watch); + g_source_unref(mon->watch); + mon->watch =3D NULL; } } =20 @@ -873,9 +854,11 @@ qemuMonitorClose(qemuMonitorPtr mon) =20 qemuMonitorSetDomainLogLocked(mon, NULL, NULL, NULL); =20 - if (mon->fd >=3D 0) { + if (mon->socket) { qemuMonitorUnregister(mon); - VIR_FORCE_CLOSE(mon->fd); + g_object_unref(mon->socket); + mon->socket =3D NULL; + mon->fd =3D -1; } =20 /* In case another thread is waiting for its monitor command to be diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index c84cd425df..dd2aaa4691 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -391,11 +391,12 @@ qemuMonitorPtr qemuMonitorOpen(virDomainObjPtr vm, virDomainChrSourceDefPtr config, bool retry, unsigned long long timeout, + GMainContext *context, qemuMonitorCallbacksPtr cb, void *opaque) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5); =20 -bool qemuMonitorRegister(qemuMonitorPtr mon) +void qemuMonitorRegister(qemuMonitorPtr mon) ATTRIBUTE_NONNULL(1); void qemuMonitorUnregister(qemuMonitorPtr mon) ATTRIBUTE_NONNULL(1); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 7475813e9f..bc57474bdc 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1976,6 +1976,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomain= ObjPtr vm, int asyncJob, priv->monConfig, retry, timeout, + virEventThreadGetContext(priv->eventThread), &monitorCallbacks, driver); =20 @@ -8602,8 +8603,9 @@ qemuProcessQMPConnectMonitor(qemuProcessQMPPtr proc) =20 proc->vm->pid =3D proc->pid; =20 - if (!(proc->mon =3D qemuMonitorOpen(proc->vm, &monConfig, true, - 0, &callbacks, NULL))) + if (!(proc->mon =3D qemuMonitorOpen(proc->vm, &monConfig, true, 0, + virEventThreadGetContext(proc->event= Thread), + &callbacks, NULL))) goto cleanup; =20 virObjectLock(proc->mon); diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c index a1641050ea..3efdea9cce 100644 --- a/tests/qemumonitortestutils.c +++ b/tests/qemumonitortestutils.c @@ -1171,6 +1171,7 @@ qemuMonitorTestNew(virDomainXMLOptionPtr xmlopt, &src, true, 0, + virEventThreadGetContext(test->event= Thread), &qemuMonitorTestCallbacks, driver))) goto error; --=20 2.24.1 From nobody Fri Apr 26 11:13:18 2024 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of redhat.com designates 205.139.110.120 as permitted sender) client-ip=205.139.110.120; envelope-from=libvir-list-bounces@redhat.com; helo=us-smtp-1.mimecast.com; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of 
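The conversion above boils down to a registration pattern that can be shown in isolation: wrap an existing socket file descriptor in a GSocket, create a GSource for the I/O conditions of interest, and attach it to a chosen GMainContext instead of the process-global event loop. Below is a self-contained sketch under those assumptions; the demo_* names are hypothetical and error handling is trimmed.

#include <gio/gio.h>
#include <sys/socket.h>
#include <unistd.h>

static gboolean
demo_io_cb(GSocket *sock, GIOCondition cond, gpointer opaque)
{
    GMainLoop *loop = opaque;
    char buf[64];
    gssize n = 0;

    if (cond & G_IO_IN)
        n = g_socket_receive(sock, buf, sizeof(buf), NULL, NULL);

    if (n > 0) {
        g_print("read %d bytes\n", (int)n);
        return G_SOURCE_CONTINUE;
    }

    /* EOF, error or hangup: stop watching and leave the loop. */
    g_main_loop_quit(loop);
    return G_SOURCE_REMOVE;
}

int
main(void)
{
    int fds[2];
    GMainContext *ctx = g_main_context_new();   /* per-VM context stand-in */
    GMainLoop *loop = g_main_loop_new(ctx, FALSE);
    GSocket *sock;
    GSource *src;

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
        return 1;

    sock = g_socket_new_from_fd(fds[0], NULL);

    src = g_socket_create_source(sock, G_IO_IN | G_IO_HUP | G_IO_ERR, NULL);
    g_source_set_callback(src, (GSourceFunc)demo_io_cb, loop, NULL);
    g_source_attach(src, ctx);                  /* not the default context */

    /* Feed some data, then close the peer so the watch sees EOF. */
    if (write(fds[1], "hello", 5) < 0)
        return 1;
    close(fds[1]);

    g_main_loop_run(loop);

    g_source_destroy(src);
    g_source_unref(src);
    g_object_unref(sock);
    g_main_loop_unref(loop);
    g_main_context_unref(ctx);
    return 0;
}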
From: Daniel P. Berrangé
To: libvir-list@redhat.com
Subject: [libvirt PATCH 10/11] qemu: fix variable naming in agent code
Date: Fri, 14 Feb 2020 12:52:08 +0000
Message-Id: <20200214125209.1152894-11-berrange@redhat.com>
In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com>
References: <20200214125209.1152894-1-berrange@redhat.com>

We are dealing with the QEMU agent, not the monitor, so rename the
misleading "mon" variables and parameters to "agent".

Signed-off-by: Daniel P.
Berrang=C3=A9 --- src/qemu/qemu_agent.c | 498 +++++++++++++++++++++--------------------- 1 file changed, 249 insertions(+), 249 deletions(-) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 7ca5975a76..da1081b60b 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -64,7 +64,7 @@ VIR_LOG_INIT("qemu.qemu_agent"); * static struct { const char *type; - void (*handler)(qemuAgentPtr mon, virJSONValuePtr data); + void (*handler)(qemuAgentPtr agent, virJSONValuePtr data); } eventHandlers[] =3D { }; */ @@ -77,13 +77,13 @@ struct _qemuAgentMessage { int txOffset; int txLength; =20 - /* Used by the JSON monitor to hold reply / error */ + /* Used by the JSON agent to hold reply / error */ char *rxBuffer; int rxLength; void *rxObject; =20 /* True if rxBuffer / rxObject are ready, or a - * fatal error occurred on the monitor channel + * fatal error occurred on the agent channel */ bool finished; /* true for sync command */ @@ -112,18 +112,18 @@ struct _qemuAgent { * non-NULL */ qemuAgentMessagePtr msg; =20 - /* Buffer incoming data ready for Agent monitor + /* Buffer incoming data ready for agent * code to process & find message boundaries */ size_t bufferOffset; size_t bufferLength; char *buffer; =20 /* If anything went wrong, this will be fed back - * the next monitor msg */ + * the next agent msg */ virError lastError; =20 /* Some guest agent commands don't return anything - * but fire up an event on qemu monitor instead. + * but fire up an event on qemu agent instead. * Take that as indication of successful completion */ qemuAgentEvent await_event; int timeout; @@ -165,71 +165,71 @@ qemuAgentEscapeNonPrintable(const char *text) =20 static void qemuAgentDispose(void *obj) { - qemuAgentPtr mon =3D obj; - VIR_DEBUG("mon=3D%p", mon); - if (mon->cb && mon->cb->destroy) - (mon->cb->destroy)(mon, mon->vm); - virCondDestroy(&mon->notify); - VIR_FREE(mon->buffer); - virResetError(&mon->lastError); + qemuAgentPtr agent =3D obj; + VIR_DEBUG("agent=3D%p", agent); + if (agent->cb && agent->cb->destroy) + (agent->cb->destroy)(agent, agent->vm); + virCondDestroy(&agent->notify); + VIR_FREE(agent->buffer); + virResetError(&agent->lastError); } =20 static int -qemuAgentOpenUnix(const char *monitor) +qemuAgentOpenUnix(const char *socketpath) { struct sockaddr_un addr; - int monfd; + int agentfd; int ret =3D -1; =20 - if ((monfd =3D socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { + if ((agentfd =3D socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { virReportSystemError(errno, "%s", _("failed to create socket")); return -1; } =20 - if (virSetNonBlock(monfd) < 0) { + if (virSetNonBlock(agentfd) < 0) { virReportSystemError(errno, "%s", _("Unable to put monitor " "into non-blocking mode")); goto error; } =20 - if (virSetCloseExec(monfd) < 0) { + if (virSetCloseExec(agentfd) < 0) { virReportSystemError(errno, "%s", - _("Unable to set monitor " + _("Unable to set agent " "close-on-exec flag")); goto error; } =20 memset(&addr, 0, sizeof(addr)); addr.sun_family =3D AF_UNIX; - if (virStrcpyStatic(addr.sun_path, monitor) < 0) { + if (virStrcpyStatic(addr.sun_path, socketpath) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, - _("Agent path %s too big for destination"), monitor= ); + _("Socket path %s too big for destination"), socket= path); goto error; } =20 - ret =3D connect(monfd, (struct sockaddr *)&addr, sizeof(addr)); + ret =3D connect(agentfd, (struct sockaddr *)&addr, sizeof(addr)); if (ret < 0) { virReportSystemError(errno, "%s", - _("failed to connect to monitor socket")); + _("failed to connect to agent 
socket")); goto error; } =20 - return monfd; + return agentfd; =20 error: - VIR_FORCE_CLOSE(monfd); + VIR_FORCE_CLOSE(agentfd); return -1; } =20 =20 static int -qemuAgentIOProcessEvent(qemuAgentPtr mon, +qemuAgentIOProcessEvent(qemuAgentPtr agent, virJSONValuePtr obj) { const char *type; - VIR_DEBUG("mon=3D%p obj=3D%p", mon, obj); + VIR_DEBUG("agent=3D%p obj=3D%p", agent, obj); =20 type =3D virJSONValueObjectGetString(obj, "event"); if (!type) { @@ -244,7 +244,7 @@ qemuAgentIOProcessEvent(qemuAgentPtr mon, virJSONValuePtr data =3D virJSONValueObjectGet(obj, "data"); VIR_DEBUG("handle %s handler=3D%p data=3D%p", type, eventHandlers[i].handler, data); - (eventHandlers[i].handler)(mon, data); + (eventHandlers[i].handler)(agent, data); break; } } @@ -253,7 +253,7 @@ qemuAgentIOProcessEvent(qemuAgentPtr mon, } =20 static int -qemuAgentIOProcessLine(qemuAgentPtr mon, +qemuAgentIOProcessLine(qemuAgentPtr agent, const char *line, qemuAgentMessagePtr msg) { @@ -282,7 +282,7 @@ qemuAgentIOProcessLine(qemuAgentPtr mon, if (virJSONValueObjectHasKey(obj, "QMP") =3D=3D 1) { ret =3D 0; } else if (virJSONValueObjectHasKey(obj, "event") =3D=3D 1) { - ret =3D qemuAgentIOProcessEvent(mon, obj); + ret =3D qemuAgentIOProcessEvent(agent, obj); } else if (virJSONValueObjectHasKey(obj, "error") =3D=3D 1 || virJSONValueObjectHasKey(obj, "return") =3D=3D 1) { if (msg) { @@ -322,7 +322,7 @@ qemuAgentIOProcessLine(qemuAgentPtr mon, return ret; } =20 -static int qemuAgentIOProcessData(qemuAgentPtr mon, +static int qemuAgentIOProcessData(qemuAgentPtr agent, char *data, size_t len, qemuAgentMessagePtr msg) @@ -346,7 +346,7 @@ static int qemuAgentIOProcessData(qemuAgentPtr mon, int got =3D nl - (data + used); for (i =3D 0; i < strlen(LINE_ENDING); i++) data[used + got + i] =3D '\0'; - if (qemuAgentIOProcessLine(mon, data + used, msg) < 0) + if (qemuAgentIOProcessLine(agent, data + used, msg) < 0) return -1; used +=3D got + strlen(LINE_ENDING); } else { @@ -359,11 +359,11 @@ static int qemuAgentIOProcessData(qemuAgentPtr mon, } =20 /* This method processes data that has been received - * from the monitor. Looking for async events and + * from the agent. Looking for async events and * replies/errors. */ static int -qemuAgentIOProcess(qemuAgentPtr mon) +qemuAgentIOProcess(qemuAgentPtr agent) { int len; qemuAgentMessagePtr msg =3D NULL; @@ -371,97 +371,97 @@ qemuAgentIOProcess(qemuAgentPtr mon) /* See if there's a message ready for reply; that is, * one that has completed writing all its data. */ - if (mon->msg && mon->msg->txOffset =3D=3D mon->msg->txLength) - msg =3D mon->msg; + if (agent->msg && agent->msg->txOffset =3D=3D agent->msg->txLength) + msg =3D agent->msg; =20 #if DEBUG_IO # if DEBUG_RAW_IO char *str1 =3D qemuAgentEscapeNonPrintable(msg ? 
msg->txBuffer : ""); - char *str2 =3D qemuAgentEscapeNonPrintable(mon->buffer); + char *str2 =3D qemuAgentEscapeNonPrintable(agent->buffer); VIR_ERROR(_("Process %zu %p %p [[[%s]]][[[%s]]]"), - mon->bufferOffset, mon->msg, msg, str1, str2); + agent->bufferOffset, agent->msg, msg, str1, str2); VIR_FREE(str1); VIR_FREE(str2); # else - VIR_DEBUG("Process %zu", mon->bufferOffset); + VIR_DEBUG("Process %zu", agent->bufferOffset); # endif #endif =20 - len =3D qemuAgentIOProcessData(mon, - mon->buffer, mon->bufferOffset, + len =3D qemuAgentIOProcessData(agent, + agent->buffer, agent->bufferOffset, msg); =20 if (len < 0) return -1; =20 - if (len < mon->bufferOffset) { - memmove(mon->buffer, mon->buffer + len, mon->bufferOffset - len); - mon->bufferOffset -=3D len; + if (len < agent->bufferOffset) { + memmove(agent->buffer, agent->buffer + len, agent->bufferOffset - = len); + agent->bufferOffset -=3D len; } else { - VIR_FREE(mon->buffer); - mon->bufferOffset =3D mon->bufferLength =3D 0; + VIR_FREE(agent->buffer); + agent->bufferOffset =3D agent->bufferLength =3D 0; } #if DEBUG_IO - VIR_DEBUG("Process done %zu used %d", mon->bufferOffset, len); + VIR_DEBUG("Process done %zu used %d", agent->bufferOffset, len); #endif if (msg && msg->finished) - virCondBroadcast(&mon->notify); + virCondBroadcast(&agent->notify); return len; } =20 =20 /* - * Called when the monitor is able to write data - * Call this function while holding the monitor lock. + * Called when the agent is able to write data + * Call this function while holding the agent lock. */ static int -qemuAgentIOWrite(qemuAgentPtr mon) +qemuAgentIOWrite(qemuAgentPtr agent) { int done; =20 /* If no active message, or fully transmitted, then no-op */ - if (!mon->msg || mon->msg->txOffset =3D=3D mon->msg->txLength) + if (!agent->msg || agent->msg->txOffset =3D=3D agent->msg->txLength) return 0; =20 - done =3D safewrite(mon->fd, - mon->msg->txBuffer + mon->msg->txOffset, - mon->msg->txLength - mon->msg->txOffset); + done =3D safewrite(agent->fd, + agent->msg->txBuffer + agent->msg->txOffset, + agent->msg->txLength - agent->msg->txOffset); =20 if (done < 0) { if (errno =3D=3D EAGAIN) return 0; =20 virReportSystemError(errno, "%s", - _("Unable to write to monitor")); + _("Unable to write to agent")); return -1; } - mon->msg->txOffset +=3D done; + agent->msg->txOffset +=3D done; return done; } =20 /* - * Called when the monitor has incoming data to read - * Call this function while holding the monitor lock. + * Called when the agent has incoming data to read + * Call this function while holding the agent lock. 
* * Returns -1 on error, or number of bytes read */ static int -qemuAgentIORead(qemuAgentPtr mon) +qemuAgentIORead(qemuAgentPtr agent) { - size_t avail =3D mon->bufferLength - mon->bufferOffset; + size_t avail =3D agent->bufferLength - agent->bufferOffset; int ret =3D 0; =20 if (avail < 1024) { - if (mon->bufferLength >=3D QEMU_AGENT_MAX_RESPONSE) { + if (agent->bufferLength >=3D QEMU_AGENT_MAX_RESPONSE) { virReportSystemError(ERANGE, _("No complete agent response found in %d= bytes"), QEMU_AGENT_MAX_RESPONSE); return -1; } - if (VIR_REALLOC_N(mon->buffer, - mon->bufferLength + 1024) < 0) + if (VIR_REALLOC_N(agent->buffer, + agent->bufferLength + 1024) < 0) return -1; - mon->bufferLength +=3D 1024; + agent->bufferLength +=3D 1024; avail +=3D 1024; } =20 @@ -469,14 +469,14 @@ qemuAgentIORead(qemuAgentPtr mon) until we block on EAGAIN, or hit EOF */ while (avail > 1) { int got; - got =3D read(mon->fd, - mon->buffer + mon->bufferOffset, + got =3D read(agent->fd, + agent->buffer + agent->bufferOffset, avail - 1); if (got < 0) { if (errno =3D=3D EAGAIN) break; virReportSystemError(errno, "%s", - _("Unable to read from monitor")); + _("Unable to read from agent")); ret =3D -1; break; } @@ -485,79 +485,79 @@ qemuAgentIORead(qemuAgentPtr mon) =20 ret +=3D got; avail -=3D got; - mon->bufferOffset +=3D got; - mon->buffer[mon->bufferOffset] =3D '\0'; + agent->bufferOffset +=3D got; + agent->buffer[agent->bufferOffset] =3D '\0'; } =20 #if DEBUG_IO - VIR_DEBUG("Now read %zu bytes of data", mon->bufferOffset); + VIR_DEBUG("Now read %zu bytes of data", agent->bufferOffset); #endif =20 return ret; } =20 =20 -static void qemuAgentUpdateWatch(qemuAgentPtr mon) +static void qemuAgentUpdateWatch(qemuAgentPtr agent) { int events =3D VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR; =20 - if (!mon->watch) + if (!agent->watch) return; =20 - if (mon->lastError.code =3D=3D VIR_ERR_OK) { + if (agent->lastError.code =3D=3D VIR_ERR_OK) { events |=3D VIR_EVENT_HANDLE_READABLE; =20 - if (mon->msg && mon->msg->txOffset < mon->msg->txLength) + if (agent->msg && agent->msg->txOffset < agent->msg->txLength) events |=3D VIR_EVENT_HANDLE_WRITABLE; } =20 - virEventUpdateHandle(mon->watch, events); + virEventUpdateHandle(agent->watch, events); } =20 =20 static void qemuAgentIO(int watch, int fd, int events, void *opaque) { - qemuAgentPtr mon =3D opaque; + qemuAgentPtr agent =3D opaque; bool error =3D false; bool eof =3D false; =20 - virObjectRef(mon); - /* lock access to the monitor and protect fd */ - virObjectLock(mon); + virObjectRef(agent); + /* lock access to the agent and protect fd */ + virObjectLock(agent); #if DEBUG_IO - VIR_DEBUG("Agent %p I/O on watch %d fd %d events %d", mon, watch, fd, = events); + VIR_DEBUG("Agent %p I/O on watch %d fd %d events %d", agent, watch, fd= , events); #endif =20 - if (mon->fd =3D=3D -1 || mon->watch =3D=3D 0) { - virObjectUnlock(mon); - virObjectUnref(mon); + if (agent->fd =3D=3D -1 || agent->watch =3D=3D 0) { + virObjectUnlock(agent); + virObjectUnref(agent); return; } =20 - if (mon->fd !=3D fd || mon->watch !=3D watch) { + if (agent->fd !=3D fd || agent->watch !=3D watch) { if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR)) eof =3D true; virReportError(VIR_ERR_INTERNAL_ERROR, _("event from unexpected fd %d!=3D%d / watch %d!=3D= %d"), - mon->fd, fd, mon->watch, watch); + agent->fd, fd, agent->watch, watch); error =3D true; - } else if (mon->lastError.code !=3D VIR_ERR_OK) { + } else if (agent->lastError.code !=3D VIR_ERR_OK) { if (events & (VIR_EVENT_HANDLE_HANGUP | 
VIR_EVENT_HANDLE_ERROR)) eof =3D true; error =3D true; } else { if (events & VIR_EVENT_HANDLE_WRITABLE) { - if (qemuAgentIOWrite(mon) < 0) + if (qemuAgentIOWrite(agent) < 0) error =3D true; events &=3D ~VIR_EVENT_HANDLE_WRITABLE; } =20 if (!error && events & VIR_EVENT_HANDLE_READABLE) { - int got =3D qemuAgentIORead(mon); + int got =3D qemuAgentIORead(agent); events &=3D ~VIR_EVENT_HANDLE_READABLE; if (got < 0) { error =3D true; @@ -568,7 +568,7 @@ qemuAgentIO(int watch, int fd, int events, void *opaque) * give time for that data to be consumed */ events =3D 0; =20 - if (qemuAgentIOProcess(mon) < 0) + if (qemuAgentIOProcess(agent) < 0) error =3D true; } } @@ -576,7 +576,7 @@ qemuAgentIO(int watch, int fd, int events, void *opaque) if (!error && events & VIR_EVENT_HANDLE_HANGUP) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("End of file from agent monitor")); + _("End of file from agent socket")); eof =3D true; events &=3D ~VIR_EVENT_HANDLE_HANGUP; } @@ -584,69 +584,69 @@ qemuAgentIO(int watch, int fd, int events, void *opaq= ue) if (!error && !eof && events & VIR_EVENT_HANDLE_ERROR) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("Invalid file descriptor while waiting for mo= nitor")); + _("Invalid file descriptor while waiting for ag= ent")); eof =3D true; events &=3D ~VIR_EVENT_HANDLE_ERROR; } if (!error && events) { virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unhandled event %d for monitor fd %d"), - events, mon->fd); + _("Unhandled event %d for agent fd %d"), + events, agent->fd); error =3D true; } } =20 if (error || eof) { - if (mon->lastError.code !=3D VIR_ERR_OK) { + if (agent->lastError.code !=3D VIR_ERR_OK) { /* Already have an error, so clear any new error */ virResetLastError(); } else { if (virGetLastErrorCode() =3D=3D VIR_ERR_OK) virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("Error while processing monitor IO")); - virCopyLastError(&mon->lastError); + _("Error while processing agent IO")); + virCopyLastError(&agent->lastError); virResetLastError(); } =20 - VIR_DEBUG("Error on monitor %s", NULLSTR(mon->lastError.message)); + VIR_DEBUG("Error on agent %s", NULLSTR(agent->lastError.message)); /* If IO process resulted in an error & we have a message, * then wakeup that waiter */ - if (mon->msg && !mon->msg->finished) { - mon->msg->finished =3D 1; - virCondSignal(&mon->notify); + if (agent->msg && !agent->msg->finished) { + agent->msg->finished =3D 1; + virCondSignal(&agent->notify); } } =20 - qemuAgentUpdateWatch(mon); + qemuAgentUpdateWatch(agent); =20 /* We have to unlock to avoid deadlock against command thread, * but is this safe ? 
I think it is, because the callback * will try to acquire the virDomainObjPtr mutex next */ if (eof) { void (*eofNotify)(qemuAgentPtr, virDomainObjPtr) - =3D mon->cb->eofNotify; - virDomainObjPtr vm =3D mon->vm; + =3D agent->cb->eofNotify; + virDomainObjPtr vm =3D agent->vm; =20 /* Make sure anyone waiting wakes up now */ - virCondSignal(&mon->notify); - virObjectUnlock(mon); - virObjectUnref(mon); + virCondSignal(&agent->notify); + virObjectUnlock(agent); + virObjectUnref(agent); VIR_DEBUG("Triggering EOF callback"); - (eofNotify)(mon, vm); + (eofNotify)(agent, vm); } else if (error) { void (*errorNotify)(qemuAgentPtr, virDomainObjPtr) - =3D mon->cb->errorNotify; - virDomainObjPtr vm =3D mon->vm; + =3D agent->cb->errorNotify; + virDomainObjPtr vm =3D agent->vm; =20 /* Make sure anyone waiting wakes up now */ - virCondSignal(&mon->notify); - virObjectUnlock(mon); - virObjectUnref(mon); + virCondSignal(&agent->notify); + virObjectUnlock(agent); + virObjectUnref(agent); VIR_DEBUG("Triggering error callback"); - (errorNotify)(mon, vm); + (errorNotify)(agent, vm); } else { - virObjectUnlock(mon); - virObjectUnref(mon); + virObjectUnlock(agent); + virObjectUnref(agent); } } =20 @@ -656,7 +656,7 @@ qemuAgentOpen(virDomainObjPtr vm, const virDomainChrSourceDef *config, qemuAgentCallbacksPtr cb) { - qemuAgentPtr mon; + qemuAgentPtr agent; =20 if (!cb || !cb->eofNotify) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -667,49 +667,49 @@ qemuAgentOpen(virDomainObjPtr vm, if (qemuAgentInitialize() < 0) return NULL; =20 - if (!(mon =3D virObjectLockableNew(qemuAgentClass))) + if (!(agent =3D virObjectLockableNew(qemuAgentClass))) return NULL; =20 - mon->timeout =3D QEMU_DOMAIN_PRIVATE(vm)->agentTimeout; - mon->fd =3D -1; - if (virCondInit(&mon->notify) < 0) { + agent->timeout =3D QEMU_DOMAIN_PRIVATE(vm)->agentTimeout; + agent->fd =3D -1; + if (virCondInit(&agent->notify) < 0) { virReportSystemError(errno, "%s", - _("cannot initialize monitor condition")); - virObjectUnref(mon); + _("cannot initialize agent condition")); + virObjectUnref(agent); return NULL; } - mon->vm =3D vm; - mon->cb =3D cb; + agent->vm =3D vm; + agent->cb =3D cb; =20 if (config->type !=3D VIR_DOMAIN_CHR_TYPE_UNIX) { virReportError(VIR_ERR_INTERNAL_ERROR, - _("unable to handle monitor type: %s"), + _("unable to handle agent type: %s"), virDomainChrTypeToString(config->type)); goto cleanup; } =20 - mon->fd =3D qemuAgentOpenUnix(config->data.nix.path); - if (mon->fd =3D=3D -1) + agent->fd =3D qemuAgentOpenUnix(config->data.nix.path); + if (agent->fd =3D=3D -1) goto cleanup; =20 - virObjectRef(mon); - if ((mon->watch =3D virEventAddHandle(mon->fd, + virObjectRef(agent); + if ((agent->watch =3D virEventAddHandle(agent->fd, VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR | VIR_EVENT_HANDLE_READABLE, qemuAgentIO, - mon, + agent, virObjectFreeCallback)) < 0) { - virObjectUnref(mon); + virObjectUnref(agent); virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("unable to register monitor events")); + _("unable to register agent events")); goto cleanup; } =20 - mon->running =3D true; - VIR_DEBUG("New mon %p fd =3D%d watch=3D%d", mon, mon->fd, mon->watch); + agent->running =3D true; + VIR_DEBUG("New agent %p fd =3D%d watch=3D%d", agent, agent->fd, agent-= >watch); =20 - return mon; + return agent; =20 cleanup: /* We don't want the 'destroy' callback invoked during @@ -717,75 +717,75 @@ qemuAgentOpen(virDomainObjPtr vm, * give a double-unref on virDomainObjPtr in the caller, * so kill the callbacks now. 
*/ - mon->cb =3D NULL; - qemuAgentClose(mon); + agent->cb =3D NULL; + qemuAgentClose(agent); return NULL; } =20 =20 static void -qemuAgentNotifyCloseLocked(qemuAgentPtr mon) +qemuAgentNotifyCloseLocked(qemuAgentPtr agent) { - if (mon) { - mon->running =3D false; + if (agent) { + agent->running =3D false; =20 /* If there is somebody waiting for a message * wake him up. No message will arrive anyway. */ - if (mon->msg && !mon->msg->finished) { - mon->msg->finished =3D 1; - virCondSignal(&mon->notify); + if (agent->msg && !agent->msg->finished) { + agent->msg->finished =3D 1; + virCondSignal(&agent->notify); } } } =20 =20 void -qemuAgentNotifyClose(qemuAgentPtr mon) +qemuAgentNotifyClose(qemuAgentPtr agent) { - if (!mon) + if (!agent) return; =20 - VIR_DEBUG("mon=3D%p", mon); + VIR_DEBUG("agent=3D%p", agent); =20 - virObjectLock(mon); - qemuAgentNotifyCloseLocked(mon); - virObjectUnlock(mon); + virObjectLock(agent); + qemuAgentNotifyCloseLocked(agent); + virObjectUnlock(agent); } =20 =20 -void qemuAgentClose(qemuAgentPtr mon) +void qemuAgentClose(qemuAgentPtr agent) { - if (!mon) + if (!agent) return; =20 - VIR_DEBUG("mon=3D%p", mon); + VIR_DEBUG("agent=3D%p", agent); =20 - virObjectLock(mon); + virObjectLock(agent); =20 - if (mon->fd >=3D 0) { - if (mon->watch) { - virEventRemoveHandle(mon->watch); - mon->watch =3D 0; + if (agent->fd >=3D 0) { + if (agent->watch) { + virEventRemoveHandle(agent->watch); + agent->watch =3D 0; } - VIR_FORCE_CLOSE(mon->fd); + VIR_FORCE_CLOSE(agent->fd); } =20 - qemuAgentNotifyCloseLocked(mon); - virObjectUnlock(mon); + qemuAgentNotifyCloseLocked(agent); + virObjectUnlock(agent); =20 - virObjectUnref(mon); + virObjectUnref(agent); } =20 #define QEMU_AGENT_WAIT_TIME 5 =20 /** * qemuAgentSend: - * @mon: Monitor + * @agent: agent object * @msg: Message * @seconds: number of seconds to wait for the result, it can be either * -2, -1, 0 or positive. * - * Send @msg to agent @mon. If @seconds is equal to + * Send @msg to agent @agent. If @seconds is equal to * VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK(-2), this function will block forev= er * waiting for the result. 
The value of * VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT(-1) means use default timeout val= ue @@ -797,7 +797,7 @@ void qemuAgentClose(qemuAgentPtr mon) * -2 on timeout, * -1 otherwise */ -static int qemuAgentSend(qemuAgentPtr mon, +static int qemuAgentSend(qemuAgentPtr agent, qemuAgentMessagePtr msg, int seconds) { @@ -805,10 +805,10 @@ static int qemuAgentSend(qemuAgentPtr mon, unsigned long long then =3D 0; =20 /* Check whether qemu quit unexpectedly */ - if (mon->lastError.code !=3D VIR_ERR_OK) { + if (agent->lastError.code !=3D VIR_ERR_OK) { VIR_DEBUG("Attempt to send command while error is set %s", - NULLSTR(mon->lastError.message)); - virSetError(&mon->lastError); + NULLSTR(agent->lastError.message)); + virSetError(&agent->lastError); return -1; } =20 @@ -821,37 +821,37 @@ static int qemuAgentSend(qemuAgentPtr mon, then =3D now + seconds * 1000ull; } =20 - mon->msg =3D msg; - qemuAgentUpdateWatch(mon); + agent->msg =3D msg; + qemuAgentUpdateWatch(agent); =20 - while (!mon->msg->finished) { - if ((then && virCondWaitUntil(&mon->notify, &mon->parent.lock, the= n) < 0) || - (!then && virCondWait(&mon->notify, &mon->parent.lock) < 0)) { + while (!agent->msg->finished) { + if ((then && virCondWaitUntil(&agent->notify, &agent->parent.lock,= then) < 0) || + (!then && virCondWait(&agent->notify, &agent->parent.lock) < 0= )) { if (errno =3D=3D ETIMEDOUT) { virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s", _("Guest agent not available for now")); ret =3D -2; } else { virReportSystemError(errno, "%s", - _("Unable to wait on agent monitor " + _("Unable to wait on agent socket " "condition")); } goto cleanup; } } =20 - if (mon->lastError.code !=3D VIR_ERR_OK) { + if (agent->lastError.code !=3D VIR_ERR_OK) { VIR_DEBUG("Send command resulted in error %s", - NULLSTR(mon->lastError.message)); - virSetError(&mon->lastError); + NULLSTR(agent->lastError.message)); + virSetError(&agent->lastError); goto cleanup; } =20 ret =3D 0; =20 cleanup: - mon->msg =3D NULL; - qemuAgentUpdateWatch(mon); + agent->msg =3D NULL; + qemuAgentUpdateWatch(agent); =20 return ret; } @@ -859,7 +859,7 @@ static int qemuAgentSend(qemuAgentPtr mon, =20 /** * qemuAgentGuestSync: - * @mon: Monitor + * @agent: agent object * * Send guest-sync with unique ID * and wait for reply. 
If we get one, check if @@ -869,7 +869,7 @@ static int qemuAgentSend(qemuAgentPtr mon, * -1 otherwise */ static int -qemuAgentGuestSync(qemuAgentPtr mon) +qemuAgentGuestSync(qemuAgentPtr agent) { int ret =3D -1; int send_ret; @@ -879,8 +879,8 @@ qemuAgentGuestSync(qemuAgentPtr mon) =20 /* if user specified a custom agent timeout that is lower than the * default timeout, use the shorter timeout instead */ - if ((mon->timeout >=3D 0) && (mon->timeout < timeout)) - timeout =3D mon->timeout; + if ((agent->timeout >=3D 0) && (agent->timeout < timeout)) + timeout =3D agent->timeout; =20 memset(&sync_msg, 0, sizeof(sync_msg)); /* set only on first sync */ @@ -899,7 +899,7 @@ qemuAgentGuestSync(qemuAgentPtr mon) =20 VIR_DEBUG("Sending guest-sync command with ID: %llu", id); =20 - send_ret =3D qemuAgentSend(mon, &sync_msg, timeout); + send_ret =3D qemuAgentSend(agent, &sync_msg, timeout); =20 VIR_DEBUG("qemuAgentSend returned: %d", send_ret); =20 @@ -912,9 +912,9 @@ qemuAgentGuestSync(qemuAgentPtr mon) memset(&sync_msg, 0, sizeof(sync_msg)); goto retry; } else { - if (mon->running) + if (agent->running) virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("Missing monitor reply object")); + _("Missing agent reply object")); else virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s", _("Guest agent disappeared while executing = command")); @@ -1065,7 +1065,7 @@ qemuAgentCheckError(virJSONValuePtr cmd, } =20 static int -qemuAgentCommand(qemuAgentPtr mon, +qemuAgentCommand(qemuAgentPtr agent, virJSONValuePtr cmd, virJSONValuePtr *reply, bool needReply, @@ -1074,17 +1074,17 @@ qemuAgentCommand(qemuAgentPtr mon, int ret =3D -1; qemuAgentMessage msg; char *cmdstr =3D NULL; - int await_event =3D mon->await_event; + int await_event =3D agent->await_event; =20 *reply =3D NULL; =20 - if (!mon->running) { + if (!agent->running) { virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s", _("Guest agent disappeared while executing command"= )); return -1; } =20 - if (qemuAgentGuestSync(mon) < 0) + if (qemuAgentGuestSync(agent) < 0) return -1; =20 memset(&msg, 0, sizeof(msg)); @@ -1096,7 +1096,7 @@ qemuAgentCommand(qemuAgentPtr mon, =20 VIR_DEBUG("Send command '%s' for write, seconds =3D %d", cmdstr, secon= ds); =20 - ret =3D qemuAgentSend(mon, &msg, seconds); + ret =3D qemuAgentSend(agent, &msg, seconds); =20 VIR_DEBUG("Receive command reply ret=3D%d rxObject=3D%p", ret, msg.rxObject); @@ -1108,9 +1108,9 @@ qemuAgentCommand(qemuAgentPtr mon, if (await_event && !needReply) { VIR_DEBUG("Woken up by event %d", await_event); } else { - if (mon->running) + if (agent->running) virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("Missing monitor reply object")); + _("Missing agent reply object")); else virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s", _("Guest agent disappeared while execut= ing command")); @@ -1186,22 +1186,22 @@ qemuAgentMakeStringsArray(const char **strings, uns= igned int len) return NULL; } =20 -void qemuAgentNotifyEvent(qemuAgentPtr mon, +void qemuAgentNotifyEvent(qemuAgentPtr agent, qemuAgentEvent event) { - virObjectLock(mon); + virObjectLock(agent); =20 - VIR_DEBUG("mon=3D%p event=3D%d await_event=3D%d", mon, event, mon->awa= it_event); - if (mon->await_event =3D=3D event) { - mon->await_event =3D QEMU_AGENT_EVENT_NONE; + VIR_DEBUG("agent=3D%p event=3D%d await_event=3D%d", agent, event, agen= t->await_event); + if (agent->await_event =3D=3D event) { + agent->await_event =3D QEMU_AGENT_EVENT_NONE; /* somebody waiting for this event, wake him up. 
*/ - if (mon->msg && !mon->msg->finished) { - mon->msg->finished =3D 1; - virCondSignal(&mon->notify); + if (agent->msg && !agent->msg->finished) { + agent->msg->finished =3D 1; + virCondSignal(&agent->notify); } } =20 - virObjectUnlock(mon); + virObjectUnlock(agent); } =20 VIR_ENUM_DECL(qemuAgentShutdownMode); @@ -1211,7 +1211,7 @@ VIR_ENUM_IMPL(qemuAgentShutdownMode, "powerdown", "reboot", "halt", ); =20 -int qemuAgentShutdown(qemuAgentPtr mon, +int qemuAgentShutdown(qemuAgentPtr agent, qemuAgentShutdownMode mode) { int ret =3D -1; @@ -1225,10 +1225,10 @@ int qemuAgentShutdown(qemuAgentPtr mon, return -1; =20 if (mode =3D=3D QEMU_AGENT_SHUTDOWN_REBOOT) - mon->await_event =3D QEMU_AGENT_EVENT_RESET; + agent->await_event =3D QEMU_AGENT_EVENT_RESET; else - mon->await_event =3D QEMU_AGENT_EVENT_SHUTDOWN; - ret =3D qemuAgentCommand(mon, cmd, &reply, false, + agent->await_event =3D QEMU_AGENT_EVENT_SHUTDOWN; + ret =3D qemuAgentCommand(agent, cmd, &reply, false, VIR_DOMAIN_QEMU_AGENT_COMMAND_SHUTDOWN); =20 virJSONValueFree(cmd); @@ -1238,7 +1238,7 @@ int qemuAgentShutdown(qemuAgentPtr mon, =20 /* * qemuAgentFSFreeze: - * @mon: Agent + * @agent: agent object * @mountpoints: Array of mountpoint paths to be frozen, or NULL for all * @nmountpoints: Number of mountpoints to be frozen, or 0 for all * @@ -1250,7 +1250,7 @@ int qemuAgentShutdown(qemuAgentPtr mon, * Returns: number of file system frozen on success, * -1 on error. */ -int qemuAgentFSFreeze(qemuAgentPtr mon, const char **mountpoints, +int qemuAgentFSFreeze(qemuAgentPtr agent, const char **mountpoints, unsigned int nmountpoints) { int ret =3D -1; @@ -1271,7 +1271,7 @@ int qemuAgentFSFreeze(qemuAgentPtr mon, const char **= mountpoints, if (!cmd) goto cleanup; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { @@ -1288,7 +1288,7 @@ int qemuAgentFSFreeze(qemuAgentPtr mon, const char **= mountpoints, =20 /* * qemuAgentFSThaw: - * @mon: Agent + * @agent: agent object * * Issue guest-fsfreeze-thaw command to guest agent, * which unfreezes all mounted file systems and returns @@ -1297,7 +1297,7 @@ int qemuAgentFSFreeze(qemuAgentPtr mon, const char **= mountpoints, * Returns: number of file system thawed on success, * -1 on error. 
*/ -int qemuAgentFSThaw(qemuAgentPtr mon) +int qemuAgentFSThaw(qemuAgentPtr agent) { int ret =3D -1; virJSONValuePtr cmd; @@ -1308,7 +1308,7 @@ int qemuAgentFSThaw(qemuAgentPtr mon) if (!cmd) return -1; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { @@ -1332,7 +1332,7 @@ VIR_ENUM_IMPL(qemuAgentSuspendMode, ); =20 int -qemuAgentSuspend(qemuAgentPtr mon, +qemuAgentSuspend(qemuAgentPtr agent, unsigned int target) { int ret =3D -1; @@ -1344,8 +1344,8 @@ qemuAgentSuspend(qemuAgentPtr mon, if (!cmd) return -1; =20 - mon->await_event =3D QEMU_AGENT_EVENT_SUSPEND; - ret =3D qemuAgentCommand(mon, cmd, &reply, false, mon->timeout); + agent->await_event =3D QEMU_AGENT_EVENT_SUSPEND; + ret =3D qemuAgentCommand(agent, cmd, &reply, false, agent->timeout); =20 virJSONValueFree(cmd); virJSONValueFree(reply); @@ -1353,7 +1353,7 @@ qemuAgentSuspend(qemuAgentPtr mon, } =20 int -qemuAgentArbitraryCommand(qemuAgentPtr mon, +qemuAgentArbitraryCommand(qemuAgentPtr agent, const char *cmd_str, char **result, int timeout) @@ -1374,7 +1374,7 @@ qemuAgentArbitraryCommand(qemuAgentPtr mon, if (!(cmd =3D virJSONValueFromString(cmd_str))) goto cleanup; =20 - if ((ret =3D qemuAgentCommand(mon, cmd, &reply, true, timeout)) < 0) + if ((ret =3D qemuAgentCommand(agent, cmd, &reply, true, timeout)) < 0) goto cleanup; =20 if (!(*result =3D virJSONValueToString(reply, false))) @@ -1388,7 +1388,7 @@ qemuAgentArbitraryCommand(qemuAgentPtr mon, } =20 int -qemuAgentFSTrim(qemuAgentPtr mon, +qemuAgentFSTrim(qemuAgentPtr agent, unsigned long long minimum) { int ret =3D -1; @@ -1401,7 +1401,7 @@ qemuAgentFSTrim(qemuAgentPtr mon, if (!cmd) return ret; =20 - ret =3D qemuAgentCommand(mon, cmd, &reply, false, mon->timeout); + ret =3D qemuAgentCommand(agent, cmd, &reply, false, agent->timeout); =20 virJSONValueFree(cmd); virJSONValueFree(reply); @@ -1409,7 +1409,7 @@ qemuAgentFSTrim(qemuAgentPtr mon, } =20 int -qemuAgentGetVCPUs(qemuAgentPtr mon, +qemuAgentGetVCPUs(qemuAgentPtr agent, qemuAgentCPUInfoPtr *info) { int ret =3D -1; @@ -1422,7 +1422,7 @@ qemuAgentGetVCPUs(qemuAgentPtr mon, if (!(cmd =3D qemuAgentMakeCommand("guest-get-vcpus", NULL))) return -1; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 if (!(data =3D virJSONValueObjectGetArray(reply, "return"))) { @@ -1484,7 +1484,7 @@ qemuAgentGetVCPUs(qemuAgentPtr mon, =20 /* returns the value provided by the guest agent or -1 on internal error */ static int -qemuAgentSetVCPUsCommand(qemuAgentPtr mon, +qemuAgentSetVCPUsCommand(qemuAgentPtr agent, qemuAgentCPUInfoPtr info, size_t ninfo, int *nmodified) @@ -1536,7 +1536,7 @@ qemuAgentSetVCPUsCommand(qemuAgentPtr mon, NULL))) goto cleanup; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 /* All negative values are invalid. Return of 0 is bogus since we woul= dn't @@ -1567,7 +1567,7 @@ qemuAgentSetVCPUsCommand(qemuAgentPtr mon, * Returns -1 on error, 0 on success. 
*/ int -qemuAgentSetVCPUs(qemuAgentPtr mon, +qemuAgentSetVCPUs(qemuAgentPtr agent, qemuAgentCPUInfoPtr info, size_t ninfo) { @@ -1576,7 +1576,7 @@ qemuAgentSetVCPUs(qemuAgentPtr mon, size_t i; =20 do { - if ((rv =3D qemuAgentSetVCPUsCommand(mon, info, ninfo, &nmodified)= ) < 0) + if ((rv =3D qemuAgentSetVCPUsCommand(agent, info, ninfo, &nmodifie= d)) < 0) return -1; =20 /* all vcpus were set successfully */ @@ -1676,7 +1676,7 @@ qemuAgentUpdateCPUInfo(unsigned int nvcpus, =20 =20 int -qemuAgentGetHostname(qemuAgentPtr mon, +qemuAgentGetHostname(qemuAgentPtr agent, char **hostname) { int ret =3D -1; @@ -1691,7 +1691,7 @@ qemuAgentGetHostname(qemuAgentPtr mon, if (!cmd) return ret; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) { + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) { if (qemuAgentErrorCommandUnsupported(reply)) ret =3D -2; goto cleanup; @@ -1721,7 +1721,7 @@ qemuAgentGetHostname(qemuAgentPtr mon, =20 =20 int -qemuAgentGetTime(qemuAgentPtr mon, +qemuAgentGetTime(qemuAgentPtr agent, long long *seconds, unsigned int *nseconds) { @@ -1735,7 +1735,7 @@ qemuAgentGetTime(qemuAgentPtr mon, if (!cmd) return ret; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 if (virJSONValueObjectGetNumberUlong(reply, "return", &json_time) < 0)= { @@ -1763,7 +1763,7 @@ qemuAgentGetTime(qemuAgentPtr mon, * @sync: let guest agent to read domain's RTC (@setTime is ignored) */ int -qemuAgentSetTime(qemuAgentPtr mon, +qemuAgentSetTime(qemuAgentPtr agent, long long seconds, unsigned int nseconds, bool rtcSync) @@ -1780,7 +1780,7 @@ qemuAgentSetTime(qemuAgentPtr mon, long long json_time; =20 /* Check if we overflow. For some reason qemu doesn't handle unsig= ned - * long long on the monitor well as it silently truncates numbers = to + * long long on the agent well as it silently truncates numbers to * signed long long. Therefore we must check overflow against LLON= G_MAX * not ULLONG_MAX. */ if (seconds > LLONG_MAX / 1000000000LL) { @@ -1800,7 +1800,7 @@ qemuAgentSetTime(qemuAgentPtr mon, if (!cmd) return ret; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 ret =3D 0; @@ -1922,7 +1922,7 @@ qemuAgentGetFSInfoFillDisks(virJSONValuePtr jsondisks, * -1 otherwise */ int -qemuAgentGetFSInfo(qemuAgentPtr mon, +qemuAgentGetFSInfo(qemuAgentPtr agent, qemuAgentFSInfoPtr **info) { size_t i; @@ -1937,7 +1937,7 @@ qemuAgentGetFSInfo(qemuAgentPtr mon, if (!cmd) return ret; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) { + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) { if (qemuAgentErrorCommandUnsupported(reply)) ret =3D -2; goto cleanup; @@ -2055,7 +2055,7 @@ qemuAgentGetFSInfo(qemuAgentPtr mon, =20 /* * qemuAgentGetInterfaces: - * @mon: Agent monitor + * @agent: agent object * @ifaces: pointer to an array of pointers pointing to interface objects * * Issue guest-network-get-interfaces to guest agent, which returns a @@ -2065,7 +2065,7 @@ qemuAgentGetFSInfo(qemuAgentPtr mon, * Returns: number of interfaces on success, -1 on error. 
*/ int -qemuAgentGetInterfaces(qemuAgentPtr mon, +qemuAgentGetInterfaces(qemuAgentPtr agent, virDomainInterfacePtr **ifaces) { int ret =3D -1; @@ -2088,7 +2088,7 @@ qemuAgentGetInterfaces(qemuAgentPtr mon, if (!(cmd =3D qemuAgentMakeCommand("guest-network-get-interfaces", NUL= L))) goto cleanup; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 if (!(ret_array =3D virJSONValueObjectGet(reply, "return"))) { @@ -2245,7 +2245,7 @@ qemuAgentGetInterfaces(qemuAgentPtr mon, =20 =20 int -qemuAgentSetUserPassword(qemuAgentPtr mon, +qemuAgentSetUserPassword(qemuAgentPtr agent, const char *user, const char *password, bool crypted) @@ -2265,7 +2265,7 @@ qemuAgentSetUserPassword(qemuAgentPtr mon, NULL))) goto cleanup; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) goto cleanup; =20 ret =3D 0; @@ -2282,7 +2282,7 @@ qemuAgentSetUserPassword(qemuAgentPtr mon, * -1 otherwise */ int -qemuAgentGetUsers(qemuAgentPtr mon, +qemuAgentGetUsers(qemuAgentPtr agent, virTypedParameterPtr *params, int *nparams, int *maxparams) @@ -2296,7 +2296,7 @@ qemuAgentGetUsers(qemuAgentPtr mon, if (!(cmd =3D qemuAgentMakeCommand("guest-get-users", NULL))) return -1; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) { + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) { if (qemuAgentErrorCommandUnsupported(reply)) return -2; return -1; @@ -2373,7 +2373,7 @@ qemuAgentGetUsers(qemuAgentPtr mon, * -1 otherwise */ int -qemuAgentGetOSInfo(qemuAgentPtr mon, +qemuAgentGetOSInfo(qemuAgentPtr agent, virTypedParameterPtr *params, int *nparams, int *maxparams) @@ -2385,7 +2385,7 @@ qemuAgentGetOSInfo(qemuAgentPtr mon, if (!(cmd =3D qemuAgentMakeCommand("guest-get-osinfo", NULL))) return -1; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) { + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) { if (qemuAgentErrorCommandUnsupported(reply)) return -2; return -1; @@ -2426,7 +2426,7 @@ qemuAgentGetOSInfo(qemuAgentPtr mon, * -1 otherwise */ int -qemuAgentGetTimezone(qemuAgentPtr mon, +qemuAgentGetTimezone(qemuAgentPtr agent, virTypedParameterPtr *params, int *nparams, int *maxparams) @@ -2440,7 +2440,7 @@ qemuAgentGetTimezone(qemuAgentPtr mon, if (!(cmd =3D qemuAgentMakeCommand("guest-get-timezone", NULL))) return -1; =20 - if (qemuAgentCommand(mon, cmd, &reply, true, mon->timeout) < 0) { + if (qemuAgentCommand(agent, cmd, &reply, true, agent->timeout) < 0) { if (qemuAgentErrorCommandUnsupported(reply)) return -2; return -1; @@ -2471,14 +2471,14 @@ qemuAgentGetTimezone(qemuAgentPtr mon, } =20 /* qemuAgentSetResponseTimeout: - * mon: agent monitor - * timeout: number of seconds to wait for agent response + * @agent: agent object + * @timeout: number of seconds to wait for agent response * * The agent object must be locked prior to calling this function. 
*/ void -qemuAgentSetResponseTimeout(qemuAgentPtr mon, +qemuAgentSetResponseTimeout(qemuAgentPtr agent, int timeout) { - mon->timeout =3D timeout; + agent->timeout =3D timeout; } --=20 2.24.1 From nobody Fri Apr 26 11:13:18 2024 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of redhat.com designates 205.139.110.61 as permitted sender) client-ip=205.139.110.61; envelope-from=libvir-list-bounces@redhat.com; helo=us-smtp-delivery-1.mimecast.com; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of redhat.com designates 205.139.110.61 as permitted sender) smtp.mailfrom=libvir-list-bounces@redhat.com; dmarc=pass(p=none dis=none) header.from=redhat.com Return-Path: Received: from us-smtp-delivery-1.mimecast.com (us-smtp-1.mimecast.com [205.139.110.61]) by mx.zohomail.com with SMTPS id 1581684767875488.12736394913554; Fri, 14 Feb 2020 04:52:47 -0800 (PST) Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) (Using TLS) by relay.mimecast.com with ESMTP id us-mta-102-Hl3orXgHMRCT817JRD550g-1; Fri, 14 Feb 2020 07:52:43 -0500 Received: from smtp.corp.redhat.com (int-mx06.intmail.prod.int.phx2.redhat.com [10.5.11.16]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 5B4C7107ACC9; Fri, 14 Feb 2020 12:52:38 +0000 (UTC) Received: from colo-mx.corp.redhat.com (colo-mx02.intmail.prod.int.phx2.redhat.com [10.5.11.21]) by smtp.corp.redhat.com (Postfix) with ESMTPS id 3437F5C1D8; Fri, 14 Feb 2020 12:52:38 +0000 (UTC) Received: from lists01.pubmisc.prod.ext.phx2.redhat.com (lists01.pubmisc.prod.ext.phx2.redhat.com [10.5.19.33]) by colo-mx.corp.redhat.com (Postfix) with ESMTP id DDEF38B2C5; Fri, 14 Feb 2020 12:52:37 +0000 (UTC) Received: from smtp.corp.redhat.com (int-mx07.intmail.prod.int.phx2.redhat.com [10.5.11.22]) by lists01.pubmisc.prod.ext.phx2.redhat.com (8.13.8/8.13.8) with ESMTP id 01ECqPdd014473 for ; Fri, 14 Feb 2020 07:52:25 -0500 Received: by smtp.corp.redhat.com (Postfix) id 306D81001B2D; Fri, 14 Feb 2020 12:52:25 +0000 (UTC) Received: from domokun.gsslab.fab.redhat.com (unknown [10.33.8.110]) by smtp.corp.redhat.com (Postfix) with ESMTP id 8FE4E1001DD8; Fri, 14 Feb 2020 12:52:24 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1581684766; h=from:from:sender:sender:reply-to:subject:subject:date:date: message-id:message-id:to:to:cc:mime-version:mime-version: content-type:content-type: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references:list-id:list-help: list-unsubscribe:list-subscribe:list-post; bh=N34oiMrN3o0/6XVX+KE6DO0EMOwGN9FwDxzzIX/9NOY=; b=Qu41a2jEufhf3kgusKzZ2TkxYsdwi3j0Pc/qgi9mkEbfHtUK9zEKC4WNm6qVjbGpcBLm+H WYChx9CJVySap/S5RwtLiTHEla2n09s0ljT1lHlMFvcbmJdD5+uJZeHJBz/O15KyzTLZmr /JqC6OPa/2U7ZXDsNLPj3kdRhzAPKMs= From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?= To: libvir-list@redhat.com Subject: [libvirt PATCH 11/11] qemu: convert agent to use the per-VM event loop Date: Fri, 14 Feb 2020 12:52:09 +0000 Message-Id: <20200214125209.1152894-12-berrange@redhat.com> In-Reply-To: <20200214125209.1152894-1-berrange@redhat.com> References: <20200214125209.1152894-1-berrange@redhat.com> MIME-Version: 1.0 X-Scanned-By: MIMEDefang 2.84 on 10.5.11.22 X-loop: libvir-list@redhat.com X-BeenThere: libvir-list@redhat.com X-Mailman-Version: 2.1.12 Precedence: junk List-Id: Development discussions about the libvirt library & 
tools List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Sender: libvir-list-bounces@redhat.com Errors-To: libvir-list-bounces@redhat.com X-Scanned-By: MIMEDefang 2.79 on 10.5.11.16 X-MC-Unique: Hl3orXgHMRCT817JRD550g-1 X-Mimecast-Spam-Score: 0 X-Mimecast-Originator: redhat.com Content-Transfer-Encoding: quoted-printable X-ZohoMail-DKIM: pass (identity @redhat.com) Content-Type: text/plain; charset="utf-8" This converts the QEMU agent APIs to use the per-VM event loop, which involves switching from virEvent APIs to GMainContext / GSource APIs. A GSocket is used as a convenient way to create a GSource for a socket, but is not yet used for actual I/O. Signed-off-by: Daniel P. Berrang=C3=A9 --- src/qemu/qemu_agent.c | 146 +++++++++++++++++++---------------- src/qemu/qemu_agent.h | 1 + src/qemu/qemu_process.c | 1 + tests/qemumonitortestutils.c | 1 + 4 files changed, 84 insertions(+), 65 deletions(-) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index da1081b60b..ecc9eb23d1 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -25,6 +25,7 @@ #include #include #include +#include =20 #include "qemu_agent.h" #include "qemu_domain.h" @@ -100,7 +101,10 @@ struct _qemuAgent { virCond notify; =20 int fd; - int watch; + + GMainContext *context; + GSocket *socket; + GSource *watch; =20 bool running; =20 @@ -171,6 +175,7 @@ static void qemuAgentDispose(void *obj) (agent->cb->destroy)(agent, agent->vm); virCondDestroy(&agent->notify); VIR_FREE(agent->buffer); + g_main_context_unref(agent->context); virResetError(&agent->lastError); } =20 @@ -187,13 +192,6 @@ qemuAgentOpenUnix(const char *socketpath) return -1; } =20 - if (virSetNonBlock(agentfd) < 0) { - virReportSystemError(errno, "%s", - _("Unable to put monitor " - "into non-blocking mode")); - goto error; - } - if (virSetCloseExec(agentfd) < 0) { virReportSystemError(errno, "%s", _("Unable to set agent " @@ -497,28 +495,62 @@ qemuAgentIORead(qemuAgentPtr agent) } =20 =20 -static void qemuAgentUpdateWatch(qemuAgentPtr agent) -{ - int events =3D - VIR_EVENT_HANDLE_HANGUP | - VIR_EVENT_HANDLE_ERROR; +static gboolean +qemuAgentIO(GSocket *socket, + GIOCondition cond, + gpointer opaque); =20 - if (!agent->watch) - return; + +static void +qemuAgentRegister(qemuAgentPtr agent) +{ + GIOCondition cond =3D 0; =20 if (agent->lastError.code =3D=3D VIR_ERR_OK) { - events |=3D VIR_EVENT_HANDLE_READABLE; + cond |=3D G_IO_IN; =20 if (agent->msg && agent->msg->txOffset < agent->msg->txLength) - events |=3D VIR_EVENT_HANDLE_WRITABLE; + cond |=3D G_IO_OUT; } =20 - virEventUpdateHandle(agent->watch, events); + agent->watch =3D g_socket_create_source(agent->socket, + cond, + NULL); + + virObjectRef(agent); + g_source_set_callback(agent->watch, + (GSourceFunc)qemuAgentIO, + agent, + NULL); + + g_source_attach(agent->watch, + agent->context); } =20 =20 static void -qemuAgentIO(int watch, int fd, int events, void *opaque) +qemuAgentUnregister(qemuAgentPtr agent) +{ + if (agent->watch) { + g_source_destroy(agent->watch); + g_source_unref(agent->watch); + agent->watch =3D NULL; + } +} + + +static void qemuAgentUpdateWatch(qemuAgentPtr agent) +{ + qemuAgentUnregister(agent); + if (agent->socket) + qemuAgentRegister(agent); +} + + +static gboolean +qemuAgentIO(GSocket *socket G_GNUC_UNUSED, + GIOCondition cond, + gpointer opaque) { qemuAgentPtr agent =3D opaque; bool error =3D false; @@ -528,45 +560,36 @@ qemuAgentIO(int watch, int fd, int events, void *opaq= ue) /* lock access to the agent and protect fd */ virObjectLock(agent); 
#if DEBUG_IO - VIR_DEBUG("Agent %p I/O on watch %d fd %d events %d", agent, watch, fd= , events); + VIR_DEBUG("Agent %p I/O on watch %d socket %p cond %d", agent, agent->= socket, cond); #endif =20 - if (agent->fd =3D=3D -1 || agent->watch =3D=3D 0) { + if (agent->fd =3D=3D -1 || !agent->watch) { virObjectUnlock(agent); virObjectUnref(agent); - return; + return G_SOURCE_REMOVE; } =20 - if (agent->fd !=3D fd || agent->watch !=3D watch) { - if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR)) - eof =3D true; - virReportError(VIR_ERR_INTERNAL_ERROR, - _("event from unexpected fd %d!=3D%d / watch %d!=3D= %d"), - agent->fd, fd, agent->watch, watch); - error =3D true; - } else if (agent->lastError.code !=3D VIR_ERR_OK) { - if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR)) + if (agent->lastError.code !=3D VIR_ERR_OK) { + if (cond & (G_IO_HUP | G_IO_ERR)) eof =3D true; error =3D true; } else { - if (events & VIR_EVENT_HANDLE_WRITABLE) { + if (cond & G_IO_OUT) { if (qemuAgentIOWrite(agent) < 0) error =3D true; - events &=3D ~VIR_EVENT_HANDLE_WRITABLE; } =20 if (!error && - events & VIR_EVENT_HANDLE_READABLE) { + cond & G_IO_IN) { int got =3D qemuAgentIORead(agent); - events &=3D ~VIR_EVENT_HANDLE_READABLE; if (got < 0) { error =3D true; } else if (got =3D=3D 0) { eof =3D true; } else { - /* Ignore hangup/error events if we read some data, to + /* Ignore hangup/error cond if we read some data, to * give time for that data to be consumed */ - events =3D 0; + cond =3D 0; =20 if (qemuAgentIOProcess(agent) < 0) error =3D true; @@ -574,25 +597,17 @@ qemuAgentIO(int watch, int fd, int events, void *opaq= ue) } =20 if (!error && - events & VIR_EVENT_HANDLE_HANGUP) { + cond & G_IO_HUP) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("End of file from agent socket")); eof =3D true; - events &=3D ~VIR_EVENT_HANDLE_HANGUP; } =20 if (!error && !eof && - events & VIR_EVENT_HANDLE_ERROR) { + cond & G_IO_ERR) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid file descriptor while waiting for ag= ent")); eof =3D true; - events &=3D ~VIR_EVENT_HANDLE_ERROR; - } - if (!error && events) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("Unhandled event %d for agent fd %d"), - events, agent->fd); - error =3D true; } } =20 @@ -648,15 +663,19 @@ qemuAgentIO(int watch, int fd, int events, void *opaq= ue) virObjectUnlock(agent); virObjectUnref(agent); } + + return G_SOURCE_REMOVE; } =20 =20 qemuAgentPtr qemuAgentOpen(virDomainObjPtr vm, const virDomainChrSourceDef *config, + GMainContext *context, qemuAgentCallbacksPtr cb) { qemuAgentPtr agent; + g_autoptr(GError) gerr =3D NULL; =20 if (!cb || !cb->eofNotify) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -692,22 +711,20 @@ qemuAgentOpen(virDomainObjPtr vm, if (agent->fd =3D=3D -1) goto cleanup; =20 - virObjectRef(agent); - if ((agent->watch =3D virEventAddHandle(agent->fd, - VIR_EVENT_HANDLE_HANGUP | - VIR_EVENT_HANDLE_ERROR | - VIR_EVENT_HANDLE_READABLE, - qemuAgentIO, - agent, - virObjectFreeCallback)) < 0) { - virObjectUnref(agent); - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("unable to register agent events")); + agent->context =3D g_main_context_ref(context); + + agent->socket =3D g_socket_new_from_fd(agent->fd, &gerr); + if (!agent->socket) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("Unable to create socket object: %s"), + gerr->message); goto cleanup; } =20 + qemuAgentRegister(agent); + agent->running =3D true; - VIR_DEBUG("New agent %p fd =3D%d watch=3D%d", agent, agent->fd, agent-= >watch); + VIR_DEBUG("New agent %p 
fd=3D%d", agent, agent->fd); =20 return agent; =20 @@ -762,12 +779,11 @@ void qemuAgentClose(qemuAgentPtr agent) =20 virObjectLock(agent); =20 - if (agent->fd >=3D 0) { - if (agent->watch) { - virEventRemoveHandle(agent->watch); - agent->watch =3D 0; - } - VIR_FORCE_CLOSE(agent->fd); + if (agent->socket) { + qemuAgentUnregister(agent); + g_object_unref(agent->socket); + agent->socket =3D NULL; + agent->fd =3D -1; } =20 qemuAgentNotifyCloseLocked(agent); diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 5656fe60ff..d4d8615323 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -41,6 +41,7 @@ struct _qemuAgentCallbacks { =20 qemuAgentPtr qemuAgentOpen(virDomainObjPtr vm, const virDomainChrSourceDef *config, + GMainContext *context, qemuAgentCallbacksPtr cb); =20 void qemuAgentClose(qemuAgentPtr mon); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index bc57474bdc..cf2b5c260c 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -236,6 +236,7 @@ qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjP= tr vm) =20 agent =3D qemuAgentOpen(vm, config->source, + virEventThreadGetContext(priv->eventThread), &agentCallbacks); =20 virObjectLock(vm); diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c index 3efdea9cce..1eb06a0e54 100644 --- a/tests/qemumonitortestutils.c +++ b/tests/qemumonitortestutils.c @@ -1411,6 +1411,7 @@ qemuMonitorTestNewAgent(virDomainXMLOptionPtr xmlopt) =20 if (!(test->agent =3D qemuAgentOpen(test->vm, &src, + virEventThreadGetContext(test->event= Thread), &qemuMonitorTestAgentCallbacks))) goto error; =20 --=20 2.24.1