Add a field "use_io_thr" to each Monitor to indicate whether it uses
the dedicated monitor IO thread to handle input/output. When set,
monitor IO parsing work is offloaded to the dedicated monitor IO
thread rather than the main loop thread.

This only works for QMP; HMP always runs on the main loop thread.

For now, use_io_thr is always kept off; it will be turned on later.

Note that we cannot set use_io_thr for every QMP monitor: MUX-typed
chardevs may not work well with it yet. When MUX is used, the chardev
frontends can be the monitor plus something else. The only frontend we
know is safe to run outside the main thread so far is the monitor; all
the other frontends must still run on the main thread.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
monitor.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 99 insertions(+), 8 deletions(-)
diff --git a/monitor.c b/monitor.c
index cef71d5c45..fd6e9a6d28 100644
--- a/monitor.c
+++ b/monitor.c
@@ -35,6 +35,7 @@
#include "net/net.h"
#include "net/slirp.h"
#include "chardev/char-fe.h"
+#include "chardev/char-io.h"
#include "ui/qemu-spice.h"
#include "sysemu/numa.h"
#include "monitor/monitor.h"
@@ -75,6 +76,7 @@
#include "qmp-introspect.h"
#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
+#include "sysemu/iothread.h"
#include "qemu/cutils.h"
#include "qapi/qmp/dispatch.h"
@@ -189,6 +191,7 @@ struct Monitor {
int flags;
int suspend_cnt;
bool skip_flush;
+ bool use_io_thr;
QemuMutex out_lock;
QString *outbuf;
@@ -207,6 +210,11 @@ struct Monitor {
QTAILQ_ENTRY(Monitor) entry;
};
+/* Let's add monitor global variables to this struct. */
+static struct {
+ IOThread *mon_iothread;
+} mon_global;
+
/* QMP checker flags */
#define QMP_ACCEPT_UNKNOWNS 1
@@ -567,7 +575,8 @@ static void monitor_qapi_event_init(void)
static void handle_hmp_command(Monitor *mon, const char *cmdline);
-static void monitor_data_init(Monitor *mon, bool skip_flush)
+static void monitor_data_init(Monitor *mon, bool skip_flush,
+ bool use_io_thr)
{
memset(mon, 0, sizeof(Monitor));
qemu_mutex_init(&mon->out_lock);
@@ -575,6 +584,7 @@ static void monitor_data_init(Monitor *mon, bool skip_flush)
/* Use *mon_cmds by default. */
mon->cmd_table = mon_cmds;
mon->skip_flush = skip_flush;
+ mon->use_io_thr = use_io_thr;
}
static void monitor_data_destroy(Monitor *mon)
@@ -595,7 +605,7 @@ char *qmp_human_monitor_command(const char *command_line, bool has_cpu_index,
char *output = NULL;
Monitor *old_mon, hmp;
- monitor_data_init(&hmp, true);
+ monitor_data_init(&hmp, true, false);
old_mon = cur_mon;
cur_mon = &hmp;
@@ -4034,12 +4044,29 @@ static void sortcmdlist(void)
qsort((void *)info_cmds, array_num, elem_size, compare_mon_cmd);
}
+static GMainContext *monitor_io_context_get(void)
+{
+ return iothread_get_g_main_context(mon_global.mon_iothread);
+}
+
+static AioContext *monitor_aio_context_get(void)
+{
+ return iothread_get_aio_context(mon_global.mon_iothread);
+}
+
+static void monitor_iothread_init(void)
+{
+ mon_global.mon_iothread = iothread_create("mon_iothread",
+ &error_abort);
+}
+
void monitor_init_globals(void)
{
monitor_init_qmp_commands();
monitor_qapi_event_init();
sortcmdlist();
qemu_mutex_init(&monitor_lock);
+ monitor_iothread_init();
}
/* These functions just adapt the readline interface in a typesafe way. We
@@ -4082,11 +4109,41 @@ void error_vprintf_unless_qmp(const char *fmt, va_list ap)
}
}
+static void monitor_list_append(Monitor *mon)
+{
+ qemu_mutex_lock(&monitor_lock);
+ QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
+ qemu_mutex_unlock(&monitor_lock);
+}
+
+static void monitor_qmp_setup_handlers(void *data)
+{
+ Monitor *mon = data;
+ GMainContext *context;
+
+ if (mon->use_io_thr) {
+ /*
+ * When use_io_thr is set, we use the global shared dedicated
+ * IO thread for this monitor to handle input/output.
+ */
+ context = monitor_io_context_get();
+ /* We should have inited globals before reaching here. */
+ assert(context);
+ } else {
+ /* The default main loop, which is the main thread */
+ context = NULL;
+ }
+
+ qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
+ monitor_qmp_event, NULL, mon, context, true);
+ monitor_list_append(mon);
+}
+
void monitor_init(Chardev *chr, int flags)
{
Monitor *mon = g_malloc(sizeof(*mon));
- monitor_data_init(mon, false);
+ monitor_data_init(mon, false, false);
qemu_chr_fe_init(&mon->chr, chr, &error_abort);
mon->flags = flags;
@@ -4099,24 +4156,55 @@ void monitor_init(Chardev *chr, int flags)
}
if (monitor_is_qmp(mon)) {
- qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
- monitor_qmp_event, NULL, mon, NULL, true);
qemu_chr_fe_set_echo(&mon->chr, true);
json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
+ if (mon->use_io_thr) {
+ /*
+ * It's possible that we already have an IOWatchPoll
+ * registered for the Chardev during chardev_init_func().
+ * When that happened, the gcontext was still the default
+ * main context, always. We need to make sure we
+ * unregister that first and from now on we run the
+ * chardev on the new gcontext.
+ */
+ remove_fd_in_watch(chr);
+ /*
+ * We can't call qemu_chr_fe_set_handlers() directly here
+ * since during the procedure the chardev will be active
+ * and running in monitor iothread, while we'll still do
+ * something before returning from it, which is a possible
+ * race too. To avoid that, we just create a BH to setup
+ * the handlers.
+ */
+ aio_bh_schedule_oneshot(monitor_aio_context_get(),
+ monitor_qmp_setup_handlers, mon);
+ /* We'll add this to mon_list in the BH when setup done */
+ return;
+ } else {
+ qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
+ monitor_qmp_read, monitor_qmp_event,
+ NULL, mon, NULL, true);
+ }
} else {
qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_read,
monitor_event, NULL, mon, NULL, true);
}
- qemu_mutex_lock(&monitor_lock);
- QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
- qemu_mutex_unlock(&monitor_lock);
+ monitor_list_append(mon);
}
void monitor_cleanup(void)
{
Monitor *mon, *next;
+ /*
+ * We need to explicitly stop the iothread (but not destroy it),
+ * cleanup the monitor resources, then destroy the iothread since
+ * we need to unregister from chardev below in
+ * monitor_data_destroy(), and chardev is not thread-safe yet
+ */
+ iothread_stop(mon_global.mon_iothread);
+
qemu_mutex_lock(&monitor_lock);
QTAILQ_FOREACH_SAFE(mon, &mon_list, entry, next) {
QTAILQ_REMOVE(&mon_list, mon, entry);
@@ -4124,6 +4212,9 @@ void monitor_cleanup(void)
g_free(mon);
}
qemu_mutex_unlock(&monitor_lock);
+
+ iothread_destroy(mon_global.mon_iothread);
+ mon_global.mon_iothread = NULL;
}
QemuOptsList qemu_mon_opts = {
--
2.14.3
On Wed, Jan 24, 2018 at 01:39:43PM +0800, Peter Xu wrote:
> @@ -4034,12 +4044,29 @@ static void sortcmdlist(void)
> qsort((void *)info_cmds, array_num, elem_size, compare_mon_cmd);
> }
>
> +static GMainContext *monitor_io_context_get(void)
> +{
> + return iothread_get_g_main_context(mon_global.mon_iothread);
> +}
> +
> +static AioContext *monitor_aio_context_get(void)
> +{
> + return iothread_get_aio_context(mon_global.mon_iothread);
> +}
Please follow the X_get_Y() naming convention instead of X_Y_get(). For
example, see qemu_get_aio_context() and iothread_get_aio_context().
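For this patch that would mean something like (just a naming
suggestion, of course):

    static GMainContext *monitor_get_io_context(void);
    static AioContext *monitor_get_aio_context(void);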
> @@ -4082,11 +4109,41 @@ void error_vprintf_unless_qmp(const char *fmt, va_list ap)
> }
> }
>
> +static void monitor_list_append(Monitor *mon)
> +{
> + qemu_mutex_lock(&monitor_lock);
> + QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
> + qemu_mutex_unlock(&monitor_lock);
> +}
> +
> +static void monitor_qmp_setup_handlers(void *data)
BH functions are usually declared like this:
static void X_bh(void *opaque)
This way it's immediately clear that this function is invoked as a BH.
I suggest renaming the function to monitor_qmp_setup_handlers_bh().
Using 'opaque' instead of 'data' is common, too.
> @@ -4099,24 +4156,55 @@ void monitor_init(Chardev *chr, int flags)
> }
>
> if (monitor_is_qmp(mon)) {
> - qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
> - monitor_qmp_event, NULL, mon, NULL, true);
> qemu_chr_fe_set_echo(&mon->chr, true);
> json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
> + if (mon->use_io_thr) {
> + /*
> + * It's possible that we already have an IOWatchPoll
> + * registered for the Chardev during chardev_init_func().
When does this happen?
This seems like a hack that breaks when certain -chardev options are
used. For example, what happens if the chardev is a TCP connection with
reconnect=5? In that case the socket will be connecting asynchronously
and we cannot just remove the fd watch.
How does this interact with TCP listen chardevs? It looks like the
listener socket uses the main loop (see tcp_chr_disconnect()).
I'm worried that the chardev layer isn't thread-safe and you haven't
added anything to protect it or at least refuse to run in unsafe
conditions.
On Wed, Feb 21, 2018 at 04:00:07PM +0000, Stefan Hajnoczi wrote:
> On Wed, Jan 24, 2018 at 01:39:43PM +0800, Peter Xu wrote:
> > @@ -4034,12 +4044,29 @@ static void sortcmdlist(void)
> > qsort((void *)info_cmds, array_num, elem_size, compare_mon_cmd);
> > }
> >
> > +static GMainContext *monitor_io_context_get(void)
> > +{
> > + return iothread_get_g_main_context(mon_global.mon_iothread);
> > +}
> > +
> > +static AioContext *monitor_aio_context_get(void)
> > +{
> > + return iothread_get_aio_context(mon_global.mon_iothread);
> > +}
>
> Please follow the X_get_Y() naming convention instead of X_Y_get(). For
> example, see qemu_get_aio_context() and iothread_get_aio_context().
Sure.
>
> > @@ -4082,11 +4109,41 @@ void error_vprintf_unless_qmp(const char *fmt, va_list ap)
> > }
> > }
> >
> > +static void monitor_list_append(Monitor *mon)
> > +{
> > + qemu_mutex_lock(&monitor_lock);
> > + QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
> > + qemu_mutex_unlock(&monitor_lock);
> > +}
> > +
> > +static void monitor_qmp_setup_handlers(void *data)
>
> BH functions are usually declared like this:
>
> static void X_bh(void *opaque)
>
> This way it's immediately clear that this function is invoked as a BH.
>
> I suggest renaming the function to monitor_qmp_setup_handlers_bh().
> Using 'opaque' instead of 'data' is common, too.
Sure.
>
> > @@ -4099,24 +4156,55 @@ void monitor_init(Chardev *chr, int flags)
> > }
> >
> > if (monitor_is_qmp(mon)) {
> > - qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
> > - monitor_qmp_event, NULL, mon, NULL, true);
> > qemu_chr_fe_set_echo(&mon->chr, true);
> > json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
> > + if (mon->use_io_thr) {
> > + /*
> > + * It's possible that we already have an IOWatchPoll
> > + * registered for the Chardev during chardev_init_func().
>
> When does this happen?
>
> This seems like a hack that breaks when certain -chardev options are
> used. For example, what happens if the chardev is a TCP connection with
> reconnect=5? In that case the socket will be connecting asynchronously
> and we cannot just remove the fd watch.
>
> How does this interact with TCP listen chardevs? It looks like the
> listener socket uses the main loop (see tcp_chr_disconnect()).
>
> I'm worried that the chardev layer isn't thread-safe and you haven't
> added anything to protect it or at least refuse to run in unsafe
> conditions.
Indeed, I did some more reading and noticed that the TCP-typed chardev
is really special.

Firstly, there can be a QIO thread that handles synchronous connecting
when "reconnect" is set up (I don't really understand why we only need
the thread when reconnect != 0, but I'll just assume we do). That is
done in qmp_chardev_open_socket().

Secondly, TCP can support TLS or TELNET (tcp_chr_new_client() handles
the main logic of that), so there can actually be more than one GSource
created for a single TCP chardev. Meanwhile, the
chr_update_read_handler() calls never handle the re-setup of those
special GSources (TLS/TELNET), only the common GSource for TCP stream
read/write.

And the whole TCP channel is based on QIO, which means I also need to
add non-default GMainContext support to the QIO code. That's mostly
about qio_channel_add_watch(): I may need to pass in the context and
have that function return a GSource instead of the old tags, just like
what I did for chardev in general.
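Conceptually, the per-watch change I have in mind is something like
this (untested sketch only; "context" here would be the monitor
iothread's GMainContext rather than NULL, and the surrounding names
are illustrative):

    GSource *source = qio_channel_create_watch(ioc, G_IO_IN);

    g_source_set_callback(source, (GSourceFunc)func, opaque, NULL);
    g_source_attach(source, context);  /* not the default main context */
    g_source_unref(source);            /* the attached context keeps a ref */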
I'll think about these. I may need some prerequisite, separate
patches to fix existing problems before continuing with OOB.

This is the last thing I'd want to see - "surprises". :(
Thanks,
--
Peter Xu
On Thu, Feb 22, 2018 at 06:01:19PM +0800, Peter Xu wrote:
> [...]
>
> I'll think about these. I may need some prerequisite, separate
> patches to fix existing problems before continuing with OOB.
>
> This is the last thing I'd want to see - "surprises". :(
Yes, this feature is more involved than anyone thought at the beginning.
Stefan
On Thu, Feb 22, 2018 at 06:01:19PM +0800, Peter Xu wrote:
> [...]
>
> And the whole TCP channel is based on QIO, which means I also need to
> add non-default GMainContext support to the QIO code. That's mostly
> about qio_channel_add_watch(): I may need to pass in the context and
> have that function return a GSource instead of the old tags, just like
> what I did for chardev in general.
Rather than changing qio_channel_add_watch(), which affects all callers,
just add a qio_channel_add_watch_full() variant that takes a GMainContext
as an extra argument.
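Something along these lines, say (rough sketch only, the final
signature is up for discussion):

    /* e.g. in io/channel.c, next to qio_channel_add_watch() */
    guint qio_channel_add_watch_full(QIOChannel *ioc,
                                     GIOCondition condition,
                                     QIOChannelFunc func,
                                     gpointer user_data,
                                     GDestroyNotify notify,
                                     GMainContext *context)
    {
        GSource *source = qio_channel_create_watch(ioc, condition);
        guint id;

        g_source_set_callback(source, (GSourceFunc)func, user_data, notify);
        /* NULL context means the default GMainContext, as today */
        id = g_source_attach(source, context);
        g_source_unref(source);

        return id;
    }

Then chardev (and the monitor) can pass in the monitor iothread's
context, while existing callers keep using qio_channel_add_watch()
unchanged.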
Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|
On Thu, Feb 22, 2018 at 03:50:47PM +0000, Daniel P. Berrangé wrote:
> [...]
>
> Rather than changing qio_channel_add_watch(), which affects all callers,
> just add a qio_channel_add_watch_full() variant that takes a GMainContext
> as an extra argument.
Yeah, will do. Thanks,
--
Peter Xu