On one of our client's nodes, a segmentation fault occurred due to an
attempt to read from a closed ioc. Corresponding backtrace:
0 object_get_class (obj=obj@entry=0x0)
1 qio_channel_readv_full (ioc=0x0, iov=0x7ffe55277180 ...
2 qio_channel_read (ioc=<optimized out> ...
3 vnc_client_read_buf (vs=vs@entry=0x55625f3c6000, ...
4 vnc_client_read_plain (vs=0x55625f3c6000)
5 vnc_client_read (vs=0x55625f3c6000)
6 vnc_client_io (ioc=<optimized out>, condition=G_IO_IN, ...
7 g_main_dispatch (context=0x556251568a50)
8 g_main_context_dispatch (context=context@entry=0x556251568a50)
9 glib_pollfds_poll ()
10 os_host_main_loop_wait (timeout=<optimized out>)
11 main_loop_wait (nonblocking=nonblocking@entry=0)
12 main_loop () at vl.c:1909
13 main (argc=<optimized out>, argv=<optimized out>, ...
Having analyzed the coredump, I found the cause: ioc_tag is cleared in
vnc_disconnect_start, while ioc itself is only released in
vnc_disconnect_finish. Between these two events the ioc_tag can be set
again, so after vnc_disconnect_finish the handler still runs with the
freed ioc, which leads to the segmentation fault.
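
Roughly, the sequence of events is the following (an illustrative
sketch; the re-arming site shown here is the one in
vnc_jobs_consume_buffer, and the write path reached via vnc_flush is
the other place covered by this patch):

    vnc_disconnect_start()
        g_source_remove(vs->ioc_tag);
        vs->ioc_tag = 0;
        vs->disconnecting = TRUE;
    vnc_jobs_consume_buffer()            /* queued job output is ready */
        vs->ioc_tag = qio_channel_add_watch(vs->ioc, ...);  /* re-armed */
    vnc_disconnect_finish()
        /* vs->ioc is released, but the watch added above stays armed */
    vnc_client_io()                      /* stale watch fires */
        vnc_client_read(vs);             /* uses the freed ioc -> SIGSEGV */
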
To prevent such an occurrence, the patch checks vs->disconnecting in
the places where we call qio_channel_add_watch and removes the handler
if disconnecting == TRUE.
Signed-off-by: Klim Kireev <klim.kireev@virtuozzo.com>
---
Changelog:
v2: Attach the backtrace
v3: Change checks
ui/vnc-jobs.c | 6 ++++--
ui/vnc.c | 15 ++++++++++++++-
2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/ui/vnc-jobs.c b/ui/vnc-jobs.c
index e326679dd0..868dddef4b 100644
--- a/ui/vnc-jobs.c
+++ b/ui/vnc-jobs.c
@@ -148,8 +148,10 @@ void vnc_jobs_consume_buffer(VncState *vs)
if (vs->ioc_tag) {
g_source_remove(vs->ioc_tag);
}
- vs->ioc_tag = qio_channel_add_watch(
- vs->ioc, G_IO_IN | G_IO_OUT, vnc_client_io, vs, NULL);
+ if (vs->disconnecting == FALSE) {
+ vs->ioc_tag = qio_channel_add_watch(
+ vs->ioc, G_IO_IN | G_IO_OUT, vnc_client_io, vs, NULL);
+ }
}
buffer_move(&vs->output, &vs->jobs_buffer);
diff --git a/ui/vnc.c b/ui/vnc.c
index 93731accb6..67ccc8160f 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -1536,12 +1536,19 @@ gboolean vnc_client_io(QIOChannel *ioc G_GNUC_UNUSED,
VncState *vs = opaque;
if (condition & G_IO_IN) {
if (vnc_client_read(vs) < 0) {
- return TRUE;
+ goto end;
}
}
if (condition & G_IO_OUT) {
vnc_client_write(vs);
}
+end:
+ if (vs->disconnecting) {
+ if (vs->ioc_tag != 0) {
+ g_source_remove(vs->ioc_tag);
+ }
+ vs->ioc_tag = 0;
+ }
return TRUE;
}
@@ -1630,6 +1637,12 @@ void vnc_flush(VncState *vs)
if (vs->ioc != NULL && vs->output.offset) {
vnc_client_write_locked(vs);
}
+ if (vs->disconnecting) {
+ if (vs->ioc_tag != 0) {
+ g_source_remove(vs->ioc_tag);
+ }
+ vs->ioc_tag = 0;
+ }
vnc_unlock_output(vs);
}
--
2.14.3
ping

On 02/07/2018 12:48 PM, Klim Kireev wrote:
> [...]
On Wed, Feb 14, 2018 at 05:43:19PM +0300, Klim Kireev wrote:
> ping

Queued now. Was lingering in my inbox, waiting for me to find the time
for the next ui pull request.

cheers,
  Gerd
On Wed, Feb 07, 2018 at 12:48:44PM +0300, Klim Kireev wrote:
> [...]

Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>

Regards,
Daniel

--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|