[PATCH] main-loop: Avoid some unnecessary poll calls

Ross Lagerwall posted 1 patch 1 week, 5 days ago
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/qemu tags/patchew/20240212114541.1440728-1-ross.lagerwall@citrix.com
Maintainers: Paolo Bonzini <pbonzini@redhat.com>
util/main-loop.c | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
[PATCH] main-loop: Avoid some unnecessary poll calls
Posted by Ross Lagerwall 1 week, 5 days ago
A common pattern is seen where a timer fires, the callback does some
work, then rearms the timer which implicitly calls qemu_notify_event().

qemu_notify_event() is supposed to interrupt the main loop's poll() by
calling qemu_bh_schedule(). In the case that this is being called from a
main loop callback, the main loop is already not waiting on poll() and
instead the main loop performs an additional iteration with a timeout
of 0 to handle the bottom half wakeup, before once again polling with
the expected timeout value.

Detect this situation by skipping the qemu_bh_schedule() call if the
default main context is currently owned by the caller, i.e. it is being
called as part of a poll / timer callback. Adjust the scope of the main
context acquire / release to cover the timer callbacks in
qemu_clock_run_all_timers().

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
---
 util/main-loop.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/util/main-loop.c b/util/main-loop.c
index a0386cfeb60c..a2afbb7d0e13 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -145,10 +145,16 @@ AioContext *qemu_get_aio_context(void)
 
 void qemu_notify_event(void)
 {
+    GMainContext *context;
+
     if (!qemu_aio_context) {
         return;
     }
-    qemu_bh_schedule(qemu_notify_bh);
+
+    context = g_main_context_default();
+    if (!g_main_context_is_owner(context)) {
+        qemu_bh_schedule(qemu_notify_bh);
+    }
 }
 
 static GArray *gpollfds;
@@ -292,11 +298,8 @@ static void glib_pollfds_poll(void)
 
 static int os_host_main_loop_wait(int64_t timeout)
 {
-    GMainContext *context = g_main_context_default();
     int ret;
 
-    g_main_context_acquire(context);
-
     glib_pollfds_fill(&timeout);
 
     bql_unlock();
@@ -309,8 +312,6 @@ static int os_host_main_loop_wait(int64_t timeout)
 
     glib_pollfds_poll();
 
-    g_main_context_release(context);
-
     return ret;
 }
 #else
@@ -470,15 +471,12 @@ static int os_host_main_loop_wait(int64_t timeout)
     fd_set rfds, wfds, xfds;
     int nfds;
 
-    g_main_context_acquire(context);
-
     /* XXX: need to suppress polling by better using win32 events */
     ret = 0;
     for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
         ret |= pe->func(pe->opaque);
     }
     if (ret != 0) {
-        g_main_context_release(context);
         return ret;
     }
 
@@ -538,8 +536,6 @@ static int os_host_main_loop_wait(int64_t timeout)
         g_main_context_dispatch(context);
     }
 
-    g_main_context_release(context);
-
     return select_ret || g_poll_ret;
 }
 #endif
@@ -559,6 +555,7 @@ void main_loop_poll_remove_notifier(Notifier *notify)
 
 void main_loop_wait(int nonblocking)
 {
+    GMainContext *context = g_main_context_default();
     MainLoopPoll mlpoll = {
         .state = MAIN_LOOP_POLL_FILL,
         .timeout = UINT32_MAX,
@@ -586,7 +583,10 @@ void main_loop_wait(int nonblocking)
                                       timerlistgroup_deadline_ns(
                                           &main_loop_tlg));
 
+    g_main_context_acquire(context);
+
     ret = os_host_main_loop_wait(timeout_ns);
+
     mlpoll.state = ret < 0 ? MAIN_LOOP_POLL_ERR : MAIN_LOOP_POLL_OK;
     notifier_list_notify(&main_loop_poll_notifiers, &mlpoll);
 
@@ -598,6 +598,8 @@ void main_loop_wait(int nonblocking)
         icount_start_warp_timer();
     }
     qemu_clock_run_all_timers();
+
+    g_main_context_release(context);
 }
 
 /* Functions to operate on the main QEMU AioContext.  */
-- 
2.43.0