These trace events were very useful to help me to understand and find a
reordering issue in vfio, for example:
qemu_mutex_lock locked mutex 0x10905ad8
vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4)
qemu_mutex_unlock unlocked mutex 0x10905ad8
qemu_mutex_lock locked mutex 0x10905ad8
vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4)
qemu_mutex_unlock unlocked mutex 0x10905ad8
that also helped me to see the desired result after the fix:
qemu_mutex_lock locked mutex 0x10905ad8
vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4)
vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4)
qemu_mutex_unlock unlocked mutex 0x10905ad8
So it could be a good idea to have these traces implemented. It's worth
mentioning that they should be surgically enabled during debugging,
otherwise they can flood the trace logs with lock/unlock messages.
How to use it:
trace-event qemu_mutex_lock on|off
trace-event qemu_mutex_unlock on|off
or
trace-event qemu_mutex* on|off
Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com>
---
v2:
- removed unnecessary (void*) cast
- renamed parameter name to lock instead of qemu_global_mutex
util/qemu-thread-posix.c | 5 +++++
util/trace-events | 4 ++++
2 files changed, 9 insertions(+)
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 73e3a0e..4f77d7b 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -14,6 +14,7 @@
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
+#include "trace.h"
static bool name_threads;
@@ -60,6 +61,8 @@ void qemu_mutex_lock(QemuMutex *mutex)
err = pthread_mutex_lock(&mutex->lock);
if (err)
error_exit(err, __func__);
+
+ trace_qemu_mutex_lock(&mutex->lock);
}
int qemu_mutex_trylock(QemuMutex *mutex)
@@ -74,6 +77,8 @@ void qemu_mutex_unlock(QemuMutex *mutex)
err = pthread_mutex_unlock(&mutex->lock);
if (err)
error_exit(err, __func__);
+
+ trace_qemu_mutex_unlock(&mutex->lock);
}
void qemu_rec_mutex_init(QemuRecMutex *mutex)
diff --git a/util/trace-events b/util/trace-events
index b44ef4f..70f6212 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -55,3 +55,7 @@ lockcnt_futex_wait_prepare(const void *lockcnt, int expected, int new) "lockcnt
lockcnt_futex_wait(const void *lockcnt, int val) "lockcnt %p waiting on %d"
lockcnt_futex_wait_resume(const void *lockcnt, int new) "lockcnt %p after wait: %d"
lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter"
+
+# util/qemu-thread-posix.c
+qemu_mutex_lock(void *lock) "locked mutex %p"
+qemu_mutex_unlock(void *lock) "unlocked mutex %p"
--
2.7.4
On Mon, 04/24 14:19, Jose Ricardo Ziviani wrote: > These trace events were very useful to help me to understand and find a > reordering issue in vfio, for example: > > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > that also helped me to see the desired result after the fix: > > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4) > vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > So it could be a good idea to have these traces implemented. It's worth > mentioning that they should be surgically enabled during the debugging, > otherwise it can flood the trace logs with lock/unlock messages. > > How to use it: > trace-event qemu_mutex_lock on|off > trace-event qemu_mutex_unlock on|off > or > trace-event qemu_mutex* on|off > > Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> > --- > v2: > - removed unecessary (void*) cast > - renamed parameter name to lock instead of qemu_global_mutex > > util/qemu-thread-posix.c | 5 +++++ > util/trace-events | 4 ++++ > 2 files changed, 9 insertions(+) > > diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c > index 73e3a0e..4f77d7b 100644 > --- a/util/qemu-thread-posix.c > +++ b/util/qemu-thread-posix.c > @@ -14,6 +14,7 @@ > #include "qemu/thread.h" > #include "qemu/atomic.h" > #include "qemu/notify.h" > +#include "trace.h" > > static bool name_threads; > > @@ -60,6 +61,8 @@ void qemu_mutex_lock(QemuMutex *mutex) > err = pthread_mutex_lock(&mutex->lock); > if (err) > error_exit(err, __func__); > + > + trace_qemu_mutex_lock(&mutex->lock); > } > > int qemu_mutex_trylock(QemuMutex *mutex) > @@ -74,6 +77,8 @@ void qemu_mutex_unlock(QemuMutex *mutex) > err = 
pthread_mutex_unlock(&mutex->lock); > if (err) > error_exit(err, __func__); > + > + trace_qemu_mutex_unlock(&mutex->lock); > } > > void qemu_rec_mutex_init(QemuRecMutex *mutex) > diff --git a/util/trace-events b/util/trace-events > index b44ef4f..70f6212 100644 > --- a/util/trace-events > +++ b/util/trace-events > @@ -55,3 +55,7 @@ lockcnt_futex_wait_prepare(const void *lockcnt, int expected, int new) "lockcnt > lockcnt_futex_wait(const void *lockcnt, int val) "lockcnt %p waiting on %d" > lockcnt_futex_wait_resume(const void *lockcnt, int new) "lockcnt %p after wait: %d" > lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter" > + > +# util/qemu-thread-posix.c > +qemu_mutex_lock(void *lock) "locked mutex %p" > +qemu_mutex_unlock(void *lock) "unlocked mutex %p" > -- > 2.7.4 > Reviewed-by: Fam Zheng <famz@redhat.com>
On 24/04/2017 19:19, Jose Ricardo Ziviani wrote: > These trace events were very useful to help me to understand and find a > reordering issue in vfio, for example: > > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > that also helped me to see the desired result after the fix: > > qemu_mutex_lock locked mutex 0x10905ad8 > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4) > vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4) > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > So it could be a good idea to have these traces implemented. It's worth > mentioning that they should be surgically enabled during the debugging, > otherwise it can flood the trace logs with lock/unlock messages. > > How to use it: > trace-event qemu_mutex_lock on|off > trace-event qemu_mutex_unlock on|off > or > trace-event qemu_mutex* on|off > > Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> Some improvements: 1) handle trylock and Win32 too 2) pass mutex instead of &mutex->lock, it is the same but the latter is unnecessarily obfuscated 3) also trace unlock/lock around cond_wait 4) trace "unlocked" before calling pthread_mutex_unlock, so that it is always placed before the next "locked" tracepoint. 
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index bf5756763d..46f4c08e6d 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -62,7 +62,7 @@ void qemu_mutex_lock(QemuMutex *mutex) if (err) error_exit(err, __func__); - trace_qemu_mutex_lock(&mutex->lock); + trace_qemu_mutex_locked(mutex); } int qemu_mutex_trylock(QemuMutex *mutex) @@ -71,7 +71,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) err = pthread_mutex_trylock(&mutex->lock); if (err == 0) { - trace_qemu_mutex_lock(&mutex->lock); + trace_qemu_mutex_locked(mutex); return 0; } if (err == EBUSY) { @@ -84,11 +84,10 @@ void qemu_mutex_unlock(QemuMutex *mutex) { int err; + trace_qemu_mutex_unlocked(mutex); err = pthread_mutex_unlock(&mutex->lock); if (err) error_exit(err, __func__); - - trace_qemu_mutex_unlock(&mutex->lock); } void qemu_rec_mutex_init(QemuRecMutex *mutex) @@ -145,7 +144,9 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) { int err; + trace_qemu_mutex_unlocked(mutex); err = pthread_cond_wait(&cond->cond, &mutex->lock); + trace_qemu_mutex_locked(mutex); if (err) error_exit(err, __func__); } diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c index d3c87bc89e..0dc3ae7756 100644 --- a/util/qemu-thread-win32.c +++ b/util/qemu-thread-win32.c @@ -55,6 +55,7 @@ void qemu_mutex_destroy(QemuMutex *mutex) void qemu_mutex_lock(QemuMutex *mutex) { AcquireSRWLockExclusive(&mutex->lock); + trace_qemu_mutex_locked(mutex); } int qemu_mutex_trylock(QemuMutex *mutex) @@ -64,6 +64,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) owned = TryAcquireSRWLockExclusive(&mutex->lock); if (owned) { + trace_qemu_mutex_locked(mutex); return 0; } return -EBUSY; @@ -72,6 +72,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) void qemu_mutex_unlock(QemuMutex *mutex) { + trace_qemu_mutex_unlocked(mutex); ReleaseSRWLockExclusive(&mutex->lock); } @@ -124,7 +124,9 @@ void qemu_cond_broadcast(QemuCond *cond) void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) { + 
trace_qemu_mutex_unlocked(mutex); SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0); + trace_qemu_mutex_locked(mutex); } void qemu_sem_init(QemuSemaphore *sem, int init) diff --git a/util/trace-events b/util/trace-events index 70f62124e1..fa540c620b 100644 --- a/util/trace-events +++ b/util/trace-events @@ -57,5 +57,5 @@ lockcnt_futex_wait_resume(const void *lockcnt, int new) "lockcnt %p after wait: lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter" # util/qemu-thread-posix.c -qemu_mutex_lock(void *lock) "locked mutex %p" -qemu_mutex_unlock(void *lock) "unlocked mutex %p" +qemu_mutex_locked(void *lock) "locked mutex %p" +qemu_mutex_unlocked(void *lock) "unlocked mutex %p"
On Thu, Apr 27, 2017 at 10:55:04AM +0200, Paolo Bonzini wrote: > > > On 24/04/2017 19:19, Jose Ricardo Ziviani wrote: > > These trace events were very useful to help me to understand and find a > > reordering issue in vfio, for example: > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4) > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > qemu_mutex_lock locked mutex 0x10905ad8 > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4) > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > that also helped me to see the desired result after the fix: > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4) > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4) > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > So it could be a good idea to have these traces implemented. It's worth > > mentioning that they should be surgically enabled during the debugging, > > otherwise it can flood the trace logs with lock/unlock messages. > > > > How to use it: > > trace-event qemu_mutex_lock on|off > > trace-event qemu_mutex_unlock on|off > > or > > trace-event qemu_mutex* on|off > > > > Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> > > Some improvements: > > 1) handle trylock and Win32 too > > 2) pass mutex instead of &mutex->lock, it is the same but the latter is > unnecessarily obfuscated > > 3) also trace unlock/lock around cond_wait > > 4) trace "unlocked" before calling pthread_mutex_unlock, so that it is > always placed before the next "locked" tracepoint. I'm working on it Thanks for your review! 
> > diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c > index bf5756763d..46f4c08e6d 100644 > --- a/util/qemu-thread-posix.c > +++ b/util/qemu-thread-posix.c > @@ -62,7 +62,7 @@ void qemu_mutex_lock(QemuMutex *mutex) > if (err) > error_exit(err, __func__); > > - trace_qemu_mutex_lock(&mutex->lock); > + trace_qemu_mutex_locked(mutex); > } > > int qemu_mutex_trylock(QemuMutex *mutex) > @@ -71,7 +71,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > err = pthread_mutex_trylock(&mutex->lock); > if (err == 0) { > - trace_qemu_mutex_lock(&mutex->lock); > + trace_qemu_mutex_locked(mutex); > return 0; > } > if (err == EBUSY) { > @@ -84,11 +84,10 @@ void qemu_mutex_unlock(QemuMutex *mutex) > { > int err; > > + trace_qemu_mutex_unlocked(mutex); > err = pthread_mutex_unlock(&mutex->lock); > if (err) > error_exit(err, __func__); > - > - trace_qemu_mutex_unlock(&mutex->lock); > } > > void qemu_rec_mutex_init(QemuRecMutex *mutex) > @@ -145,7 +144,9 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > { > int err; > > + trace_qemu_mutex_unlocked(mutex); > err = pthread_cond_wait(&cond->cond, &mutex->lock); > + trace_qemu_mutex_locked(mutex); > if (err) > error_exit(err, __func__); > } > diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c > index d3c87bc89e..0dc3ae7756 100644 > --- a/util/qemu-thread-win32.c > +++ b/util/qemu-thread-win32.c > @@ -55,6 +55,7 @@ void qemu_mutex_destroy(QemuMutex *mutex) > void qemu_mutex_lock(QemuMutex *mutex) > { > AcquireSRWLockExclusive(&mutex->lock); > + trace_qemu_mutex_locked(mutex); > } > > int qemu_mutex_trylock(QemuMutex *mutex) > @@ -64,6 +64,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > owned = TryAcquireSRWLockExclusive(&mutex->lock); > if (owned) { > + trace_qemu_mutex_locked(mutex); > return 0; > } > return -EBUSY; > @@ -72,6 +72,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > void qemu_mutex_unlock(QemuMutex *mutex) > { > + trace_qemu_mutex_unlocked(mutex); > 
ReleaseSRWLockExclusive(&mutex->lock); > } > > @@ -124,7 +124,9 @@ void qemu_cond_broadcast(QemuCond *cond) > > void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > { > + trace_qemu_mutex_unlocked(mutex); > SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0); > + trace_qemu_mutex_locked(mutex); > } > > void qemu_sem_init(QemuSemaphore *sem, int init) > diff --git a/util/trace-events b/util/trace-events > index 70f62124e1..fa540c620b 100644 > --- a/util/trace-events > +++ b/util/trace-events > @@ -57,5 +57,5 @@ lockcnt_futex_wait_resume(const void *lockcnt, int > new) "lockcnt %p after wait: > lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter" > > # util/qemu-thread-posix.c > -qemu_mutex_lock(void *lock) "locked mutex %p" > -qemu_mutex_unlock(void *lock) "unlocked mutex %p" > +qemu_mutex_locked(void *lock) "locked mutex %p" > +qemu_mutex_unlocked(void *lock) "unlocked mutex %p" >
----- Original Message ----- > From: joserz@linux.vnet.ibm.com > To: "Paolo Bonzini" <pbonzini@redhat.com> > Cc: famz@redhat.com, qemu-devel@nongnu.org, stefanha@redhat.com > Sent: Thursday, April 27, 2017 4:59:26 PM > Subject: Re: [Qemu-devel] [PATCH v2] trace: add qemu mutex lock and unlock trace events > > On Thu, Apr 27, 2017 at 10:55:04AM +0200, Paolo Bonzini wrote: > > > > > > On 24/04/2017 19:19, Jose Ricardo Ziviani wrote: > > > These trace events were very useful to help me to understand and find a > > > reordering issue in vfio, for example: > > > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > > > that also helped me to see the desired result after the fix: > > > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4) > > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > > > So it could be a good idea to have these traces implemented. It's worth > > > mentioning that they should be surgically enabled during the debugging, > > > otherwise it can flood the trace logs with lock/unlock messages. 
> > > > > > How to use it: > > > trace-event qemu_mutex_lock on|off > > > trace-event qemu_mutex_unlock on|off > > > or > > > trace-event qemu_mutex* on|off > > > > > > Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> > > > > Some improvements: > > > > 1) handle trylock and Win32 too > > > > 2) pass mutex instead of &mutex->lock, it is the same but the latter is > > unnecessarily obfuscated > > > > 3) also trace unlock/lock around cond_wait > > > > 4) trace "unlocked" before calling pthread_mutex_unlock, so that it is > > always placed before the next "locked" tracepoint. > > I'm working on it > Thanks for your review! No need, if you agree with the change below I can just queue the modified patch. Paolo > > > > diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c > > index bf5756763d..46f4c08e6d 100644 > > --- a/util/qemu-thread-posix.c > > +++ b/util/qemu-thread-posix.c > > @@ -62,7 +62,7 @@ void qemu_mutex_lock(QemuMutex *mutex) > > if (err) > > error_exit(err, __func__); > > > > - trace_qemu_mutex_lock(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > } > > > > int qemu_mutex_trylock(QemuMutex *mutex) > > @@ -71,7 +71,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > err = pthread_mutex_trylock(&mutex->lock); > > if (err == 0) { > > - trace_qemu_mutex_lock(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > return 0; > > } > > if (err == EBUSY) { > > @@ -84,11 +84,10 @@ void qemu_mutex_unlock(QemuMutex *mutex) > > { > > int err; > > > > + trace_qemu_mutex_unlocked(mutex); > > err = pthread_mutex_unlock(&mutex->lock); > > if (err) > > error_exit(err, __func__); > > - > > - trace_qemu_mutex_unlock(&mutex->lock); > > } > > > > void qemu_rec_mutex_init(QemuRecMutex *mutex) > > @@ -145,7 +144,9 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > > { > > int err; > > > > + trace_qemu_mutex_unlocked(mutex); > > err = pthread_cond_wait(&cond->cond, &mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > if (err) > > 
error_exit(err, __func__); > > } > > diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c > > index d3c87bc89e..0dc3ae7756 100644 > > --- a/util/qemu-thread-win32.c > > +++ b/util/qemu-thread-win32.c > > @@ -55,6 +55,7 @@ void qemu_mutex_destroy(QemuMutex *mutex) > > void qemu_mutex_lock(QemuMutex *mutex) > > { > > AcquireSRWLockExclusive(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > } > > > > int qemu_mutex_trylock(QemuMutex *mutex) > > @@ -64,6 +64,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > owned = TryAcquireSRWLockExclusive(&mutex->lock); > > if (owned) { > > + trace_qemu_mutex_locked(mutex); > > return 0; > > } > > return -EBUSY; > > @@ -72,6 +72,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > void qemu_mutex_unlock(QemuMutex *mutex) > > { > > + trace_qemu_mutex_unlocked(mutex); > > ReleaseSRWLockExclusive(&mutex->lock); > > } > > > > @@ -124,7 +124,9 @@ void qemu_cond_broadcast(QemuCond *cond) > > > > void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > > { > > + trace_qemu_mutex_unlocked(mutex); > > SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0); > > + trace_qemu_mutex_locked(mutex); > > } > > > > void qemu_sem_init(QemuSemaphore *sem, int init) > > diff --git a/util/trace-events b/util/trace-events > > index 70f62124e1..fa540c620b 100644 > > --- a/util/trace-events > > +++ b/util/trace-events > > @@ -57,5 +57,5 @@ lockcnt_futex_wait_resume(const void *lockcnt, int > > new) "lockcnt %p after wait: > > lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter" > > > > # util/qemu-thread-posix.c > > -qemu_mutex_lock(void *lock) "locked mutex %p" > > -qemu_mutex_unlock(void *lock) "unlocked mutex %p" > > +qemu_mutex_locked(void *lock) "locked mutex %p" > > +qemu_mutex_unlocked(void *lock) "unlocked mutex %p" > > > > >
On Thu, Apr 27, 2017 at 11:59:26AM -0300, joserz@linux.vnet.ibm.com wrote: > On Thu, Apr 27, 2017 at 10:55:04AM +0200, Paolo Bonzini wrote: > > > > > > On 24/04/2017 19:19, Jose Ricardo Ziviani wrote: > > > These trace events were very useful to help me to understand and find a > > > reordering issue in vfio, for example: > > > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2020c, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xa0000, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > > > that also helped me to see the desired result after the fix: > > > > > > qemu_mutex_lock locked mutex 0x10905ad8 > > > vfio_region_write (0001:03:00.0:region1+0xc0, 0x2000c, 4) > > > vfio_region_write (0001:03:00.0:region1+0xc4, 0xb0000, 4) > > > qemu_mutex_unlock unlocked mutex 0x10905ad8 > > > > > > So it could be a good idea to have these traces implemented. It's worth > > > mentioning that they should be surgically enabled during the debugging, > > > otherwise it can flood the trace logs with lock/unlock messages. > > > > > > How to use it: > > > trace-event qemu_mutex_lock on|off > > > trace-event qemu_mutex_unlock on|off > > > or > > > trace-event qemu_mutex* on|off > > > > > > Signed-off-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> > > > > Some improvements: > > > > 1) handle trylock and Win32 too > > > > 2) pass mutex instead of &mutex->lock, it is the same but the latter is > > unnecessarily obfuscated > > > > 3) also trace unlock/lock around cond_wait > > > > 4) trace "unlocked" before calling pthread_mutex_unlock, so that it is > > always placed before the next "locked" tracepoint. > > I'm working on it > Thanks for your review! Ops! just saw you already did it. Thanks. 
Reviewed-by: Jose Ricardo Ziviani <joserz@linux.vnet.ibm.com> > > > > > diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c > > index bf5756763d..46f4c08e6d 100644 > > --- a/util/qemu-thread-posix.c > > +++ b/util/qemu-thread-posix.c > > @@ -62,7 +62,7 @@ void qemu_mutex_lock(QemuMutex *mutex) > > if (err) > > error_exit(err, __func__); > > > > - trace_qemu_mutex_lock(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > } > > > > int qemu_mutex_trylock(QemuMutex *mutex) > > @@ -71,7 +71,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > err = pthread_mutex_trylock(&mutex->lock); > > if (err == 0) { > > - trace_qemu_mutex_lock(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > return 0; > > } > > if (err == EBUSY) { > > @@ -84,11 +84,10 @@ void qemu_mutex_unlock(QemuMutex *mutex) > > { > > int err; > > > > + trace_qemu_mutex_unlocked(mutex); > > err = pthread_mutex_unlock(&mutex->lock); > > if (err) > > error_exit(err, __func__); > > - > > - trace_qemu_mutex_unlock(&mutex->lock); > > } > > > > void qemu_rec_mutex_init(QemuRecMutex *mutex) > > @@ -145,7 +144,9 @@ void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > > { > > int err; > > > > + trace_qemu_mutex_unlocked(mutex); > > err = pthread_cond_wait(&cond->cond, &mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > if (err) > > error_exit(err, __func__); > > } > > diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c > > index d3c87bc89e..0dc3ae7756 100644 > > --- a/util/qemu-thread-win32.c > > +++ b/util/qemu-thread-win32.c > > @@ -55,6 +55,7 @@ void qemu_mutex_destroy(QemuMutex *mutex) > > void qemu_mutex_lock(QemuMutex *mutex) > > { > > AcquireSRWLockExclusive(&mutex->lock); > > + trace_qemu_mutex_locked(mutex); > > } > > > > int qemu_mutex_trylock(QemuMutex *mutex) > > @@ -64,6 +64,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > owned = TryAcquireSRWLockExclusive(&mutex->lock); > > if (owned) { > > + trace_qemu_mutex_locked(mutex); > > return 0; > > } > > return 
-EBUSY; > > @@ -72,6 +72,7 @@ int qemu_mutex_trylock(QemuMutex *mutex) > > > > void qemu_mutex_unlock(QemuMutex *mutex) > > { > > + trace_qemu_mutex_unlocked(mutex); > > ReleaseSRWLockExclusive(&mutex->lock); > > } > > > > @@ -124,7 +124,9 @@ void qemu_cond_broadcast(QemuCond *cond) > > > > void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) > > { > > + trace_qemu_mutex_unlocked(mutex); > > SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0); > > + trace_qemu_mutex_locked(mutex); > > } > > > > void qemu_sem_init(QemuSemaphore *sem, int init) > > diff --git a/util/trace-events b/util/trace-events > > index 70f62124e1..fa540c620b 100644 > > --- a/util/trace-events > > +++ b/util/trace-events > > @@ -57,5 +57,5 @@ lockcnt_futex_wait_resume(const void *lockcnt, int > > new) "lockcnt %p after wait: > > lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter" > > > > # util/qemu-thread-posix.c > > -qemu_mutex_lock(void *lock) "locked mutex %p" > > -qemu_mutex_unlock(void *lock) "unlocked mutex %p" > > +qemu_mutex_locked(void *lock) "locked mutex %p" > > +qemu_mutex_unlocked(void *lock) "unlocked mutex %p" > >
© 2016 - 2024 Red Hat, Inc.