[PATCH v2] lockable: Replace locks with lock guard macros

Simran Singhal posted 1 patch 4 years, 5 months ago
Test docker-mingw@fedora passed
Test docker-quick@centos7 passed
Test checkpatch passed
Test FreeBSD passed
Test asan passed
Patches applied successfully
git fetch https://github.com/patchew-project/qemu tags/patchew/20200402065035.GA15477@simran-Inspiron-5558
Maintainers: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>, Yuval Shaia <yuval.shaia.ml@gmail.com>
[PATCH v2] lockable: Replace locks with lock guard macros
Posted by Simran Singhal 4 years, 5 months ago
Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).

Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
---
Changes in v2:
        -Drop changes in file hw/rdma/rdma_utils.c
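
For reviewers unfamiliar with the macros: QEMU_LOCK_GUARD takes the lock
immediately and releases it when the enclosing scope is left (it is built
on g_autoptr cleanup handling), which is why the "goto unlock" paths
below can become plain returns; WITH_QEMU_LOCK_GUARD confines the
critical section to the attached block. A minimal standalone sketch of
the underlying cleanup-attribute idea (illustrative only, not the actual
lockable.h code; the guard name is fixed here, so at most one guard per
scope, whereas QEMU generates unique names):

    #include <pthread.h>
    #include <stdio.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Lock now; the unlock runs automatically when the scope is left. */
    #define LOCK_GUARD(m)                                              \
        __attribute__((cleanup(unlock_cleanup), unused))               \
        pthread_mutex_t *lock_guard_ = (pthread_mutex_lock(m), (m))

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static int lookup(int key)
    {
        LOCK_GUARD(&table_lock);
        if (key == 42) {
            return 0;   /* early return: the mutex is still released */
        }
        return -1;      /* no unlock label needed */
    }

    int main(void)
    {
        printf("lookup(42) = %d\n", lookup(42));
        return 0;
    }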

 hw/hyperv/hyperv.c     | 15 ++++++-------
 hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
 hw/rdma/rdma_rm.c      |  3 +--
 3 files changed, 33 insertions(+), 35 deletions(-)

diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index 8ca3706f5b..4ddafe1de1 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -15,6 +15,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 3dd39fe1a7..db7e5c8be5 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
-        trace_rdma_poll_cq(ne, ibcq);
+            trace_rdma_poll_cq(ne, ibcq);
 
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
 
-            comp_handler(bctx->up_ctx, &wc[i]);
+                comp_handler(bctx->up_ctx, &wc[i]);
 
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
 
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 7e9ea283c9..60957f88db 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
-- 
2.17.1


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Yuval Shaia 4 years, 5 months ago
On Thu, 2 Apr 2020 at 09:50, Simran Singhal <singhalsimran0@gmail.com>
wrote:

> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
>         -Drop changes in file hw/rdma/rdma_utils.c
>

So I guess we are expected to see this back soon, right?

Ignore my r-b and t-b for v1; I did not encounter the build errors, and
this one is okay too.

For the hw/rdma stuff:

Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>

Thanks,
Yuval


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Marcel Apfelbaum 4 years, 5 months ago
Hi Simran,

On 4/2/20 9:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>

Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>

Thanks,
Marcel


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Philippe Mathieu-Daudé 4 years, 5 months ago
On 4/2/20 8:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
> 
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---

You can give a hint to reviewers by adding:

This patch is based on:
"Replace locks with lock guard macros"
https://www.mail-archive.com/qemu-devel@nongnu.org/msg694412.html

A similar hint for the build bots:
Based-on: <20200404042108.389635-1-dnbrdsky@gmail.com>

You should Cc Paolo Bonzini on your v3, since he'll likely queue the 
base series.

Patch looks good otherwise.


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Stefan Hajnoczi 4 years, 5 months ago
On Thu, Apr 02, 2020 at 12:20:35PM +0530, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
> 
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
>         -Drop changes in file hw/rdma/rdma_utils.c
> 
>  hw/hyperv/hyperv.c     | 15 ++++++-------
>  hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
>  hw/rdma/rdma_rm.c      |  3 +--
>  3 files changed, 33 insertions(+), 35 deletions(-)

Thanks, applied to my block-next tree:
https://github.com/stefanha/qemu/commits/block-next

Stefan
Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Marcel Apfelbaum 4 years, 5 months ago
Hi Simran,

On 4/2/20 9:50 AM, Simran Singhal wrote:
> Replace manual lock()/unlock() calls with lock guard macros
> (QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
>
> Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
> ---
> Changes in v2:
>          -Drop changes in file hw/rdma/rdma_utils.c
>
>   hw/hyperv/hyperv.c     | 15 ++++++-------
>   hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
>   hw/rdma/rdma_rm.c      |  3 +--
>   3 files changed, 33 insertions(+), 35 deletions(-)
>
> diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
> index 8ca3706f5b..4ddafe1de1 100644
> --- a/hw/hyperv/hyperv.c
> +++ b/hw/hyperv/hyperv.c
> @@ -15,6 +15,7 @@
>   #include "sysemu/kvm.h"
>   #include "qemu/bitops.h"
>   #include "qemu/error-report.h"
> +#include "qemu/lockable.h"
>   #include "qemu/queue.h"
>   #include "qemu/rcu.h"
>   #include "qemu/rcu_queue.h"
> @@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
>       int ret;
>       MsgHandler *mh;
>   
> -    qemu_mutex_lock(&handlers_mutex);
> +    QEMU_LOCK_GUARD(&handlers_mutex);

It does not pass compilation:
     export ARCH=x86_64
     make docker-image-fedora V=1 NETWORK=1
     make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1

Error:

  CC      x86_64-softmmu/hw/net/virtio-net.o
/tmp/qemu-test/src/hw/hyperv/hyperv.c:495:5: error: unused variable 
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
     QEMU_LOCK_GUARD(&handlers_mutex);
     ^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from 
macro 'QEMU_LOCK_GUARD'
     g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
                             ^
<scratch space>:24:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
/tmp/qemu-test/src/hw/hyperv/hyperv.c:568:5: error: unused variable 
'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
     QEMU_LOCK_GUARD(&handlers_mutex);
     ^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from 
macro 'QEMU_LOCK_GUARD'
     g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
                             ^
<scratch space>:39:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
2 errors generated.

I suggest splitting it into a pvrdma patch and a hyperv patch anyway.
It would be nice to get an ack from the hyperv maintainer; adding Roman.

Thanks,
Marcel


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Julia Suvorova 4 years, 5 months ago
On Sat, Apr 18, 2020 at 2:03 PM Marcel Apfelbaum
<marcel.apfelbaum@gmail.com> wrote:
> It does not pass compilation:
>      export ARCH=x86_64
>      make docker-image-fedora V=1 NETWORK=1
>      make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu NETWORK=1

This is a problem with the macros themselves, not with this patch.
The '##' concatenation problem and the warnings are fixed in
  [PATCH v5 0/2] Replaced locks with lock guard macros
and this patch should be based on that series.
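
To illustrate both issues (a standalone sketch, not the actual
lockable.h code; __COUNTER__ is a GCC/Clang extension): '##' pastes its
operands before __COUNTER__ is expanded, so every guard is literally
named qemu_lockable_auto__COUNTER__, as the error messages above show,
and the otherwise-unreferenced guard variable also trips
-Wunused-variable. Expanding through a two-level glue macro lets
__COUNTER__ expand first, and an unused attribute silences the warning:

    #include <stdio.h>

    /* Broken: '##' pastes before __COUNTER__ expands, so every use
     * declares the same identifier, guard___COUNTER__. */
    #define GUARD_BROKEN int guard_##__COUNTER__

    /* Fixed: the extra expansion level lets __COUNTER__ expand first. */
    #define GLUE_(a, b) a##b
    #define GLUE(a, b)  GLUE_(a, b)
    #define GUARD_OK    __attribute__((unused)) int GLUE(guard_, __COUNTER__)

    int main(void)
    {
        GUARD_OK = 1;   /* declares e.g. guard_0 */
        GUARD_OK = 2;   /* declares e.g. guard_1: no redefinition */
        printf("two distinct guard variables declared\n");
        return 0;
    }

With the indirection each QEMU_LOCK_GUARD gets its own variable, so two
guards in the same scope no longer collide.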

Best regards, Julia Suvorova.

Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by Marcel Apfelbaum 4 years, 5 months ago
Hi Julia,

On 4/19/20 5:46 AM, Julia Suvorova wrote:
> This is a problem with the macros themselves, not with this patch.
> The '##' concatenation problem and the warnings are fixed in
>   [PATCH v5 0/2] Replaced locks with lock guard macros
> and this patch should be based on that series.

Thanks for the pointer.
I'll wait for the fix to be merged, then.

Thanks,
Marcel


Re: [PATCH v2] lockable: Replace locks with lock guard macros
Posted by no-reply@patchew.org 4 years, 5 months ago
Patchew URL: https://patchew.org/QEMU/20200402065035.GA15477@simran-Inspiron-5558/



Hi,

This series failed the asan build test. Please find the testing commands and
their output below. If you have Docker installed, you can probably reproduce it
locally.

=== TEST SCRIPT BEGIN ===
#!/bin/bash
export ARCH=x86_64
make docker-image-fedora V=1 NETWORK=1
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
=== TEST SCRIPT END ===

  CC      x86_64-softmmu/hw/i386/pc_piix.o
  CC      x86_64-softmmu/hw/i386/pc_q35.o
  CC      x86_64-softmmu/hw/i386/microvm.o
/tmp/qemu-test/src/hw/hyperv/hyperv.c:495:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
    QEMU_LOCK_GUARD(&handlers_mutex);
    ^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
<scratch space>:24:1: note: expanded from here
qemu_lockable_auto__COUNTER__
^
/tmp/qemu-test/src/hw/hyperv/hyperv.c:568:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
    QEMU_LOCK_GUARD(&handlers_mutex);
    ^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
qemu_lockable_auto__COUNTER__
^
2 errors generated.
make[1]: *** [/tmp/qemu-test/src/rules.mak:69: hw/hyperv/hyperv.o] Error 1
make[1]: *** Waiting for unfinished jobs....
/tmp/qemu-test/src/hw/rdma/rdma_rm.c:150:5: error: unused variable 'qemu_lockable_auto__COUNTER__' [-Werror,-Wunused-variable]
    QEMU_LOCK_GUARD(&tbl->lock);
    ^
/tmp/qemu-test/src/include/qemu/lockable.h:173:29: note: expanded from macro 'QEMU_LOCK_GUARD'
---
qemu_lockable_auto__COUNTER__
^
1 error generated.
make[1]: *** [/tmp/qemu-test/src/rules.mak:69: hw/rdma/rdma_rm.o] Error 1
make: *** [Makefile:527: x86_64-softmmu/all] Error 2
Traceback (most recent call last):
  File "./tests/docker/docker.py", line 664, in <module>
    sys.exit(main())
---
    raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['sudo', '-n', 'docker', 'run', '--label', 'com.qemu.instance.uuid=528913c5fbdd4e42bf1946168442f34c', '-u', '1003', '--security-opt', 'seccomp=unconfined', '--rm', '-e', 'TARGET_LIST=x86_64-softmmu', '-e', 'EXTRA_CONFIGURE_OPTS=', '-e', 'V=', '-e', 'J=14', '-e', 'DEBUG=', '-e', 'SHOW_ENV=', '-e', 'CCACHE_DIR=/var/tmp/ccache', '-v', '/home/patchew2/.cache/qemu-docker-ccache:/var/tmp/ccache:z', '-v', '/var/tmp/patchew-tester-tmp-hg7wzjtd/src/docker-src.2020-04-02-03.43.13.4475:/var/tmp/qemu:z,ro', 'qemu:fedora', '/var/tmp/qemu/run', 'test-debug']' returned non-zero exit status 2.
filter=--filter=label=com.qemu.instance.uuid=528913c5fbdd4e42bf1946168442f34c
make[1]: *** [docker-run] Error 1
make[1]: Leaving directory `/var/tmp/patchew-tester-tmp-hg7wzjtd/src'
make: *** [docker-run-test-debug@fedora] Error 2

real    3m40.193s
user    0m8.084s


The full log is available at
http://patchew.org/logs/20200402065035.GA15477@simran-Inspiron-5558/testing.asan/?type=message.
---
Email generated automatically by Patchew [https://patchew.org/].
Please send your feedback to patchew-devel@redhat.com