[PATCH 05/43] esx: convert virMutex to GMutex

Posted by Rafael Fonseca 5 years, 10 months ago
Signed-off-by: Rafael Fonseca <r4f4rfs@gmail.com>
---
 src/esx/esx_stream.c | 25 +++++++---------
 src/esx/esx_vi.c     | 68 +++++++++++++++-----------------------------
 src/esx/esx_vi.h     |  6 ++--
 3 files changed, 36 insertions(+), 63 deletions(-)

diff --git a/src/esx/esx_stream.c b/src/esx/esx_stream.c
index fe3c42ae02..474c0f4739 100644
--- a/src/esx/esx_stream.c
+++ b/src/esx/esx_stream.c
@@ -203,6 +203,7 @@ esxStreamSend(virStreamPtr stream, const char *data, size_t nbytes)
     int result = -1;
     esxStreamPrivate *priv = stream->privateData;
     int status;
+    g_autoptr(GMutexLocker) locker = NULL;
 
     if (nbytes == 0)
         return 0;
@@ -217,7 +218,7 @@ esxStreamSend(virStreamPtr stream, const char *data, size_t nbytes)
         return -1;
     }
 
-    virMutexLock(&priv->curl->lock);
+    locker = g_mutex_locker_new(&priv->curl->lock);
 
     priv->buffer = (char *)data;
     priv->buffer_size = nbytes;
@@ -225,7 +226,7 @@ esxStreamSend(virStreamPtr stream, const char *data, size_t nbytes)
 
     if (stream->flags & VIR_STREAM_NONBLOCK) {
         if (esxStreamTransfer(priv, false) < 0)
-            goto cleanup;
+            return -1;
 
         if (priv->buffer_used < priv->buffer_size)
             result = priv->buffer_size - priv->buffer_used;
@@ -236,7 +237,7 @@ esxStreamSend(virStreamPtr stream, const char *data, size_t nbytes)
             status = esxStreamTransfer(priv, true);
 
             if (status < 0)
-                goto cleanup;
+                return -1;
 
             if (status > 0)
                 break;
@@ -245,9 +246,6 @@ esxStreamSend(virStreamPtr stream, const char *data, size_t nbytes)
         result = priv->buffer_size - priv->buffer_used;
     }
 
- cleanup:
-    virMutexUnlock(&priv->curl->lock);
-
     return result;
 }
 
@@ -260,6 +258,7 @@ esxStreamRecvFlags(virStreamPtr stream,
     int result = -1;
     esxStreamPrivate *priv = stream->privateData;
     int status;
+    g_autoptr(GMutexLocker) locker = NULL;
 
     virCheckFlags(0, -1);
 
@@ -276,7 +275,7 @@ esxStreamRecvFlags(virStreamPtr stream,
         return -1;
     }
 
-    virMutexLock(&priv->curl->lock);
+    locker = g_mutex_locker_new(&priv->curl->lock);
 
     priv->buffer = data;
     priv->buffer_size = nbytes;
@@ -296,7 +295,7 @@ esxStreamRecvFlags(virStreamPtr stream,
         result = priv->buffer_used;
     } else if (stream->flags & VIR_STREAM_NONBLOCK) {
         if (esxStreamTransfer(priv, false) < 0)
-            goto cleanup;
+            return -1;
 
         if (priv->buffer_used > 0)
             result = priv->buffer_used;
@@ -307,7 +306,7 @@ esxStreamRecvFlags(virStreamPtr stream,
             status = esxStreamTransfer(priv, true);
 
             if (status < 0)
-                goto cleanup;
+                return -1;
 
             if (status > 0)
                 break;
@@ -316,9 +315,6 @@ esxStreamRecvFlags(virStreamPtr stream,
         result = priv->buffer_used;
     }
 
- cleanup:
-    virMutexUnlock(&priv->curl->lock);
-
     return result;
 }
 
@@ -346,11 +342,12 @@ esxStreamClose(virStreamPtr stream, bool finish)
 {
     int result = 0;
     esxStreamPrivate *priv = stream->privateData;
+    g_autoptr(GMutexLocker) locker = NULL;
 
     if (!priv)
         return 0;
 
-    virMutexLock(&priv->curl->lock);
+    locker = g_mutex_locker_new(&priv->curl->lock);
 
     if (finish && priv->backlog_used > 0) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -360,8 +357,6 @@ esxStreamClose(virStreamPtr stream, bool finish)
 
     stream->privateData = NULL;
 
-    virMutexUnlock(&priv->curl->lock);
-
     esxFreeStreamPrivate(&priv);
 
     return result;
diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
index 16690edfbe..ed6c6c28cd 100644
--- a/src/esx/esx_vi.c
+++ b/src/esx/esx_vi.c
@@ -112,7 +112,7 @@ ESX_VI__TEMPLATE__FREE(CURL,
     if (item->headers)
         curl_slist_free_all(item->headers);
 
-    virMutexDestroy(&item->lock);
+    g_mutex_clear(&item->lock);
 })
 
 static size_t
@@ -356,11 +356,7 @@ esxVI_CURL_Connect(esxVI_CURL *curl, esxUtil_ParsedUri *parsedUri)
                          parsedUri->proxy_port);
     }
 
-    if (virMutexInit(&curl->lock) < 0) {
-        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                       _("Could not initialize CURL mutex"));
-        return -1;
-    }
+    g_mutex_init(&curl->lock);
 
     return 0;
 }
@@ -392,7 +388,7 @@ esxVI_CURL_Download(esxVI_CURL *curl, const char *url, char **content,
         range = g_strdup_printf("%llu-", offset);
     }
 
-    virMutexLock(&curl->lock);
+    g_mutex_lock(&curl->lock);
 
     curl_easy_setopt(curl->handle, CURLOPT_URL, url);
     curl_easy_setopt(curl->handle, CURLOPT_RANGE, range);
@@ -402,7 +398,7 @@ esxVI_CURL_Download(esxVI_CURL *curl, const char *url, char **content,
 
     responseCode = esxVI_CURL_Perform(curl, url);
 
-    virMutexUnlock(&curl->lock);
+    g_mutex_unlock(&curl->lock);
 
     if (responseCode < 0) {
         goto cleanup;
@@ -433,14 +429,13 @@ int
 esxVI_CURL_Upload(esxVI_CURL *curl, const char *url, const char *content)
 {
     int responseCode = 0;
+    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
 
     if (!content) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid argument"));
         return -1;
     }
 
-    virMutexLock(&curl->lock);
-
     curl_easy_setopt(curl->handle, CURLOPT_URL, url);
     curl_easy_setopt(curl->handle, CURLOPT_RANGE, NULL);
     curl_easy_setopt(curl->handle, CURLOPT_READDATA, &content);
@@ -449,8 +444,6 @@ esxVI_CURL_Upload(esxVI_CURL *curl, const char *url, const char *content)
 
     responseCode = esxVI_CURL_Perform(curl, url);
 
-    virMutexUnlock(&curl->lock);
-
     if (responseCode < 0) {
         return -1;
     } else if (responseCode != 200 && responseCode != 201) {
@@ -494,7 +487,7 @@ esxVI_SharedCURL_Lock(CURL *handle G_GNUC_UNUSED, curl_lock_data data,
         return;
     }
 
-    virMutexLock(&shared->locks[i]);
+    g_mutex_lock(&shared->locks[i]);
 }
 
 static void
@@ -522,7 +515,7 @@ esxVI_SharedCURL_Unlock(CURL *handle G_GNUC_UNUSED, curl_lock_data data,
         return;
     }
 
-    virMutexUnlock(&shared->locks[i]);
+    g_mutex_unlock(&shared->locks[i]);
 }
 
 /* esxVI_SharedCURL_Alloc */
@@ -543,7 +536,7 @@ ESX_VI__TEMPLATE__FREE(SharedCURL,
         curl_share_cleanup(item->handle);
 
     for (i = 0; i < G_N_ELEMENTS(item->locks); ++i)
-        virMutexDestroy(&item->locks[i]);
+        g_mutex_clear(&item->locks[i]);
 })
 
 int
@@ -583,22 +576,18 @@ esxVI_SharedCURL_Add(esxVI_SharedCURL *shared, esxVI_CURL *curl)
                           CURL_LOCK_DATA_DNS);
 
         for (i = 0; i < G_N_ELEMENTS(shared->locks); ++i) {
-            if (virMutexInit(&shared->locks[i]) < 0) {
-                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                               _("Could not initialize a CURL (share) mutex"));
-                return -1;
-            }
+            g_mutex_init(&shared->locks[i]);
         }
     }
 
-    virMutexLock(&curl->lock);
+    g_mutex_lock(&curl->lock);
 
     curl_easy_setopt(curl->handle, CURLOPT_SHARE, shared->handle);
 
     curl->shared = shared;
     ++shared->count;
 
-    virMutexUnlock(&curl->lock);
+    g_mutex_unlock(&curl->lock);
 
     return 0;
 }
@@ -606,6 +595,8 @@ esxVI_SharedCURL_Add(esxVI_SharedCURL *shared, esxVI_CURL *curl)
 int
 esxVI_SharedCURL_Remove(esxVI_SharedCURL *shared, esxVI_CURL *curl)
 {
+    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
+
     if (!curl->handle) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Cannot unshare uninitialized CURL handle"));
@@ -623,15 +614,11 @@ esxVI_SharedCURL_Remove(esxVI_SharedCURL *shared, esxVI_CURL *curl)
         return -1;
     }
 
-    virMutexLock(&curl->lock);
-
     curl_easy_setopt(curl->handle, CURLOPT_SHARE, NULL);
 
     curl->shared = NULL;
     --shared->count;
 
-    virMutexUnlock(&curl->lock);
-
     return 0;
 }
 
@@ -661,6 +648,8 @@ ESX_VI__TEMPLATE__FREE(MultiCURL,
 int
 esxVI_MultiCURL_Add(esxVI_MultiCURL *multi, esxVI_CURL *curl)
 {
+    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
+
     if (!curl->handle) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Cannot add uninitialized CURL handle to a multi handle"));
@@ -684,21 +673,19 @@ esxVI_MultiCURL_Add(esxVI_MultiCURL *multi, esxVI_CURL *curl)
 
     }
 
-    virMutexLock(&curl->lock);
-
     curl_multi_add_handle(multi->handle, curl->handle);
 
     curl->multi = multi;
     ++multi->count;
 
-    virMutexUnlock(&curl->lock);
-
     return 0;
 }
 
 int
 esxVI_MultiCURL_Remove(esxVI_MultiCURL *multi, esxVI_CURL *curl)
 {
+    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
+
     if (!curl->handle) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Cannot remove uninitialized CURL handle from a "
@@ -718,15 +705,11 @@ esxVI_MultiCURL_Remove(esxVI_MultiCURL *multi, esxVI_CURL *curl)
         return -1;
     }
 
-    virMutexLock(&curl->lock);
-
     curl_multi_remove_handle(multi->handle, curl->handle);
 
     curl->multi = NULL;
     --multi->count;
 
-    virMutexUnlock(&curl->lock);
-
     return 0;
 }
 
@@ -809,7 +792,7 @@ ESX_VI__TEMPLATE__ALLOC(Context)
 ESX_VI__TEMPLATE__FREE(Context,
 {
     if (item->sessionLock)
-        virMutexDestroy(item->sessionLock);
+        g_mutex_clear(item->sessionLock);
 
     esxVI_CURL_Free(&item->curl);
     VIR_FREE(item->url);
@@ -870,11 +853,7 @@ esxVI_Context_Connect(esxVI_Context *ctx, const char *url,
         goto cleanup;
 
 
-    if (virMutexInit(ctx->sessionLock) < 0) {
-        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                       _("Could not initialize session mutex"));
-        goto cleanup;
-    }
+    g_mutex_init(ctx->sessionLock);
 
     if (esxVI_RetrieveServiceContent(ctx, &ctx->service) < 0)
         goto cleanup;
@@ -1262,7 +1241,7 @@ esxVI_Context_Execute(esxVI_Context *ctx, const char *methodName,
     if (esxVI_Response_Alloc(response) < 0)
         return -1;
 
-    virMutexLock(&ctx->curl->lock);
+    g_mutex_lock(&ctx->curl->lock);
 
     curl_easy_setopt(ctx->curl->handle, CURLOPT_URL, ctx->url);
     curl_easy_setopt(ctx->curl->handle, CURLOPT_RANGE, NULL);
@@ -1273,7 +1252,7 @@ esxVI_Context_Execute(esxVI_Context *ctx, const char *methodName,
 
     (*response)->responseCode = esxVI_CURL_Perform(ctx->curl, ctx->url);
 
-    virMutexUnlock(&ctx->curl->lock);
+    g_mutex_unlock(&ctx->curl->lock);
 
     if ((*response)->responseCode < 0)
         goto cleanup;
@@ -1908,13 +1887,14 @@ esxVI_EnsureSession(esxVI_Context *ctx)
     esxVI_DynamicProperty *dynamicProperty = NULL;
     esxVI_UserSession *currentSession = NULL;
     char *escapedPassword = NULL;
+    g_autoptr(GMutexLocker) locker = NULL;
 
     if (!ctx->sessionLock) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid call, no mutex"));
         return -1;
     }
 
-    virMutexLock(ctx->sessionLock);
+    locker = g_mutex_locker_new(ctx->sessionLock);
 
     if (!ctx->session) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid call, no session"));
@@ -1969,8 +1949,6 @@ esxVI_EnsureSession(esxVI_Context *ctx)
     result = 0;
 
  cleanup:
-    virMutexUnlock(ctx->sessionLock);
-
     VIR_FREE(escapedPassword);
     esxVI_String_Free(&propertyNameList);
     esxVI_ObjectContent_Free(&sessionManager);
diff --git a/src/esx/esx_vi.h b/src/esx/esx_vi.h
index b960c0900a..949f90a43d 100644
--- a/src/esx/esx_vi.h
+++ b/src/esx/esx_vi.h
@@ -115,7 +115,7 @@ struct _esxVI_ParsedHostCpuIdInfo {
 
 struct _esxVI_CURL {
     CURL *handle;
-    virMutex lock;
+    GMutex lock;
     struct curl_slist *headers;
     char error[CURL_ERROR_SIZE];
     esxVI_SharedCURL *shared;
@@ -137,7 +137,7 @@ int esxVI_CURL_Upload(esxVI_CURL *curl, const char *url, const char *content);
 
 struct _esxVI_SharedCURL {
     CURLSH *handle;
-    virMutex locks[3]; /* share, cookie, dns */
+    GMutex locks[3]; /* share, cookie, dns */
     size_t count; /* number of added easy handle */
 };
 
@@ -184,7 +184,7 @@ struct _esxVI_Context {
     esxVI_ProductLine productLine;
     unsigned long productVersion; /* = 1000000 * major + 1000 * minor + micro */
     esxVI_UserSession *session; /* ... except the session ... */
-    virMutexPtr sessionLock; /* ... that is protected by this mutex */
+    GMutex *sessionLock; /* ... that is protected by this mutex */
     esxVI_Datacenter *datacenter;
     char *datacenterPath; /* including folders */
     esxVI_ComputeResource *computeResource;
-- 
2.25.2


Re: [PATCH 05/43] esx: convert virMutex to GMutex
Posted by Pino Toscano 5 years, 10 months ago
On Friday, 10 April 2020 15:54:32 CEST Rafael Fonseca wrote:
> @@ -346,11 +342,12 @@ esxStreamClose(virStreamPtr stream, bool finish)
>  {
>      int result = 0;
>      esxStreamPrivate *priv = stream->privateData;
> +    g_autoptr(GMutexLocker) locker = NULL;
>  
>      if (!priv)
>          return 0;
>  
> -    virMutexLock(&priv->curl->lock);
> +    locker = g_mutex_locker_new(&priv->curl->lock);
>  
>      if (finish && priv->backlog_used > 0) {
>          virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> @@ -360,8 +357,6 @@ esxStreamClose(virStreamPtr stream, bool finish)
>  
>      stream->privateData = NULL;
>  
> -    virMutexUnlock(&priv->curl->lock);
> -
>      esxFreeStreamPrivate(&priv);

Careful here, this is a problematic situation:
- the lock is indirectly part of the @priv structure
- esxFreeStreamPrivate calls esxVI_CURL_Free(priv->curl)
- esxVI_CURL_Free calls g_mutex_clear(&item->lock) (formerly virMutexDestroy)
- lock is still locked, so it will deadlock (or crash, or something
  not good anyway)

You must unlock the mutex before esxFreeStreamPrivate is called.
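
To illustrate the lifetime issue with plain GLib (a standalone sketch
with made-up names, not the libvirt code): a g_autoptr(GMutexLocker)
only unlocks at the end of the scope, so it has to be dropped
explicitly, e.g. with g_clear_pointer(&locker, g_mutex_locker_free),
before the structure that owns the mutex is destroyed:

#include <glib.h>

typedef struct {
    GMutex lock;
    char *payload;
} Demo;

static void
demo_free(Demo *d)
{
    /* g_mutex_clear() on a locked mutex is undefined behaviour, so the
     * caller must not hold d->lock when this runs. */
    g_mutex_clear(&d->lock);
    g_free(d->payload);
    g_free(d);
}

static void
demo_close(Demo *d)
{
    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&d->lock);

    /* ... work that needs the lock ... */

    /* Unlock explicitly; relying on the autoptr alone would unlock
     * only at the end of the scope, after demo_free() already ran. */
    g_clear_pointer(&locker, g_mutex_locker_free);

    demo_free(d);
}

int
main(void)
{
    Demo *d = g_new0(Demo, 1);

    g_mutex_init(&d->lock);
    d->payload = g_strdup("example");
    demo_close(d);

    return 0;
}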

I did not check other patches of this long series for similar potential
issues, please do check them.

> diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
> index 16690edfbe..ed6c6c28cd 100644
> --- a/src/esx/esx_vi.c
> +++ b/src/esx/esx_vi.c
> @@ -433,14 +429,13 @@ int
>  esxVI_CURL_Upload(esxVI_CURL *curl, const char *url, const char *content)
>  {
>      int responseCode = 0;
> +    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
>  
>      if (!content) {
>          virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid argument"));
>          return -1;
>      }
>  
> -    virMutexLock(&curl->lock);
> -

Careful #2 here about locking earlier: while this is usually not an
issue, it can become one if the code that used to run without the lock
held can also be reached from code paths that already hold the lock.
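
As a standalone GLib sketch of the safer pattern (made-up names, not
the libvirt code): GMutex is not recursive, so taking the lock before
the argument checks is only safe if no caller can reach the function
with the same lock already held; keeping the acquisition after the
cheap validation preserves the original locking window:

#include <glib.h>

static GMutex lock;

static int
upload(const char *content)
{
    g_autoptr(GMutexLocker) locker = NULL;

    if (!content)   /* early return, still without the lock held */
        return -1;

    locker = g_mutex_locker_new(&lock);  /* lock only once we must */

    /* ... perform the transfer under the lock ... */

    return 0;       /* locker unlocks automatically on return */
}

int
main(void)
{
    upload(NULL);      /* rejected before the lock is ever taken */
    upload("payload"); /* locked only for the actual work */
    return 0;
}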

Again, this must be thoroughly checked in the whole patch series.

-- 
Pino Toscano
Re: [PATCH 05/43] esx: convert virMutex to GMutex
Posted by Rafael Fonseca 5 years, 10 months ago
Pino,

thank you for the review. Could you please take a look at patch #42 in
this series? It's the one in which I had to add some explicit unlock
calls, so it'd be good for someone who knows the code to review this
part.

On Tue, Apr 14, 2020 at 8:15 AM Pino Toscano <ptoscano@redhat.com> wrote:
>
> On Friday, 10 April 2020 15:54:32 CEST Rafael Fonseca wrote:
> > @@ -346,11 +342,12 @@ esxStreamClose(virStreamPtr stream, bool finish)
> >  {
> >      int result = 0;
> >      esxStreamPrivate *priv = stream->privateData;
> > +    g_autoptr(GMutexLocker) locker = NULL;
> >
> >      if (!priv)
> >          return 0;
> >
> > -    virMutexLock(&priv->curl->lock);
> > +    locker = g_mutex_locker_new(&priv->curl->lock);
> >
> >      if (finish && priv->backlog_used > 0) {
> >          virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> > @@ -360,8 +357,6 @@ esxStreamClose(virStreamPtr stream, bool finish)
> >
> >      stream->privateData = NULL;
> >
> > -    virMutexUnlock(&priv->curl->lock);
> > -
> >      esxFreeStreamPrivate(&priv);
>
> Careful here, this is a problematic situation:
> - the lock is indirectly part of the @priv structure
> - esxFreeStreamPrivate calls esxVI_CURL_Free(priv->curl)
> - esxVI_CURL_Free calls g_mutex_clear(&item->lock) (formerly virMutexDestroy)
> - lock is still locked, so it will deadlock (or crash, or something
>   not good anyway)
>
> You must unlock the mutex before esxFreeStreamPrivate is called.

Oops, nice catch.

> I did not check other patches of this long series for similar potential
> issues, please do check them.

Ok, will do.

> > diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
> > index 16690edfbe..ed6c6c28cd 100644
> > --- a/src/esx/esx_vi.c
> > +++ b/src/esx/esx_vi.c
> > @@ -433,14 +429,13 @@ int
> >  esxVI_CURL_Upload(esxVI_CURL *curl, const char *url, const char *content)
> >  {
> >      int responseCode = 0;
> > +    g_autoptr(GMutexLocker) locker = g_mutex_locker_new(&curl->lock);
> >
> >      if (!content) {
> >          virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid argument"));
> >          return -1;
> >      }
> >
> > -    virMutexLock(&curl->lock);
> > -
>
> Careful #2 here about locking earlier: while this is usually not an
> issue, it can become one if the code that used to run without the lock
> held can also be reached from code paths that already hold the lock.
>
> Again, this must be thoroughly checked in the whole patch series.

I'll recheck, but I tried to do it only in places where it was obvious
the lock was not held, because the code before the lock was just an
early-return check.


Regards,
-- 
Rafael Fonseca