[PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing

Alejandro Zeise posted 2 patches 3 months, 3 weeks ago
There is a newer version of this series
[PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing
Posted by Alejandro Zeise 3 months, 3 weeks ago
Make the Aspeed HACE module use the new qcrypto accumulative hashing functions
when in scatter-gather accumulative mode. A hash context maintains a
"running hash" as each scatter-gather chunk is received.

Previously, each scatter-gather chunk was cached so the hash could be computed
once the final chunk was received. However, the cache was a shallow copy of
the guest buffers, so once the guest overwrote the memory it had provided to
HACE, the final hash was incorrect.
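
For reference, a condensed sketch of the accumulative flow this patch switches
to (error handling and the surrounding scatter-gather walk are shown in full
in the diff below):

    /* Lazily create a hash context on the first accumulative request */
    if (s->qcrypto_hash_context == NULL &&
        qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
        return;
    }

    /* Fold the current chunks into the running hash; the guest may reuse
     * the source buffers immediately afterwards */
    qcrypto_hash_accumulate_bytesv(algo, s->qcrypto_hash_context, iov, i,
                                   &digest_buf, &digest_len, NULL);

    /* Padding marks the final request: release the context and reset state */
    if (sg_acc_mode_final_request) {
        qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL);
        s->qcrypto_hash_context = NULL;
        s->iov_count = 0;
        s->total_req_len = 0;
    }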

Possibly related to: https://gitlab.com/qemu-project/qemu/-/issues/1121
Buglink: https://github.com/openbmc/qemu/issues/36

Signed-off-by: Alejandro Zeise <alejandro.zeise@seagate.com>
---
 hw/misc/aspeed_hace.c         | 91 ++++++++++++++++++-----------------
 include/hw/misc/aspeed_hace.h |  4 ++
 2 files changed, 51 insertions(+), 44 deletions(-)

diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index c06c04ddc6..8306d8986a 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -1,6 +1,7 @@
 /*
  * ASPEED Hash and Crypto Engine
  *
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
  * Copyright (C) 2021 IBM Corp.
  *
  * Joel Stanley <joel@jms.id.au>
@@ -151,47 +152,15 @@ static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
     return iov_count;
 }
 
-/**
- * Generate iov for accumulative mode.
- *
- * @param s             aspeed hace state object
- * @param iov           iov of the current request
- * @param id            index of the current iov
- * @param req_len       length of the current request
- *
- * @return count of iov
- */
-static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
-                            hwaddr *req_len)
-{
-    uint32_t pad_offset;
-    uint32_t total_msg_len;
-    s->total_req_len += *req_len;
-
-    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
-        if (s->iov_count) {
-            return reconstruct_iov(s, iov, id, &pad_offset);
-        }
-
-        *req_len -= s->total_req_len - total_msg_len;
-        s->total_req_len = 0;
-        iov[id].iov_len = *req_len;
-    } else {
-        s->iov_cache[s->iov_count].iov_base = iov->iov_base;
-        s->iov_cache[s->iov_count].iov_len = *req_len;
-        ++s->iov_count;
-    }
-
-    return id + 1;
-}
-
 static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                               bool acc_mode)
 {
     struct iovec iov[ASPEED_HACE_MAX_SG];
+    uint32_t total_msg_len;
+    uint32_t pad_offset;
     g_autofree uint8_t *digest_buf = NULL;
     size_t digest_len = 0;
-    int niov = 0;
+    bool sg_acc_mode_final_request = false;
     int i;
     void *haddr;
 
@@ -226,8 +195,15 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
             }
             iov[i].iov_base = haddr;
             if (acc_mode) {
-                niov = gen_acc_mode_iov(s, iov, i, &plen);
-
+                s->total_req_len += plen;
+
+                if (has_padding(s, &iov[i], plen, &total_msg_len, &pad_offset)) {
+                    /* Padding being present indicates the final request */
+                    sg_acc_mode_final_request = true;
+                    iov[i].iov_len = pad_offset;
+                } else {
+                    iov[i].iov_len = plen;
+                }
             } else {
                 iov[i].iov_len = plen;
             }
@@ -252,20 +228,42 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
              * required to check whether cache is empty. If no, we should
              * combine cached iov and the current iov.
              */
-            uint32_t total_msg_len;
-            uint32_t pad_offset;
             s->total_req_len += len;
             if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
-                niov = reconstruct_iov(s, iov, 0, &pad_offset);
+                i = reconstruct_iov(s, iov, 0, &pad_offset);
             }
         }
     }
 
-    if (niov) {
-        i = niov;
-    }
+    if (acc_mode) {
+        if (s->qcrypto_hash_context == NULL &&
+            qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: qcrypto failed to create hash context\n",
+                          __func__);
+            return;
+        }
+
+        if (qcrypto_hash_accumulate_bytesv(algo,
+                                           s->qcrypto_hash_context, iov, i,
+                                           &digest_buf,
+                                           &digest_len, NULL) < 0) {
+
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
+            return;
+        }
 
-    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
+        if (sg_acc_mode_final_request) {
+            if (qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL) < 0) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "%s: qcrypto failed to free context\n", __func__);
+            }
+
+            s->qcrypto_hash_context = NULL;
+            s->iov_count = 0;
+            s->total_req_len = 0;
+        }
+    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
         qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
         return;
     }
@@ -397,6 +395,11 @@ static void aspeed_hace_reset(DeviceState *dev)
 {
     struct AspeedHACEState *s = ASPEED_HACE(dev);
 
+    if (s->qcrypto_hash_context != NULL) {
+        qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL);
+        s->qcrypto_hash_context = NULL;
+    }
+
     memset(s->regs, 0, sizeof(s->regs));
     s->iov_count = 0;
     s->total_req_len = 0;
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
index ecb1b67de8..b3c2eb17b7 100644
--- a/include/hw/misc/aspeed_hace.h
+++ b/include/hw/misc/aspeed_hace.h
@@ -1,6 +1,7 @@
 /*
  * ASPEED Hash and Crypto Engine
  *
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
  * Copyright (C) 2021 IBM Corp.
  *
  * SPDX-License-Identifier: GPL-2.0-or-later
@@ -10,6 +11,7 @@
 #define ASPEED_HACE_H
 
 #include "hw/sysbus.h"
+#include "crypto/hash.h"
 
 #define TYPE_ASPEED_HACE "aspeed.hace"
 #define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400"
@@ -35,6 +37,8 @@ struct AspeedHACEState {
 
     MemoryRegion *dram_mr;
     AddressSpace dram_as;
+
+    qcrypto_hash_accumulate_ctx_t *qcrypto_hash_context;
 };
 
 
-- 
2.34.1
Re: [PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing
Posted by Cédric Le Goater 3 months, 3 weeks ago
Hello Alejandro,

On 7/29/24 21:00, Alejandro Zeise wrote:
> Make the Aspeed HACE module use the new qcrypto accumulative hashing functions
> when in scatter-gather accumulative mode. A hash context will maintain a
> "running-hash" as each scatter-gather chunk is received.
> 
> Previously each scatter-gather "chunk" was cached
> so the hash could be computed once the final chunk was received.
> However, the cache was a shallow copy, so once the guest overwrote the
> memory provided to HACE the final hash would not be correct.
> 
> Possibly related to: https://gitlab.com/qemu-project/qemu/-/issues/1121
> Buglink: https://github.com/openbmc/qemu/issues/36


Thanks for these changes.

However, this introduces a regression when compiling QEMU with
--disable-gcrypt. It can be reproduced with :

   make check-avocado AVOCADO_TAGS=machine:ast1030-evb

or download :

   https://github.com/AspeedTech-BMC/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip

unzip and run :

   path/to/qemu-system-arm -M ast1030-evb -kernel ./zephyr.bin -nographic

then, on the console :

   uart:~$ hash test
   sha256_test
   tv[0]:PASS
   tv[1]:PASS
   tv[2]:PASS
   tv[3]:PASS
   tv[4]:PASS
   sha384_test
   tv[0]:hash_final error
   sha512_test
   tv[0]:Segmentation fault (core dumped)

I believe this is due to the change which now assigns s->total_req_len
when accumulation mode is desired. If the crypto context fails to
allocate (no gcrypt), states are not cleared in the model and previous
values are used by the model when the OS runs the next test and QEMU
segvs in has_padding().

To fix, I think we could move :

         if (s->qcrypto_hash_context == NULL &&
             qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
             qemu_log_mask(LOG_GUEST_ERROR,
                           "%s: qcrypto failed to create hash context\n",
                           __func__);
             return;
         }

at the beginning of do_hash_operation() ?
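
Something like this, placed before any s->total_req_len bookkeeping so that
a failed allocation leaves the model state untouched (untested sketch) :

    static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                                  bool acc_mode)
    {
        ...
        if (acc_mode && s->qcrypto_hash_context == NULL &&
            qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: qcrypto failed to create hash context\n",
                          __func__);
            return;
        }
        ...
    }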

C.


> Signed-off-by: Alejandro Zeise <alejandro.zeise@seagate.com>
> ---
>   hw/misc/aspeed_hace.c         | 91 ++++++++++++++++++-----------------
>   include/hw/misc/aspeed_hace.h |  4 ++
>   2 files changed, 51 insertions(+), 44 deletions(-)
> 
> diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
> index c06c04ddc6..8306d8986a 100644
> --- a/hw/misc/aspeed_hace.c
> +++ b/hw/misc/aspeed_hace.c
> @@ -1,6 +1,7 @@
>   /*
>    * ASPEED Hash and Crypto Engine
>    *
> + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
>    * Copyright (C) 2021 IBM Corp.
>    *
>    * Joel Stanley <joel@jms.id.au>
> @@ -151,47 +152,15 @@ static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
>       return iov_count;
>   }
>   
> -/**
> - * Generate iov for accumulative mode.
> - *
> - * @param s             aspeed hace state object
> - * @param iov           iov of the current request
> - * @param id            index of the current iov
> - * @param req_len       length of the current request
> - *
> - * @return count of iov
> - */
> -static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
> -                            hwaddr *req_len)
> -{
> -    uint32_t pad_offset;
> -    uint32_t total_msg_len;
> -    s->total_req_len += *req_len;
> -
> -    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
> -        if (s->iov_count) {
> -            return reconstruct_iov(s, iov, id, &pad_offset);
> -        }
> -
> -        *req_len -= s->total_req_len - total_msg_len;
> -        s->total_req_len = 0;
> -        iov[id].iov_len = *req_len;
> -    } else {
> -        s->iov_cache[s->iov_count].iov_base = iov->iov_base;
> -        s->iov_cache[s->iov_count].iov_len = *req_len;
> -        ++s->iov_count;
> -    }
> -
> -    return id + 1;
> -}
> -
>   static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
>                                 bool acc_mode)
>   {
>       struct iovec iov[ASPEED_HACE_MAX_SG];
> +    uint32_t total_msg_len;
> +    uint32_t pad_offset;
>       g_autofree uint8_t *digest_buf = NULL;
>       size_t digest_len = 0;
> -    int niov = 0;
> +    bool sg_acc_mode_final_request = false;
>       int i;
>       void *haddr;
>   
> @@ -226,8 +195,15 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
>               }
>               iov[i].iov_base = haddr;
>               if (acc_mode) {
> -                niov = gen_acc_mode_iov(s, iov, i, &plen);
> -
> +                s->total_req_len += plen;
> +
> +                if (has_padding(s, &iov[i], plen, &total_msg_len, &pad_offset)) {
> +                    /* Padding being present indicates the final request */
> +                    sg_acc_mode_final_request = true;
> +                    iov[i].iov_len = pad_offset;
> +                } else {
> +                    iov[i].iov_len = plen;
> +                }
>               } else {
>                   iov[i].iov_len = plen;
>               }
> @@ -252,20 +228,42 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
>                * required to check whether cache is empty. If no, we should
>                * combine cached iov and the current iov.
>                */
> -            uint32_t total_msg_len;
> -            uint32_t pad_offset;
>               s->total_req_len += len;
>               if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
> -                niov = reconstruct_iov(s, iov, 0, &pad_offset);
> +                i = reconstruct_iov(s, iov, 0, &pad_offset);
>               }
>           }
>       }
>   
> -    if (niov) {
> -        i = niov;
> -    }
> +    if (acc_mode) {
> +        if (s->qcrypto_hash_context == NULL &&
> +            qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                          "%s: qcrypto failed to create hash context\n",
> +                          __func__);
> +            return;
> +        }
> +
> +        if (qcrypto_hash_accumulate_bytesv(algo,
> +                                           s->qcrypto_hash_context, iov, i,
> +                                           &digest_buf,
> +                                           &digest_len, NULL) < 0) {
> +
> +            qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
> +            return;
> +        }
>   
> -    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
> +        if (sg_acc_mode_final_request) {
> +            if (qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL) < 0) {
> +                qemu_log_mask(LOG_GUEST_ERROR,
> +                              "%s: qcrypto failed to free context\n", __func__);
> +            }
> +
> +            s->qcrypto_hash_context = NULL;
> +            s->iov_count = 0;
> +            s->total_req_len = 0;
> +        }
> +    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
>           qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
>           return;
>       }
> @@ -397,6 +395,11 @@ static void aspeed_hace_reset(DeviceState *dev)
>   {
>       struct AspeedHACEState *s = ASPEED_HACE(dev);
>   
> +    if (s->qcrypto_hash_context != NULL) {
> +        qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL);
> +        s->qcrypto_hash_context = NULL;
> +    }
> +
>       memset(s->regs, 0, sizeof(s->regs));
>       s->iov_count = 0;
>       s->total_req_len = 0;
> diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
> index ecb1b67de8..b3c2eb17b7 100644
> --- a/include/hw/misc/aspeed_hace.h
> +++ b/include/hw/misc/aspeed_hace.h
> @@ -1,6 +1,7 @@
>   /*
>    * ASPEED Hash and Crypto Engine
>    *
> + * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
>    * Copyright (C) 2021 IBM Corp.
>    *
>    * SPDX-License-Identifier: GPL-2.0-or-later
> @@ -10,6 +11,7 @@
>   #define ASPEED_HACE_H
>   
>   #include "hw/sysbus.h"
> +#include "crypto/hash.h"
>   
>   #define TYPE_ASPEED_HACE "aspeed.hace"
>   #define TYPE_ASPEED_AST2400_HACE TYPE_ASPEED_HACE "-ast2400"
> @@ -35,6 +37,8 @@ struct AspeedHACEState {
>   
>       MemoryRegion *dram_mr;
>       AddressSpace dram_as;
> +
> +    qcrypto_hash_accumulate_ctx_t *qcrypto_hash_context;
>   };
>   
>
RE: [PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing
Posted by Alejandro Zeise 3 months, 3 weeks ago
Hello Cédric,

> -----Original Message-----
> From: Cédric Le Goater <clg@kaod.org> 
> Sent: Tuesday, July 30, 2024 5:55 AM
> To: Alejandro Zeise <alejandro.zeise@seagate.com>; qemu-arm@nongnu.org
> Cc: qemu-devel@nongnu.org; peter.maydell@linaro.org; berrange@redhat.com
> Subject: Re: [PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing


> Hello Alejandro,

> On 7/29/24 21:00, Alejandro Zeise wrote:
>> Make the Aspeed HACE module use the new qcrypto accumulative hashing 
>> functions when in scatter-gather accumulative mode. A hash context 
>> will maintain a "running-hash" as each scatter-gather chunk is received.
>>
>> Previously each scatter-gather "chunk" was cached so the hash could be 
>> computed once the final chunk was received.
>> However, the cache was a shallow copy, so once the guest overwrote the 
>> memory provided to HACE the final hash would not be correct.
>>
>> Possibly related to: https://gitlab.com/qemu-project/qemu/-/issues/1121
>> Buglink: https://github.com/openbmc/qemu/issues/36


> Thanks for these changes.

> However, this introduces a regression when compiling QEMU with --disable-gcrypt. It can be reproduced with :

   > make check-avocado AVOCADO_TAGS=machine:ast1030-evb

> or download :

>    https://github.com/AspeedTech-BMC/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip

> unzip and run :

>    path/to/qemu-system-arm -M ast1030-evb -kernel ./zephyr.bin -nographic

> then, on the console :

>    uart:~$ hash test
>    sha256_test
>    tv[0]:PASS
>    tv[1]:PASS
>    tv[2]:PASS
>    tv[3]:PASS
>    tv[4]:PASS
>    sha384_test
>    tv[0]:hash_final error
>    sha512_test
>    tv[0]:Segmentation fault (core dumped)

>  I believe this is due to the change which now assigns s->total_req_len when accumulation mode is desired. If the crypto context fails to allocate 
>  (no gcrypt), states are not cleared in the model and previous values are used by the model when the OS runs the next test and QEMU segvs in 
> has_padding().

> To fix, I think we could move :

>          if (s->qcrypto_hash_context == NULL &&
>              qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
>              qemu_log_mask(LOG_GUEST_ERROR,
>                            "%s: qcrypto failed to create hash context\n",
>                            __func__);
>              return;
>          }

>  at the beginning of do_hash_operation() ?

>  C.

Good catch. Moving the context allocation to the beginning of the function does appear to fix the issue.
I will submit a new patch series with that change implemented.

Thanks,
Alejandro

Re: [PATCH v2 2/2] hw/misc/aspeed_hace: Fix SG Accumulative hashing
Posted by Philippe Mathieu-Daudé 3 months, 3 weeks ago
Hi Alejandro,

On 29/7/24 21:00, Alejandro Zeise wrote:
> Make the Aspeed HACE module use the new qcrypto accumulative hashing functions
> when in scatter-gather accumulative mode. A hash context will maintain a
> "running-hash" as each scatter-gather chunk is received.
> 
> Previously each scatter-gather "chunk" was cached
> so the hash could be computed once the final chunk was received.
> However, the cache was a shallow copy, so once the guest overwrote the
> memory provided to HACE the final hash would not be correct.
> 
> Possibly related to: https://gitlab.com/qemu-project/qemu/-/issues/1121

Likely, Cc'ing John.

Reported-by: John Wang <wangzq.jn@gmail.com>

> Buglink: https://github.com/openbmc/qemu/issues/36
> 
> Signed-off-by: Alejandro Zeise <alejandro.zeise@seagate.com>
> ---
>   hw/misc/aspeed_hace.c         | 91 ++++++++++++++++++-----------------
>   include/hw/misc/aspeed_hace.h |  4 ++
>   2 files changed, 51 insertions(+), 44 deletions(-)


> @@ -252,20 +228,42 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,


> -    if (niov) {
> -        i = niov;
> -    }
> +    if (acc_mode) {
> +        if (s->qcrypto_hash_context == NULL &&
> +            qcrypto_hash_accumulate_new_ctx(algo, &s->qcrypto_hash_context, NULL)) {
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                          "%s: qcrypto failed to create hash context\n",
> +                          __func__);
> +            return;
> +        }

Using an instance_init() handler, ...

> @@ -397,6 +395,11 @@ static void aspeed_hace_reset(DeviceState *dev)
>   {
>       struct AspeedHACEState *s = ASPEED_HACE(dev);
>   
> +    if (s->qcrypto_hash_context != NULL) {
> +        qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL);
> +        s->qcrypto_hash_context = NULL;
> +    }

... and instance_finalize() could simplify this a bit.
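
For the teardown side, a rough (untested) sketch, assuming the usual QOM
boilerplate already present in aspeed_hace.c:

    static void aspeed_hace_finalize(Object *obj)
    {
        AspeedHACEState *s = ASPEED_HACE(obj);

        if (s->qcrypto_hash_context) {
            qcrypto_hash_accumulate_free_ctx(s->qcrypto_hash_context, NULL);
            s->qcrypto_hash_context = NULL;
        }
    }

    static const TypeInfo aspeed_hace_info = {
        .name              = TYPE_ASPEED_HACE,
        ...
        .instance_finalize = aspeed_hace_finalize,
    };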

Regards,

Phil.