To improve code readability and maintainability, this commit refactors the
hash buffer preparation logic of do_hash_operation() into two helper
functions:

- hash_prepare_direct_iov(): handles non-scatter-gather (direct) mode.
- hash_prepare_sg_iov(): handles scatter-gather mode, with accumulation and
  padding-detection support.

No functional changes are introduced.
Signed-off-by: Jamin Lin <jamin_lin@aspeedtech.com>
---
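Reviewer note (kept below the ---, so it is not part of the commit message):
both helpers return the number of populated iov entries, or -1 on failure,
and the caller only checks that the count is positive. For context, a minimal
sketch of how such a prepared vector is typically consumed by QEMU's qcrypto
layer, assuming the non-accumulative path and the existing
qcrypto_hash_bytesv() API; the actual call site in the elided tail of
do_hash_operation() is untouched by this patch:

    uint8_t *digest_buf = NULL;
    size_t digest_len = 0;

    /* Hash all prepared iov entries in one shot (non-accumulative mode). */
    if (qcrypto_hash_bytesv(algo, iov, iov_idx,
                            &digest_buf, &digest_len, &local_err) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash failed\n", __func__);
        return;
    }
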
hw/misc/aspeed_hace.c | 153 ++++++++++++++++++++++++++----------------
1 file changed, 95 insertions(+), 58 deletions(-)
diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index 1256926d22..22eea62693 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -142,6 +142,92 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov,
return false;
}
+static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov)
+{
+ uint32_t src;
+ void *haddr;
+ hwaddr plen;
+ int iov_idx;
+
+ plen = s->regs[R_HASH_SRC_LEN];
+ src = s->regs[R_HASH_SRC];
+ haddr = address_space_map(&s->dram_as, src, &plen, false,
+ MEMTXATTRS_UNSPECIFIED);
+ if (haddr == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unable to map address, addr=0x%x, "
+ "plen=0x%" HWADDR_PRIx "\n",
+ __func__, src, plen);
+ return -1;
+ }
+
+ iov[0].iov_base = haddr;
+ iov[0].iov_len = plen;
+ iov_idx = 1;
+
+ return iov_idx;
+}
+
+static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov,
+ bool acc_mode, bool *acc_final_request)
+{
+ uint32_t total_msg_len;
+ uint32_t pad_offset;
+ uint32_t len = 0;
+ uint32_t sg_addr;
+ uint32_t src;
+ int iov_idx;
+ hwaddr plen;
+ void *haddr;
+
+ for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
+ if (iov_idx == ASPEED_HACE_MAX_SG) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Failed to set end of sg list marker\n",
+ __func__);
+ return -1;
+ }
+
+ src = s->regs[R_HASH_SRC] + (iov_idx * SG_LIST_ENTRY_SIZE);
+
+ len = address_space_ldl_le(&s->dram_as, src,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ sg_addr &= SG_LIST_ADDR_MASK;
+
+ plen = len & SG_LIST_LEN_MASK;
+ haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (haddr == NULL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unable to map address, sg_addr=0x%x, "
+ "plen=0x%" HWADDR_PRIx "\n",
+ __func__, sg_addr, plen);
+ return -1;
+ }
+
+ iov[iov_idx].iov_base = haddr;
+ if (acc_mode) {
+ s->total_req_len += plen;
+
+ if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
+ &pad_offset)) {
+ /* Padding being present indicates the final request */
+ *acc_final_request = true;
+ iov[iov_idx].iov_len = pad_offset;
+ } else {
+ iov[iov_idx].iov_len = plen;
+ }
+ } else {
+ iov[iov_idx].iov_len = plen;
+ }
+ }
+
+ return iov_idx;
+}
+
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
bool acc_mode)
{
@@ -149,15 +235,8 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
struct iovec iov[ASPEED_HACE_MAX_SG];
bool acc_final_request = false;
Error *local_err = NULL;
- uint32_t total_msg_len;
size_t digest_len = 0;
- uint32_t sg_addr = 0;
- uint32_t pad_offset;
- int iov_idx = 0;
- uint32_t len = 0;
- uint32_t src = 0;
- void *haddr;
- hwaddr plen;
+ int iov_idx = -1;
if (acc_mode && s->hash_ctx == NULL) {
s->hash_ctx = qcrypto_hash_new(algo, &local_err);
@@ -169,59 +248,17 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
}
}
+ /* Prepares the iov for hashing operations based on the selected mode */
if (sg_mode) {
- for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) {
- if (iov_idx == ASPEED_HACE_MAX_SG) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "aspeed_hace: guest failed to set end of sg list marker\n");
- break;
- }
-
- src = s->regs[R_HASH_SRC] + (iov_idx * SG_LIST_ENTRY_SIZE);
-
- len = address_space_ldl_le(&s->dram_as, src,
- MEMTXATTRS_UNSPECIFIED, NULL);
-
- sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
- MEMTXATTRS_UNSPECIFIED, NULL);
- sg_addr &= SG_LIST_ADDR_MASK;
-
- plen = len & SG_LIST_LEN_MASK;
- haddr = address_space_map(&s->dram_as, sg_addr, &plen, false,
- MEMTXATTRS_UNSPECIFIED);
- if (haddr == NULL) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: qcrypto failed\n", __func__);
- return;
- }
- iov[iov_idx].iov_base = haddr;
- if (acc_mode) {
- s->total_req_len += plen;
-
- if (has_padding(s, &iov[iov_idx], plen, &total_msg_len,
- &pad_offset)) {
- /* Padding being present indicates the final request */
- acc_final_request = true;
- iov[iov_idx].iov_len = pad_offset;
- } else {
- iov[iov_idx].iov_len = plen;
- }
- } else {
- iov[iov_idx].iov_len = plen;
- }
- }
+ iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request);
} else {
- plen = s->regs[R_HASH_SRC_LEN];
+ iov_idx = hash_prepare_direct_iov(s, iov);
+ }
- haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
- &plen, false, MEMTXATTRS_UNSPECIFIED);
- if (haddr == NULL) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
- return;
- }
- iov[0].iov_base = haddr;
- iov[0].iov_len = plen;
- iov_idx = 1;
+ if (iov_idx <= 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Failed to prepare iov\n", __func__);
+ return;
}
if (acc_mode) {
--
2.43.0