target/ppc: Use generic routines for 32-bit carry-less multiply

Replace the local PMSUM macro, which open-codes vpmsumw as a
bit-by-bit shift-and-xor loop, with calls to the generic clmul_32()
routine: each doubleword of the result is the xor of the carry-less
products of the two 32-bit word pairs within it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/ppc/int_helper.c | 26 ++++++--------------------
1 file changed, 6 insertions(+), 20 deletions(-)
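
For reviewers: clmul_32() is the generic carry-less multiply helper
used below (assumed here to be declared in include/crypto/clmul.h),
computing the 64-bit GF(2) product of two 32-bit operands. A minimal
reference model, illustrative only and not the in-tree implementation:

#include <stdint.h>

/* Carry-less multiply: xor-accumulate (b << i) for each set bit i of a. */
static uint64_t clmul_32_ref(uint32_t a, uint32_t b)
{
    uint64_t r = 0;

    for (int i = 0; i < 32; i++) {
        if (a & (1u << i)) {
            r ^= (uint64_t)b << i;
        }
    }
    return r;
}

With that definition, each u64 lane of the vpmsumw result is
clmul_32_ref(aa, bb) ^ clmul_32_ref(aa >> 32, bb >> 32), which is
exactly the prod[2 * i] ^ prod[2 * i + 1] combination that the
removed PMSUM macro computed bit by bit.
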
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index ebb2957fe7..1ea42b4ede 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1445,28 +1445,14 @@ void helper_vpmsumh(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     }
 }
 
-#define PMSUM(name, srcfld, trgfld, trgtyp)                   \
-void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
-{                                                             \
-    int i, j;                                                 \
-    trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])];    \
-                                                              \
-    VECTOR_FOR_INORDER_I(i, srcfld) {                         \
-        prod[i] = 0;                                          \
-        for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) {      \
-            if (a->srcfld[i] & (1ull << j)) {                 \
-                prod[i] ^= ((trgtyp)b->srcfld[i] << j);       \
-            }                                                 \
-        }                                                     \
-    }                                                         \
-                                                              \
-    VECTOR_FOR_INORDER_I(i, trgfld) {                         \
-        r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1];         \
-    }                                                         \
+void helper_vpmsumw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+    for (int i = 0; i < 2; ++i) {
+        uint64_t aa = a->u64[i], bb = b->u64[i];
+        r->u64[i] = clmul_32(aa, bb) ^ clmul_32(aa >> 32, bb >> 32);
+    }
 }
 
-PMSUM(vpmsumw, u32, u64, uint64_t)
-
 void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
 {
     int i, j;
--
2.34.1