Use generic routines for 16-bit carry-less multiply.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/ppc/int_helper.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
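
For reference: a carry-less multiply is polynomial multiplication over
GF(2), i.e. partial products are combined with XOR instead of addition,
so no carries propagate between bit positions. A minimal scalar sketch
of one 16x16->32 lane follows (clmul_16_ref is an illustrative name,
not the clmul.h API; the generic clmul_16x4_even/odd routines apply
this operation to the four even or odd 16-bit lanes of an Int128):

  #include <stdint.h>

  /*
   * Illustrative 16x16->32 carry-less multiply: for each set bit
   * of a, accumulate a shifted copy of b with XOR.
   */
  static uint32_t clmul_16_ref(uint16_t a, uint16_t b)
  {
      uint32_t r = 0;

      for (int i = 0; i < 16; i++) {
          if (a & (1u << i)) {
              r ^= (uint32_t)b << i;
          }
      }
      return r;
  }

vpmsumh then XORs each even-lane product with the adjacent odd-lane
product, which is the int128_xor(e, o) step in the helper below.
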
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 3bf0f5dbe5..98d6310f59 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1435,6 +1435,15 @@ void helper_vpmsumb(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     r->s128 = int128_xor(e, o);
 }
 
+void helper_vpmsumh(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+    Int128 ia = a->s128;
+    Int128 ib = b->s128;
+    Int128 e = clmul_16x4_even(ia, ib);
+    Int128 o = clmul_16x4_odd(ia, ib);
+    r->s128 = int128_xor(e, o);
+}
+
#define PMSUM(name, srcfld, trgfld, trgtyp) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
@@ -1455,7 +1464,6 @@ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
} \
 }
 
-PMSUM(vpmsumh, u16, u32, uint32_t)
 PMSUM(vpmsumw, u32, u64, uint64_t)
 
 void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
--
2.34.1