[PATCH 05/18] crypto: Add generic 16-bit carry-less multiply routines

Add generic fallback routines for 16-bit carry-less multiplication:
two 16x16->32 multiplies across a uint64_t (even or odd words), and
four across an Int128, following the pattern of the existing 8-bit
routines.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 host/include/generic/host/crypto/clmul.h |  5 +++
 include/crypto/clmul.h                   | 32 +++++++++++++++++++
 crypto/clmul.c                           | 39 ++++++++++++++++++++++++
 3 files changed, 76 insertions(+)
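
For reviewers, a reminder of the semantics: a carry-less multiply is
polynomial multiplication over GF(2), i.e. a schoolbook multiply whose
partial products are combined with XOR instead of ADD, so no carries
propagate between bit positions.  A minimal single-lane reference
(the clmul_16_ref helper is illustrative only, not part of this patch):

    #include <stdint.h>

    /* Reference: one 16x16->32 carry-less (polynomial) multiply. */
    static uint32_t clmul_16_ref(uint16_t a, uint16_t b)
    {
        uint32_t r = 0;

        for (int i = 0; i < 16; ++i) {
            if (a & (1u << i)) {
                r ^= (uint32_t)b << i;   /* XOR, not ADD: no carries */
            }
        }
        return r;
    }

For example, 0x3 clmul 0x3 yields 0x5 rather than 9, since
(x + 1)^2 == x^2 + 1 over GF(2).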

diff --git a/host/include/generic/host/crypto/clmul.h b/host/include/generic/host/crypto/clmul.h
index 694705f703..cba8bbf3e4 100644
--- a/host/include/generic/host/crypto/clmul.h
+++ b/host/include/generic/host/crypto/clmul.h
@@ -14,4 +14,9 @@
 #define clmul_8x8_odd           clmul_8x8_odd_gen
 #define clmul_8x8_packed        clmul_8x8_packed_gen
 
+#define clmul_16x2_even         clmul_16x2_even_gen
+#define clmul_16x2_odd          clmul_16x2_odd_gen
+#define clmul_16x4_even         clmul_16x4_even_gen
+#define clmul_16x4_odd          clmul_16x4_odd_gen
+
 #endif /* GENERIC_HOST_CRYPTO_CLMUL_H */
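
The generic header only maps the public names onto the _gen fallbacks;
a host-specific header can instead point them at an accelerated
implementation.  A hypothetical override (the _accel name is
illustrative, not part of this series):

    #define clmul_16x2_even         clmul_16x2_even_accel
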
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
index 7f19205d6f..b701bac9d6 100644
--- a/include/crypto/clmul.h
+++ b/include/crypto/clmul.h
@@ -56,6 +56,38 @@ Int128 clmul_8x8_odd_gen(Int128, Int128);
  */
 Int128 clmul_8x8_packed_gen(uint64_t, uint64_t);
 
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even_gen(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd_gen(uint64_t, uint64_t);
+
+/**
+ * clmul_16x4_even:
+ *
+ * Perform four 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+Int128 clmul_16x4_even_gen(Int128, Int128);
+
+/**
+ * clmul_16x4_odd:
+ *
+ * Perform four 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+Int128 clmul_16x4_odd_gen(Int128, Int128);
+
 #include "host/crypto/clmul.h"
 
 #endif /* CRYPTO_CLMUL_H */
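
As a usage sketch of the declarations above (the demo function is
hypothetical and assumes the QEMU build environment; the API names are
from this patch): clmul_16x2_even multiplies the even-numbered 16-bit
words, leaving one 32-bit product per lane.

    #include <assert.h>
    #include "crypto/clmul.h"

    static void clmul_16x2_even_demo(void)
    {
        /* Even 16-bit words: 0x0003 (low lane), 0x0005 (high lane). */
        uint64_t n = 0xdead0005dead0003ull;
        uint64_t m = 0xbeef0003beef0003ull;
        uint64_t r = clmul_16x2_even(n, m);

        /* 0x3 clmul 0x3 == 0x5;  0x5 clmul 0x3 == 0xf. */
        assert(r == 0x0000000f00000005ull);
    }
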
diff --git a/crypto/clmul.c b/crypto/clmul.c
index 866704e751..69a3b6f7ff 100644
--- a/crypto/clmul.c
+++ b/crypto/clmul.c
@@ -74,3 +74,42 @@ Int128 clmul_8x8_packed_gen(uint64_t n, uint64_t m)
     rh = clmul_8x4_even_gen(unpack_8_to_16(n >> 32), unpack_8_to_16(m >> 32));
     return int128_make128(rl, rh);
 }
+
+uint64_t clmul_16x2_even_gen(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    n &= 0x0000ffff0000ffffull;
+    m &= 0x0000ffff0000ffffull;
+
+    for (int i = 0; i < 16; ++i) {
+        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
+uint64_t clmul_16x2_odd_gen(uint64_t n, uint64_t m)
+{
+    return clmul_16x2_even_gen(n >> 16, m >> 16);
+}
+
+Int128 clmul_16x4_even_gen(Int128 n, Int128 m)
+{
+    uint64_t rl, rh;
+
+    rl = clmul_16x2_even_gen(int128_getlo(n), int128_getlo(m));
+    rh = clmul_16x2_even_gen(int128_gethi(n), int128_gethi(m));
+    return int128_make128(rl, rh);
+}
+
+Int128 clmul_16x4_odd_gen(Int128 n, Int128 m)
+{
+    uint64_t rl, rh;
+
+    rl = clmul_16x2_odd_gen(int128_getlo(n), int128_getlo(m));
+    rh = clmul_16x2_odd_gen(int128_gethi(n), int128_gethi(m));
+    return int128_make128(rl, rh);
+}
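
One detail of the loop above that may merit a comment: the multiply by
0xffffffff broadcasts bit 0 of each 32-bit lane into a full-lane mask,
so both lanes select or reject their partial product in a single
operation.  A standalone illustration (the lane_masks helper name is
hypothetical):

    /*
     * x * 0xffffffff == (x << 32) - x (mod 2^64), which expands each
     * lane's low bit into 0xffffffff (bit set) or 0 (bit clear).
     */
    static uint64_t lane_masks(uint64_t n)
    {
        return (n & 0x0000000100000001ull) * 0xffffffffull;
    }

    /* e.g. lane_masks(0x0000000000000001) == 0x00000000ffffffff,
     *      lane_masks(0x0000000100000001) == 0xffffffffffffffff. */
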
-- 
2.34.1