Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/crypto/clmul.h | 16 ++++++++++++++++
crypto/clmul.c | 21 +++++++++++++++++++++
2 files changed, 37 insertions(+)
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
index 153b5e3057..72672b237c 100644
--- a/include/crypto/clmul.h
+++ b/include/crypto/clmul.h
@@ -38,4 +38,20 @@ uint64_t clmul_8x4_odd(uint64_t, uint64_t);
*/
uint64_t clmul_8x4_packed(uint32_t, uint32_t);
+/**
+ * clmul_16x2_even:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The odd words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_even(uint64_t, uint64_t);
+
+/**
+ * clmul_16x2_odd:
+ *
+ * Perform two 16x16->32 carry-less multiplies.
+ * The even words of the inputs are ignored.
+ */
+uint64_t clmul_16x2_odd(uint64_t, uint64_t);
+
#endif /* CRYPTO_CLMUL_H */
diff --git a/crypto/clmul.c b/crypto/clmul.c
index 82d873fee5..2c87cfbf8a 100644
--- a/crypto/clmul.c
+++ b/crypto/clmul.c
@@ -58,3 +58,24 @@ uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
{
return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
}
+
+uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    n &= 0x0000ffff0000ffffull;  /* keep only the even 16-bit lanes */
+    m &= 0x0000ffff0000ffffull;
+
+    for (int i = 0; i < 16; ++i) {  /* one iteration per multiplier bit */
+        uint64_t mask = (n & 0x0000000100000001ull) * 0xffffffffull;  /* broadcast bit 0 of each 32-bit lane across the lane */
+        r ^= m & mask;  /* carry-less accumulate (xor) of the shifted multiplicand */
+        n >>= 1;
+        m <<= 1;  /* lanes cannot collide: 15+15 = bit 30 < 32 */
+    }
+    return r;
+}
+
+uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
+{
+    return clmul_16x2_even(n >> 16, m >> 16);  /* shift the odd 16-bit lanes down into even position */
+}
--
2.34.1