The implementation was validated with OpenSSL and with the test vectors in
https://github.com/rust-lang/stdarch/blob/master/crates/core_arch/src/x86/sha.rs.
The instructions provide a ~25% improvement on hashing a 64 MiB file:
runtime goes down from 1.8 seconds to 1.4 seconds; instruction count on
the host goes down from 5.8 billion to 4.8 billion with slightly better
IPC too. Good job Intel. ;)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
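For reference, the kind of guest-side spot check described above can be as
simple as the sketch below (not part of the patch; it assumes OpenSSL's EVP
API and the FIPS 180-2 "abc" test vector). With -cpu max, OpenSSL should
detect SHA-NI and take the accelerated path, so the check ends up exercising
the new helpers; OPENSSL_ia32cap can mask the feature again for an A/B run
against the numbers quoted in the commit message.

/* sha-check.c: build inside the guest with "gcc -O2 sha-check.c -lcrypto". */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
    /* FIPS 180-2 test vector: SHA-256("abc") */
    static const char msg[] = "abc";
    static const char expected[] =
        "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";
    unsigned char md[EVP_MAX_MD_SIZE];
    unsigned int mdlen = 0, i;
    char hex[2 * EVP_MAX_MD_SIZE + 1];

    if (!EVP_Digest(msg, strlen(msg), md, &mdlen, EVP_sha256(), NULL)) {
        fprintf(stderr, "EVP_Digest failed\n");
        return 1;
    }
    for (i = 0; i < mdlen; i++) {
        sprintf(&hex[2 * i], "%02x", md[i]);
    }
    printf("%s\n", hex);
    return strcmp(hex, expected) == 0 ? 0 : 1;
}
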
target/i386/cpu.c | 2 +-
target/i386/ops_sse.h | 128 +++++++++++++++++++++++++++
target/i386/tcg/decode-new.c.inc | 11 +++
target/i386/tcg/decode-new.h | 1 +
target/i386/tcg/emit.c.inc | 54 +++++++++++
target/i386/tcg/ops_sse_header.h.inc | 14 +++
6 files changed, 209 insertions(+), 1 deletion(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index bdca901dfaa..070c02000fe 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -714,7 +714,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_RDSEED | \
- CPUID_7_0_EBX_KERNEL_FEATURES)
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_KERNEL_FEATURES)
/* missing:
CPUID_7_0_EBX_HLE
CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM */
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 33908c0691f..6a465a35fdb 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -2527,6 +2527,134 @@ SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd)
SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd)
#endif
+#if SHIFT == 1
+#define SSE_HELPER_SHA1RNDS4(name, F, K) \
+ void name(Reg *d, Reg *a, Reg *b) \
+ { \
+ uint32_t A, B, C, D, E, t, i; \
+ \
+ A = a->L(3); \
+ B = a->L(2); \
+ C = a->L(1); \
+ D = a->L(0); \
+ E = 0; \
+ \
+ for (i = 0; i <= 3; i++) { \
+ t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \
+ E = D; \
+ D = C; \
+ C = rol32(B, 30); \
+ B = A; \
+ A = t; \
+ } \
+ \
+ d->L(3) = A; \
+ d->L(2) = B; \
+ d->L(1) = C; \
+ d->L(0) = D; \
+ }
+
+#define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d)))
+#define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d))
+#define SHA1_F2(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
+
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0, SHA1_F0, 0x5A827999)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1, SHA1_F1, 0x6ED9EBA1)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2, SHA1_F2, 0x8F1BBCDC)
+SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3, SHA1_F1, 0xCA62C1D6)
+
+void helper_sha1nexte(Reg *d, Reg *a, Reg *b)
+{
+ d->L(3) = b->L(3) + rol32(a->L(3), 30);
+ d->L(2) = b->L(2);
+ d->L(1) = b->L(1);
+ d->L(0) = b->L(0);
+}
+
+void helper_sha1msg1(Reg *d, Reg *a, Reg *b)
+{
+ /* These could be overwritten by the first two assignments, save them. */
+ uint32_t b3 = b->L(3);
+ uint32_t b2 = b->L(2);
+
+ d->L(3) = a->L(3) ^ a->L(1);
+ d->L(2) = a->L(2) ^ a->L(0);
+ d->L(1) = a->L(1) ^ b3;
+ d->L(0) = a->L(0) ^ b2;
+}
+
+void helper_sha1msg2(Reg *d, Reg *a, Reg *b)
+{
+ d->L(3) = rol32(a->L(3) ^ b->L(2), 1);
+ d->L(2) = rol32(a->L(2) ^ b->L(1), 1);
+ d->L(1) = rol32(a->L(1) ^ b->L(0), 1);
+ d->L(0) = rol32(a->L(0) ^ d->L(3), 1);
+}
+
+#define SHA256_CH(e, f, g) (((e) & (f)) ^ (~(e) & (g)))
+#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
+
+#define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22))
+#define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25))
+#define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3))
+#define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10))
+
+void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1)
+{
+ uint32_t t, AA, EE;
+
+ uint32_t A = b->L(3);
+ uint32_t B = b->L(2);
+ uint32_t C = a->L(3);
+ uint32_t D = a->L(2);
+ uint32_t E = b->L(1);
+ uint32_t F = b->L(0);
+ uint32_t G = a->L(1);
+ uint32_t H = a->L(0);
+
+ /* Even round */
+ t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk0 + H;
+ AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+ EE = t + D;
+
+ /* These will be B and F at the end of the odd round */
+ d->L(2) = AA;
+ d->L(0) = EE;
+
+ D = C, C = B, B = A, A = AA;
+ H = G, G = F, F = E, E = EE;
+
+ /* Odd round */
+ t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H;
+ AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
+ EE = t + D;
+
+ d->L(3) = AA;
+ d->L(1) = EE;
+}
+
+void helper_sha256msg1(Reg *d, Reg *a, Reg *b)
+{
+ /* b->L(0) could be overwritten by the first assignment, save it. */
+ uint32_t b0 = b->L(0);
+
+ d->L(0) = a->L(0) + SHA256_MSGS0(a->L(1));
+ d->L(1) = a->L(1) + SHA256_MSGS0(a->L(2));
+ d->L(2) = a->L(2) + SHA256_MSGS0(a->L(3));
+ d->L(3) = a->L(3) + SHA256_MSGS0(b0);
+}
+
+void helper_sha256msg2(Reg *d, Reg *a, Reg *b)
+{
+ /* Earlier assignments cannot overwrite either of the two operands. */
+ d->L(0) = a->L(0) + SHA256_MSGS1(b->L(2));
+ d->L(1) = a->L(1) + SHA256_MSGS1(b->L(3));
+ /* Yes, this reuses the previously computed values. */
+ d->L(2) = a->L(2) + SHA256_MSGS1(d->L(0));
+ d->L(3) = a->L(3) + SHA256_MSGS1(d->L(1));
+}
+#endif
+
#undef SSE_HELPER_S
#undef LANE_WIDTH
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 850271e0898..eb2400095f8 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -504,6 +504,13 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
[0xbe] = X86_OP_ENTRY3(VFNMSUB231Px, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
[0xbf] = X86_OP_ENTRY3(VFNMSUB231Sx, V,x, H,x, W,x, vex6 cpuid(FMA) p_66),
+ [0xc8] = X86_OP_ENTRY2(SHA1NEXTE, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xc9] = X86_OP_ENTRY2(SHA1MSG1, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xca] = X86_OP_ENTRY2(SHA1MSG2, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcb] = X86_OP_ENTRY2(SHA256RNDS2, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcc] = X86_OP_ENTRY2(SHA256MSG1, V,dq, W,dq, cpuid(SHA_NI)),
+ [0xcd] = X86_OP_ENTRY2(SHA256MSG2, V,dq, W,dq, cpuid(SHA_NI)),
+
[0xdb] = X86_OP_ENTRY3(VAESIMC, V,dq, None,None, W,dq, vex4 cpuid(AES) p_66),
[0xdc] = X86_OP_ENTRY3(VAESENC, V,x, H,x, W,x, vex4 cpuid(AES) p_66),
[0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66),
@@ -653,6 +660,8 @@ static const X86OpEntry opcodes_0F3A[256] = {
[0x4b] = X86_OP_ENTRY4(VBLENDVPD, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66),
[0x4c] = X86_OP_ENTRY4(VPBLENDVB, V,x, H,x, W,x, vex6 chk(W0) cpuid(AVX) p_66 avx2_256),
+ [0xcc] = X86_OP_ENTRY3(SHA1RNDS4, V,dq, W,dq, I,b, cpuid(SHA_NI)),
+
[0xdf] = X86_OP_ENTRY3(VAESKEYGEN, V,dq, W,dq, I,b, vex4 cpuid(AES) p_66),
[0xF0] = X86_OP_ENTRY3(RORX, G,y, E,y, I,b, vex13 cpuid(BMI2) p_f2),
@@ -1500,6 +1509,8 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2);
case X86_FEAT_AVX2:
return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2);
+ case X86_FEAT_SHA_NI:
+ return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI);
}
g_assert_not_reached();
}
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index ae987dfe0ba..ab21fa6db97 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -108,6 +108,7 @@ typedef enum X86CPUIDFeature {
X86_FEAT_FMA,
X86_FEAT_MOVBE,
X86_FEAT_PCLMULQDQ,
+ X86_FEAT_SHA_NI,
X86_FEAT_SSE,
X86_FEAT_SSE2,
X86_FEAT_SSE3,
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 7c36cf8a6df..82da5488d47 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1796,6 +1796,60 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}
+static void gen_SHA1NEXTE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha1msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA1RNDS4(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ switch (decode->immediate & 3) {
+ case 0:
+ gen_helper_sha1rnds4_f0(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 1:
+ gen_helper_sha1rnds4_f1(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 2:
+ gen_helper_sha1rnds4_f2(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ case 3:
+ gen_helper_sha1rnds4_f3(OP_PTR0, OP_PTR0, OP_PTR1);
+ break;
+ }
+}
+
+static void gen_SHA256MSG1(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha256msg1(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256MSG2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ gen_helper_sha256msg2(OP_PTR0, OP_PTR1, OP_PTR2);
+}
+
+static void gen_SHA256RNDS2(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+ TCGv_i32 wk0 = tcg_temp_new_i32();
+ TCGv_i32 wk1 = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(wk0, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_ld_i32(wk1, tcg_env, ZMM_OFFSET(0) + offsetof(ZMMReg, ZMM_L(1)));
+
+ gen_helper_sha256rnds2(OP_PTR0, OP_PTR1, OP_PTR2, wk0, wk1);
+}
+
static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
MemOp ot = decode->op[0].ot;
diff --git a/target/i386/tcg/ops_sse_header.h.inc b/target/i386/tcg/ops_sse_header.h.inc
index 8a7b2f4e2f6..d92c6faf6d6 100644
--- a/target/i386/tcg/ops_sse_header.h.inc
+++ b/target/i386/tcg/ops_sse_header.h.inc
@@ -399,6 +399,20 @@ DEF_HELPER_3(vpermq_ymm, void, Reg, Reg, i32)
#endif
#endif
+/* SHA helpers */
+#if SHIFT == 1
+DEF_HELPER_3(sha1rnds4_f0, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f2, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1rnds4_f3, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1nexte, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha1msg2, void, Reg, Reg, Reg)
+DEF_HELPER_5(sha256rnds2, void, Reg, Reg, Reg, i32, i32)
+DEF_HELPER_3(sha256msg1, void, Reg, Reg, Reg)
+DEF_HELPER_3(sha256msg2, void, Reg, Reg, Reg)
+#endif
+
#undef SHIFT
#undef Reg
#undef SUFFIX
--
2.41.0
Hi Paolo,

On 19/10/23 12:46, Paolo Bonzini wrote:
> The implementation was validated with OpenSSL and with the test vectors in
> https://github.com/rust-lang/stdarch/blob/master/crates/core_arch/src/x86/sha.rs.
[...]
> +void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1)
> +{
[...]
> +    /* Odd round */
> +    t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H;
> +    AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
> +    EE = t + D;

Better would be to implement that generically, so we can reuse
host crypto accelerators when available. Can be done later...
(See commit range fb250c59aa..ff494c8e2a for example.)
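For illustration only, a generic interface along those lines might look
roughly like the sketch below. None of these names exist in QEMU today; the
shape just mirrors the generic AES round helpers from the commit range cited
above, with a portable fallback that a per-host header could replace by an
accelerated definition.

/* Hypothetical sketch, not an existing QEMU interface. */
#include <stdint.h>
#include <stdbool.h>

typedef struct SHA256State {
    uint32_t v[8];                        /* a, b, c, d, e, f, g, h */
} SHA256State;

/* True when the host build provides an accelerated implementation. */
extern const bool have_sha256_accel;

static inline uint32_t ror32_(uint32_t x, unsigned n)
{
    return (x >> n) | (x << (32 - n));
}

/*
 * Advance the state by two rounds; wk0/wk1 are message words with the
 * round constants already added.  Generic fallback shown here; a
 * host/include/<arch>/ header could supply a faster version.
 */
static inline void sha256_2rounds(SHA256State *s, uint32_t wk0, uint32_t wk1)
{
    uint32_t wk[2] = { wk0, wk1 };

    for (int i = 0; i < 2; i++) {
        uint32_t a = s->v[0], b = s->v[1], c = s->v[2], d = s->v[3];
        uint32_t e = s->v[4], f = s->v[5], g = s->v[6], h = s->v[7];
        uint32_t t1 = h + (ror32_(e, 6) ^ ror32_(e, 11) ^ ror32_(e, 25))
                        + ((e & f) ^ (~e & g)) + wk[i];
        uint32_t t2 = (ror32_(a, 2) ^ ror32_(a, 13) ^ ror32_(a, 22))
                        + ((a & b) ^ (a & c) ^ (b & c));

        s->v[0] = t1 + t2; s->v[1] = a; s->v[2] = b; s->v[3] = c;
        s->v[4] = d + t1;  s->v[5] = e; s->v[6] = f; s->v[7] = g;
    }
}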
On 10/19/23 12:59, Philippe Mathieu-Daudé wrote:
> Better would be to implement that generically, so we can reuse
> host crypto accelerators when available. Can be done later...
> (See commit range fb250c59aa..ff494c8e2a for example.)

ARM extensions are probably too different from x86. ARM does four rounds
per instruction, while x86 does two. And Intel passes ABEF/CDGH in the
arguments, while ARM passes ABCD/EFGH.

Paolo
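To make the difference concrete, the canonical four-round step on each ISA
looks roughly like this (a sketch based on the vendors' reference sequences,
not tested code; it assumes the usual <immintrin.h>/<arm_neon.h> intrinsics
and the corresponding -msha / ARMv8 crypto build flags):

#if defined(__SHA__)                      /* x86: two rounds per instruction */
#include <immintrin.h>
/*
 * state0 = ABEF, state1 = CDGH; wk holds four message words with the
 * round constants already added.  Two SHA256RNDS2 give four rounds; the
 * ping-pong works because after two rounds the old ABEF is the new CDGH.
 */
static inline void sha256_4rounds_x86(__m128i *state0, __m128i *state1,
                                      __m128i wk)
{
    *state1 = _mm_sha256rnds2_epu32(*state1, *state0, wk);   /* wk[0], wk[1] */
    wk = _mm_shuffle_epi32(wk, 0x0E);                        /* wk[2], wk[3] */
    *state0 = _mm_sha256rnds2_epu32(*state0, *state1, wk);
}
#endif

#if defined(__ARM_FEATURE_CRYPTO)         /* ARM: four rounds per instruction */
#include <arm_neon.h>
/* abcd/efgh hold the state in architectural order, wk as above. */
static inline void sha256_4rounds_arm(uint32x4_t *abcd, uint32x4_t *efgh,
                                      uint32x4_t wk)
{
    uint32x4_t old_abcd = *abcd;

    *abcd = vsha256hq_u32(*abcd, *efgh, wk);
    *efgh = vsha256h2q_u32(*efgh, old_abcd, wk);
}
#endif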