Switch from the old AES library functions (which use struct
crypto_aes_ctx) to the new ones (which use struct aes_enckey). This
eliminates the unnecessary computation and caching of the decryption
round keys. The new AES en/decryption functions are also much faster,
since they use AES instructions when supported by the CPU.

Note: aes_encrypt_new() will be renamed to aes_encrypt() once all
callers of the old aes_encrypt() have been updated.

Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
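(Note for reviewers, not for the commit message: below is a minimal
sketch of the conversion pattern this patch applies, based only on the
calls visible in the diff. The helper name encrypt_one_block() is made
up for illustration and is not part of the API.)

#include <crypto/aes.h>
#include <linux/string.h>

/* Hypothetical helper showing the new prepare-then-encrypt flow. */
static int encrypt_one_block(const u8 *key, unsigned int keylen,
			     u8 out[AES_BLOCK_SIZE],
			     const u8 in[AES_BLOCK_SIZE])
{
	struct aes_enckey enckey;	/* encryption round keys only */
	int err;

	/*
	 * Unlike the old aes_expandkey(), this prepares only the
	 * encryption key schedule; no decryption round keys are
	 * computed or cached.
	 */
	err = aes_prepareenckey(&enckey, key, keylen);
	if (err)
		return err;

	/* Will be renamed to aes_encrypt() once old callers are gone. */
	aes_encrypt_new(&enckey, out, in);

	memzero_explicit(&enckey, sizeof(enckey));
	return 0;
}
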
arch/arm/crypto/ghash-ce-glue.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index a52dcc8c1e33..9ab03bce352d 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -202,24 +202,28 @@ int pmull_gcm_dec_final(int bytes, u64 dg[], char *tag,
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
unsigned int keylen)
{
struct gcm_key *ctx = crypto_aead_ctx(tfm);
- struct crypto_aes_ctx aes_ctx;
+ struct aes_enckey aes_key;
be128 h, k;
int ret;

- ret = aes_expandkey(&aes_ctx, inkey, keylen);
+ ret = aes_prepareenckey(&aes_key, inkey, keylen);
if (ret)
return -EINVAL;

- aes_encrypt(&aes_ctx, (u8 *)&k, (u8[AES_BLOCK_SIZE]){});
+ aes_encrypt_new(&aes_key, (u8 *)&k, (u8[AES_BLOCK_SIZE]){});

- memcpy(ctx->rk, aes_ctx.key_enc, sizeof(ctx->rk));
+ /*
+ * Note: this assumes that the ARM implementation of the AES library
+ * stores the standard round keys in k.rndkeys.
+ */
+ memcpy(ctx->rk, aes_key.k.rndkeys, sizeof(ctx->rk));
ctx->rounds = 6 + keylen / 4;
- memzero_explicit(&aes_ctx, sizeof(aes_ctx));
+ memzero_explicit(&aes_key, sizeof(aes_key));

ghash_reflect(ctx->h[0], &k);

h = k;
gf128mul_lle(&h, &k);
--
2.52.0