Now that all callers of the aes_encrypt() and aes_decrypt() type-generic
macros are using the new types, remove the old functions.
Then, replace the macro with direct calls to the new functions, dropping
the "_new" suffix from them.
This completes the change in the type of the key struct that is passed
to aes_encrypt() and aes_decrypt().
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
include/crypto/aes.h | 24 ++-------
lib/crypto/aes.c | 118 +++----------------------------------------
2 files changed, 10 insertions(+), 132 deletions(-)
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 4cb3c27d1bf5..cbf1cc96db52 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -306,39 +306,23 @@ typedef union {
* @out: Buffer to store the ciphertext block
* @in: Buffer containing the plaintext block
*
* Context: Any context.
*/
-#define aes_encrypt(key, out, in) \
- _Generic((key), \
- struct crypto_aes_ctx *: aes_encrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
- const struct crypto_aes_ctx *: aes_encrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
- struct aes_enckey *: aes_encrypt_new((const struct aes_enckey *)(key), (out), (in)), \
- const struct aes_enckey *: aes_encrypt_new((const struct aes_enckey *)(key), (out), (in)), \
- struct aes_key *: aes_encrypt_new((const struct aes_key *)(key), (out), (in)), \
- const struct aes_key *: aes_encrypt_new((const struct aes_key *)(key), (out), (in)))
-void aes_encrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
-void aes_encrypt_new(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
- const u8 in[at_least AES_BLOCK_SIZE]);
+void aes_encrypt(aes_encrypt_arg key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
/**
* aes_decrypt() - Decrypt a single AES block
* @key: The AES key, previously initialized by aes_preparekey()
* @out: Buffer to store the plaintext block
* @in: Buffer containing the ciphertext block
*
* Context: Any context.
*/
-#define aes_decrypt(key, out, in) \
- _Generic((key), \
- struct crypto_aes_ctx *: aes_decrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
- const struct crypto_aes_ctx *: aes_decrypt_old((const struct crypto_aes_ctx *)(key), (out), (in)), \
- struct aes_key *: aes_decrypt_new((const struct aes_key *)(key), (out), (in)), \
- const struct aes_key *: aes_decrypt_new((const struct aes_key *)(key), (out), (in)))
-void aes_decrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
-void aes_decrypt_new(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
- const u8 in[at_least AES_BLOCK_SIZE]);
+void aes_decrypt(const struct aes_key *key, u8 out[at_least AES_BLOCK_SIZE],
+ const u8 in[at_least AES_BLOCK_SIZE]);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
extern const u32 aes_enc_tab[256];
extern const u32 aes_dec_tab[256];
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
index 88da68dcf5a8..b7ab2d0c4e59 100644
--- a/lib/crypto/aes.c
+++ b/lib/crypto/aes.c
@@ -249,26 +249,10 @@ static u32 inv_mix_columns(u32 x)
u32 y = mul_by_x2(x);
return mix_columns(x ^ y ^ ror32(y, 16));
}
-static __always_inline u32 subshift(u32 in[], int pos)
-{
- return (aes_sbox[in[pos] & 0xff]) ^
- (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
- (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
- (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
-}
-
-static __always_inline u32 inv_subshift(u32 in[], int pos)
-{
- return (aes_inv_sbox[in[pos] & 0xff]) ^
- (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
- (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
- (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
-}
-
static u32 subw(u32 in)
{
return (aes_sbox[in & 0xff]) ^
(aes_sbox[(in >> 8) & 0xff] << 8) ^
(aes_sbox[(in >> 16) & 0xff] << 16) ^
@@ -343,55 +327,10 @@ int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
aes_expandkey_generic(ctx->key_enc, ctx->key_dec, in_key, key_len);
return 0;
}
EXPORT_SYMBOL(aes_expandkey);
-void aes_encrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
-{
- const u32 *rkp = ctx->key_enc + 4;
- int rounds = 6 + ctx->key_length / 4;
- u32 st0[4], st1[4];
- int round;
-
- st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
- st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
- st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
- st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
-
- /*
- * Force the compiler to emit data independent Sbox references,
- * by xoring the input with Sbox values that are known to add up
- * to zero. This pulls the entire Sbox into the D-cache before any
- * data dependent lookups are done.
- */
- st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
- st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
- st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
- st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
-
- for (round = 0;; round += 2, rkp += 8) {
- st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
- st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
- st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
- st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
-
- if (round == rounds - 2)
- break;
-
- st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
- st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
- st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
- st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
- }
-
- put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
- put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
- put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
- put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
-}
-EXPORT_SYMBOL(aes_encrypt_old);
-
static __always_inline u32 enc_quarterround(const u32 w[4], int i, u32 rk)
{
return rk ^ aes_enc_tab[(u8)w[i]] ^
rol32(aes_enc_tab[(u8)(w[(i + 1) % 4] >> 8)], 8) ^
rol32(aes_enc_tab[(u8)(w[(i + 2) % 4] >> 16)], 16) ^
@@ -496,55 +435,10 @@ static void __maybe_unused aes_decrypt_generic(const u32 inv_rndkeys[],
put_unaligned_le32(declast_quarterround(w, 1, *rkp++), &out[4]);
put_unaligned_le32(declast_quarterround(w, 2, *rkp++), &out[8]);
put_unaligned_le32(declast_quarterround(w, 3, *rkp++), &out[12]);
}
-void aes_decrypt_old(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
-{
- const u32 *rkp = ctx->key_dec + 4;
- int rounds = 6 + ctx->key_length / 4;
- u32 st0[4], st1[4];
- int round;
-
- st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
- st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
- st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
- st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
-
- /*
- * Force the compiler to emit data independent Sbox references,
- * by xoring the input with Sbox values that are known to add up
- * to zero. This pulls the entire Sbox into the D-cache before any
- * data dependent lookups are done.
- */
- st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
- st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
- st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
- st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
-
- for (round = 0;; round += 2, rkp += 8) {
- st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
- st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
- st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
- st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
-
- if (round == rounds - 2)
- break;
-
- st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
- st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
- st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
- st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
- }
-
- put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
- put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
- put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
- put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
-}
-EXPORT_SYMBOL(aes_decrypt_old);
-
/*
* Note: the aes_prepare*key_* names reflect the fact that the implementation
* might not actually expand the key. (The s390 code for example doesn't.)
* Where the key is expanded we use the more specific names aes_expandkey_*.
*
@@ -606,23 +500,23 @@ int aes_prepareenckey(struct aes_enckey *key, const u8 *in_key, size_t key_len)
{
return __aes_preparekey(key, NULL, in_key, key_len);
}
EXPORT_SYMBOL(aes_prepareenckey);
-void aes_encrypt_new(aes_encrypt_arg key, u8 out[AES_BLOCK_SIZE],
- const u8 in[AES_BLOCK_SIZE])
+void aes_encrypt(aes_encrypt_arg key, u8 out[AES_BLOCK_SIZE],
+ const u8 in[AES_BLOCK_SIZE])
{
aes_encrypt_arch(key.enc_key, out, in);
}
-EXPORT_SYMBOL(aes_encrypt_new);
+EXPORT_SYMBOL(aes_encrypt);
-void aes_decrypt_new(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
- const u8 in[AES_BLOCK_SIZE])
+void aes_decrypt(const struct aes_key *key, u8 out[AES_BLOCK_SIZE],
+ const u8 in[AES_BLOCK_SIZE])
{
aes_decrypt_arch(key, out, in);
}
-EXPORT_SYMBOL(aes_decrypt_new);
+EXPORT_SYMBOL(aes_decrypt);
#ifdef aes_mod_init_arch
static int __init aes_mod_init(void)
{
aes_mod_init_arch();
--
2.52.0
The following patch adds KUnit tests for the AES library. It does a very minimal verification of the AES operation for all key sizes. The benchmarks, which are also part of the test-suite, can be used to get some rough performance measurements of the AES encrypt and decrypt functions. The aes_prepare*key() APIs are not covered by the benchmarks.
Example output of the aes_kunit test-suite:
[   44.748194] KTAP version 1
[   44.748199] 1..1
[   44.748240]     KTAP version 1
[   44.748241]     # Subtest: aes
[   44.748242]     # module: aes_kunit
[   44.748244]     1..9
[   44.748304]     ok 1 aes128_kat_encrypt
[   44.748365]     ok 2 aes128_kat_decrypt
[   44.748417]     ok 3 aes192_kat_encrypt
[   44.748482]     ok 4 aes192_kat_decrypt
[   44.748528]     ok 5 aes256_kat_encrypt
[   44.748583]     ok 6 aes256_kat_decrypt
[   45.466878]     # aes128_benchmark: enc (iter. 10000000, duration 359887225ns)
[   45.466881]     # aes128_benchmark: enc (len=16): 423 MB/s
[   45.466883]     # aes128_benchmark: dec (iter. 10000000, duration 358322328ns)
[   45.466885]     # aes128_benchmark: dec (len=16): 425 MB/s
[   45.466921]     ok 7 aes128_benchmark
[   46.205717]     # aes192_benchmark: enc (iter. 10000000, duration 367953960ns)
[   46.205720]     # aes192_benchmark: enc (len=16): 414 MB/s
[   46.205722]     # aes192_benchmark: dec (iter. 10000000, duration 370756491ns)
[   46.205724]     # aes192_benchmark: dec (len=16): 411 MB/s
[   46.205752]     ok 8 aes192_benchmark
[   46.974536]     # aes256_benchmark: enc (iter. 10000000, duration 386414949ns)
[   46.974539]     # aes256_benchmark: enc (len=16): 394 MB/s
[   46.974541]     # aes256_benchmark: dec (iter. 10000000, duration 382280549ns)
[   46.974542]     # aes256_benchmark: dec (len=16): 399 MB/s
[   46.974716]     ok 9 aes256_benchmark
[   46.974719] # aes: pass:9 fail:0 skip:0 total:9
[   46.974721] # Totals: pass:9 fail:0 skip:0 total:9
[   46.974724] ok 1 aes

Holger Dengler (1):
  lib/crypto: tests: Add KUnit tests for AES

 lib/crypto/tests/Kconfig        |  12 ++++
 lib/crypto/tests/Makefile       |   1 +
 lib/crypto/tests/aes-testvecs.h |  78 ++++++++++++++++++++++
 lib/crypto/tests/aes_kunit.c    | 115 ++++++++++++++++++++++++++++++++
 4 files changed, 206 insertions(+)
 create mode 100644 lib/crypto/tests/aes-testvecs.h
 create mode 100644 lib/crypto/tests/aes_kunit.c
--
2.51.0
Add a KUnit test suite for AES library functions, including KAT and
benchmarks.
Signed-off-by: Holger Dengler <dengler@linux.ibm.com>
---
lib/crypto/tests/Kconfig | 12 ++++
lib/crypto/tests/Makefile | 1 +
lib/crypto/tests/aes-testvecs.h | 78 ++++++++++++++++++++++
lib/crypto/tests/aes_kunit.c | 115 ++++++++++++++++++++++++++++++++
4 files changed, 206 insertions(+)
create mode 100644 lib/crypto/tests/aes-testvecs.h
create mode 100644 lib/crypto/tests/aes_kunit.c
diff --git a/lib/crypto/tests/Kconfig b/lib/crypto/tests/Kconfig
index 4970463ea0aa..f34e79093275 100644
--- a/lib/crypto/tests/Kconfig
+++ b/lib/crypto/tests/Kconfig
@@ -118,6 +118,18 @@ config CRYPTO_LIB_SHA3_KUNIT_TEST
including SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128 and
SHAKE256.
+config CRYPTO_LIB_AES_KUNIT_TEST
+ tristate "KUnit tests for AES" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+ select CRYPTO_LIB_AES
+ help
+ KUnit tests for the AES library functions, including known answer
+ tests and benchmarks for encrypt/decrypt with all key sizes. The
+ test suite does not contain any key generation test, nor any error
+ cases.
+
config CRYPTO_LIB_BENCHMARK_VISIBLE
bool
diff --git a/lib/crypto/tests/Makefile b/lib/crypto/tests/Makefile
index f4262379f56c..72234e965cdc 100644
--- a/lib/crypto/tests/Makefile
+++ b/lib/crypto/tests/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_CRYPTO_LIB_SHA1_KUNIT_TEST) += sha1_kunit.o
obj-$(CONFIG_CRYPTO_LIB_SHA256_KUNIT_TEST) += sha224_kunit.o sha256_kunit.o
obj-$(CONFIG_CRYPTO_LIB_SHA512_KUNIT_TEST) += sha384_kunit.o sha512_kunit.o
obj-$(CONFIG_CRYPTO_LIB_SHA3_KUNIT_TEST) += sha3_kunit.o
+obj-$(CONFIG_CRYPTO_LIB_AES_KUNIT_TEST) += aes_kunit.o
diff --git a/lib/crypto/tests/aes-testvecs.h b/lib/crypto/tests/aes-testvecs.h
new file mode 100644
index 000000000000..2bfa646ff2e5
--- /dev/null
+++ b/lib/crypto/tests/aes-testvecs.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _AES_TESTVECS_H
+#define _AES_TESTVECS_H
+
+#include <crypto/aes.h>
+
+struct buf {
+ size_t blen;
+ u8 b[];
+};
+
+struct kat {
+ u8 plain[AES_BLOCK_SIZE];
+ u8 cipher[AES_BLOCK_SIZE];
+ struct {
+ size_t len;
+ u8 b[32];
+ } key;
+};
+
+static const struct kat AES128_KAT = {
+ .plain = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ },
+ .cipher = {
+ 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60,
+ 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97,
+ },
+ .key = {
+ .len = 16,
+ .b = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
+ },
+ },
+};
+
+static const struct kat AES192_KAT = {
+ .plain = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ },
+ .cipher = {
+ 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f,
+ 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc,
+ },
+ .key = {
+ .len = 24,
+ .b = {
+ 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52,
+ 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5,
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b,
+ },
+ },
+};
+
+static const struct kat AES256_KAT = {
+ .plain = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ },
+ .cipher = {
+ 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c,
+ 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8,
+ },
+ .key = {
+ .len = 32,
+ .b = {
+ 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe,
+ 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81,
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7,
+ 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4,
+ },
+ },
+};
+
+#endif /* _AES_TESTVECS_H */
diff --git a/lib/crypto/tests/aes_kunit.c b/lib/crypto/tests/aes_kunit.c
new file mode 100644
index 000000000000..057ddc3a1b1f
--- /dev/null
+++ b/lib/crypto/tests/aes_kunit.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+
+#include "aes-testvecs.h"
+
+#define AES_KAT(bits, func, from, to) \
+static void aes##bits##_kat_##func(struct kunit *test) \
+{ \
+ const u8 *in = AES##bits##_KAT.from; \
+ u8 out[AES_BLOCK_SIZE]; \
+ struct aes_key aes_key; \
+ \
+ if (aes_preparekey(&aes_key, AES##bits##_KAT.key.b, \
+ AES##bits##_KAT.key.len)) \
+ kunit_skip(test, "no key"); \
+ \
+ aes_##func(&aes_key, out, in); \
+ KUNIT_ASSERT_MEMEQ(test, out, AES##bits##_KAT.to, \
+ sizeof(out)); \
+}
+
+#define KB (1024)
+#define MB (KB * KB)
+#define NS_PER_SEC (1000000000ULL)
+
+#define AES_BENCHMARK(bits) \
+static void aes##bits##_benchmark(struct kunit *test) \
+{ \
+ const size_t num_iters = 10000000; \
+ const u8 *cipher = AES##bits##_KAT.cipher; \
+ const u8 *plain = AES##bits##_KAT.plain; \
+ u8 out[AES_BLOCK_SIZE]; \
+ struct aes_key aes_key; \
+ u64 t_enc, t_dec; \
+ \
+ if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK)) \
+ kunit_skip(test, "not enabled"); \
+ \
+ if (aes_preparekey(&aes_key, AES##bits##_KAT.key.b, \
+ AES##bits##_KAT.key.len)) \
+ kunit_skip(test, "no key"); \
+ \
+ /* warm-up enc */ \
+ for (size_t i = 0; i < 1000; i++) \
+ aes_encrypt(&aes_key, out, plain); \
+ \
+ preempt_disable(); \
+ t_enc = ktime_get_ns(); \
+ \
+ for (size_t i = 0; i < num_iters; i++) \
+ aes_encrypt(&aes_key, out, plain); \
+ \
+ t_enc = ktime_get_ns() - t_enc; \
+ preempt_enable(); \
+ \
+ /* warm-up dec */ \
+ for (size_t i = 0; i < 1000; i++) \
+ aes_decrypt(&aes_key, out, cipher); \
+ \
+ preempt_disable(); \
+ t_dec = ktime_get_ns(); \
+ \
+ for (size_t i = 0; i < num_iters; i++) \
+ aes_decrypt(&aes_key, out, cipher); \
+ \
+ t_dec = ktime_get_ns() - t_dec; \
+ preempt_enable(); \
+ \
+ kunit_info(test, "enc (iter. %zu, duration %lluns)", \
+ num_iters, t_enc); \
+ kunit_info(test, "enc (len=%zu): %llu MB/s", \
+ (size_t)AES_BLOCK_SIZE, \
+ div64_u64((u64)AES_BLOCK_SIZE * num_iters * NS_PER_SEC, \
+ (t_enc ?: 1) * MB)); \
+ \
+ kunit_info(test, "dec (iter. %zu, duration %lluns)", \
+ num_iters, t_dec); \
+ kunit_info(test, "dec (len=%zu): %llu MB/s", \
+ (size_t)AES_BLOCK_SIZE, \
+ div64_u64((u64)AES_BLOCK_SIZE * num_iters * NS_PER_SEC, \
+ (t_dec ?: 1) * MB)); \
+}
+
+AES_KAT(128, encrypt, plain, cipher);
+AES_KAT(192, encrypt, plain, cipher);
+AES_KAT(256, encrypt, plain, cipher);
+AES_KAT(128, decrypt, cipher, plain);
+AES_KAT(192, decrypt, cipher, plain);
+AES_KAT(256, decrypt, cipher, plain);
+AES_BENCHMARK(128);
+AES_BENCHMARK(192);
+AES_BENCHMARK(256);
+
+static struct kunit_case aes_test_cases[] = {
+ KUNIT_CASE(aes128_kat_encrypt),
+ KUNIT_CASE(aes128_kat_decrypt),
+ KUNIT_CASE(aes192_kat_encrypt),
+ KUNIT_CASE(aes192_kat_decrypt),
+ KUNIT_CASE(aes256_kat_encrypt),
+ KUNIT_CASE(aes256_kat_decrypt),
+ KUNIT_CASE(aes128_benchmark),
+ KUNIT_CASE(aes192_benchmark),
+ KUNIT_CASE(aes256_benchmark),
+ {},
+};
+
+static struct kunit_suite aes_test_suite = {
+ .name = "aes",
+ .test_cases = aes_test_cases,
+};
+
+kunit_test_suite(aes_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests and benchmark aes library");
+MODULE_LICENSE("GPL");
--
2.51.0
Thanks for writing this!
On Wed, Jan 14, 2026 at 04:31:38PM +0100, Holger Dengler wrote:
> diff --git a/lib/crypto/tests/Kconfig b/lib/crypto/tests/Kconfig
> index 4970463ea0aa..f34e79093275 100644
> --- a/lib/crypto/tests/Kconfig
> +++ b/lib/crypto/tests/Kconfig
> @@ -118,6 +118,18 @@ config CRYPTO_LIB_SHA3_KUNIT_TEST
> including SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128 and
> SHAKE256.
>
> +config CRYPTO_LIB_AES_KUNIT_TEST
> + tristate "KUnit tests for AES" if !KUNIT_ALL_TESTS
> + depends on KUNIT
> + default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
> + select CRYPTO_LIB_BENCHMARK_VISIBLE
> + select CRYPTO_LIB_AES
> + help
> + KUnit tests for the AES library functions, including known answer
> + tests and benchmarks for encrypt/decrypt with all key sizes. The
> + test suite does not contain any key generation test, nor any error
> + cases.
It should go first in the file, to maintain the existing alphabetical
order.
> diff --git a/lib/crypto/tests/Makefile b/lib/crypto/tests/Makefile
> index f4262379f56c..72234e965cdc 100644
> --- a/lib/crypto/tests/Makefile
> +++ b/lib/crypto/tests/Makefile
> @@ -12,3 +12,4 @@ obj-$(CONFIG_CRYPTO_LIB_SHA1_KUNIT_TEST) += sha1_kunit.o
> obj-$(CONFIG_CRYPTO_LIB_SHA256_KUNIT_TEST) += sha224_kunit.o sha256_kunit.o
> obj-$(CONFIG_CRYPTO_LIB_SHA512_KUNIT_TEST) += sha384_kunit.o sha512_kunit.o
> obj-$(CONFIG_CRYPTO_LIB_SHA3_KUNIT_TEST) += sha3_kunit.o
> +obj-$(CONFIG_CRYPTO_LIB_AES_KUNIT_TEST) += aes_kunit.o
Likewise in the Makefile.
> diff --git a/lib/crypto/tests/aes_kunit.c b/lib/crypto/tests/aes_kunit.c
> new file mode 100644
> index 000000000000..057ddc3a1b1f
> --- /dev/null
> +++ b/lib/crypto/tests/aes_kunit.c
> @@ -0,0 +1,115 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <kunit/test.h>
> +
> +#include "aes-testvecs.h"
> +
> +#define AES_KAT(bits, func, from, to) \
> +static void aes##bits##_kat_##func(struct kunit *test) \
> +{ \
> + const u8 *in = AES##bits##_KAT.from; \
> + u8 out[AES_BLOCK_SIZE]; \
> + struct aes_key aes_key; \
> + \
> + if (aes_preparekey(&aes_key, AES##bits##_KAT.key.b, \
> + AES##bits##_KAT.key.len)) \
> + kunit_skip(test, "no key"); \
Skipping on failure seems wrong.
> +#define KB (1024)
> +#define MB (KB * KB)
> +#define NS_PER_SEC (1000000000ULL)
If you'd like to use named constants for these, note that the kernel
headers already have constants SZ_1K, SZ_1M, and NSEC_PER_SEC. So these
local definitions aren't needed.
> +
> +#define AES_BENCHMARK(bits) \
> +static void aes##bits##_benchmark(struct kunit *test) \
> +{ \
> + const size_t num_iters = 10000000; \
> + const u8 *cipher = AES##bits##_KAT.cipher; \
> + const u8 *plain = AES##bits##_KAT.plain; \
> + u8 out[AES_BLOCK_SIZE]; \
> + struct aes_key aes_key; \
> + u64 t_enc, t_dec; \
> + \
> + if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK)) \
> + kunit_skip(test, "not enabled"); \
> + \
> + if (aes_preparekey(&aes_key, AES##bits##_KAT.key.b, \
> + AES##bits##_KAT.key.len)) \
> + kunit_skip(test, "no key"); \
> + \
> + /* warm-up enc */ \
> + for (size_t i = 0; i < 1000; i++) \
> + aes_encrypt(&aes_key, out, plain); \
> + \
> + preempt_disable(); \
> + t_enc = ktime_get_ns(); \
> + \
> + for (size_t i = 0; i < num_iters; i++) \
> + aes_encrypt(&aes_key, out, plain); \
> + \
> + t_enc = ktime_get_ns() - t_enc; \
> + preempt_enable(); \
> + \
> + /* warm-up dec */ \
> + for (size_t i = 0; i < 1000; i++) \
> + aes_decrypt(&aes_key, out, cipher); \
> + \
> + preempt_disable(); \
> + t_dec = ktime_get_ns(); \
> + \
> + for (size_t i = 0; i < num_iters; i++) \
> + aes_decrypt(&aes_key, out, cipher); \
> + \
> + t_dec = ktime_get_ns() - t_dec; \
> + preempt_enable(); \
> + \
> + kunit_info(test, "enc (iter. %zu, duration %lluns)", \
> + num_iters, t_enc); \
> + kunit_info(test, "enc (len=%zu): %llu MB/s", \
> + (size_t)AES_BLOCK_SIZE, \
> + div64_u64((u64)AES_BLOCK_SIZE * num_iters * NS_PER_SEC, \
> + (t_enc ?: 1) * MB)); \
> + \
> + kunit_info(test, "dec (iter. %zu, duration %lluns)", \
> + num_iters, t_dec); \
> + kunit_info(test, "dec (len=%zu): %llu MB/s", \
> + (size_t)AES_BLOCK_SIZE, \
> + div64_u64((u64)AES_BLOCK_SIZE * num_iters * NS_PER_SEC, \
> + (t_dec ?: 1) * MB)); \
> +}
> +
> +AES_KAT(128, encrypt, plain, cipher);
> +AES_KAT(192, encrypt, plain, cipher);
> +AES_KAT(256, encrypt, plain, cipher);
> +AES_KAT(128, decrypt, cipher, plain);
> +AES_KAT(192, decrypt, cipher, plain);
> +AES_KAT(256, decrypt, cipher, plain);
> +AES_BENCHMARK(128);
> +AES_BENCHMARK(192);
> +AES_BENCHMARK(256);
The heavy use of macros doesn't seem that helpful here. The API is
already unified, where we have aes_preparekey(), aes_encrypt(), and
aes_decrypt() that handle all of AES-128, AES-192, and AES-256. So we
don't need entirely different code to test each variant.
We could just write helper functions, e.g. aes_test() and
aes_benchmark(). They would take in a pointer to a test vector, and the
individual KUnit case functions would call them.
See lib/crypto/tests/mldsa_kunit.c which does something similar.
- Eric
On 15/01/2026 00:04, Eric Biggers wrote:
> Thanks for writing this!

Thanks for your feedback. I'll send a new version with all the changes.

--
Mit freundlichen Grüßen / Kind regards
Holger Dengler
--
IBM Systems, Linux on IBM Z Development
dengler@linux.ibm.com
© 2016 - 2026 Red Hat, Inc.