Every field in struct aesni_xts_ctx is a pointer to a byte array. Each
array has a size of struct crypto_aes_ctx. Then, the field can be
redefined as that struct type instead of the obscure pointer.
Then the address of struct aesni_xts_ctx can be aligned once, up front,
rather than on every access to a field.
Thus, redefine struct aesni_xts_ctx and align its address up front. This
requires refactoring the common alignment code into a helper. That helper
also turns out to simplify the runtime alignment elsewhere, so use it for
the other modes as well.
Suggested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
Changes from v7:
* Massage the helper function to be usable for other alignment code
such as aesni_rfc4106_gcm_ctx_get() and generic_gcmaes_ctx_get().
(Eric Biggers)
Changes from v6:
* Add as a new patch. (Eric Biggers)
It was considered better to address this fix before the preparatory
AES-NI code rework.
---
arch/x86/crypto/aesni-intel_glue.c | 51 +++++++++++++++---------------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..589648142c17 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -61,8 +61,8 @@ struct generic_gcmaes_ctx {
};
struct aesni_xts_ctx {
- u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
- u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+ struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
+ struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
};
#define GCM_BLOCK_LEN 16
@@ -80,6 +80,13 @@ struct gcm_context_data {
u8 hash_keys[GCM_BLOCK_LEN * 16];
};
+static inline void *aes_align_addr(void *addr)
+{
+ if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
+ return addr;
+ return PTR_ALIGN(addr, AESNI_ALIGN);
+}
+
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
@@ -201,32 +208,24 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
- unsigned long align = AESNI_ALIGN;
-
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+ return (struct aesni_rfc4106_gcm_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
}
static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
- unsigned long align = AESNI_ALIGN;
-
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+ return (struct generic_gcmaes_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
}
#endif
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
- unsigned long addr = (unsigned long)raw_ctx;
- unsigned long align = AESNI_ALIGN;
+ return (struct crypto_aes_ctx *)aes_align_addr(raw_ctx);
+}
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return (struct crypto_aes_ctx *)ALIGN(addr, align);
+static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+ return (struct aesni_xts_ctx *)aes_align_addr(crypto_skcipher_ctx(tfm));
}
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
@@ -883,7 +882,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -893,20 +892,20 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+ err = aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->crypt_ctx,
key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+ return aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->tweak_ctx,
key + keylen, keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
struct skcipher_walk walk;
@@ -942,7 +941,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
/* calculate first value of T */
- aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
+ aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
while (walk.nbytes > 0) {
int nbytes = walk.nbytes;
@@ -951,11 +950,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
nbytes &= ~(AES_BLOCK_SIZE - 1);
if (encrypt)
- aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
kernel_fpu_end();
@@ -983,11 +982,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
if (encrypt)
- aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
--
2.17.1
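For readers without the kernel tree at hand, here is a minimal user-space
sketch of the layout question behind the redefinition above. struct
crypto_aes_ctx is stubbed with approximate contents and AESNI_ALIGN_ATTR is
re-created locally, so the exact figures are assumptions rather than the
kernel's definitions; the point is only that the opaque byte-array layout
and the plain struct-member layout end up with the same size and alignment,
which is what makes the redefinition a drop-in change.

#include <stdio.h>
#include <stdalign.h>

#define AESNI_ALIGN		16
#define AESNI_ALIGN_ATTR	__attribute__((__aligned__(AESNI_ALIGN)))

/* Rough stand-in for struct crypto_aes_ctx (not the real definition). */
struct aes_ctx_stub {
	unsigned int key_enc[60];
	unsigned int key_dec[60];
	unsigned int key_length;
};

/* Old layout: opaque byte arrays, re-aligned on every access. */
struct xts_ctx_old {
	unsigned char raw_tweak_ctx[sizeof(struct aes_ctx_stub)] AESNI_ALIGN_ATTR;
	unsigned char raw_crypt_ctx[sizeof(struct aes_ctx_stub)] AESNI_ALIGN_ATTR;
};

/* New layout: plain struct members, context pointer aligned once up front. */
struct xts_ctx_new {
	struct aes_ctx_stub tweak_ctx AESNI_ALIGN_ATTR;
	struct aes_ctx_stub crypt_ctx AESNI_ALIGN_ATTR;
};

int main(void)
{
	printf("old: size %zu, align %zu\n",
	       sizeof(struct xts_ctx_old), alignof(struct xts_ctx_old));
	printf("new: size %zu, align %zu\n",
	       sizeof(struct xts_ctx_new), alignof(struct xts_ctx_new));
	return 0;
}

Both printouts should report identical numbers when built with gcc or clang.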
On Sat, Jun 03, 2023 at 08:22:25AM -0700, Chang S. Bae wrote:
> Every field in struct aesni_xts_ctx is a pointer to a byte array.
A byte array. Not a pointer to a byte array.
> Then, the field can be redefined as that struct type instead of the obscure
> pointer.
There's no pointer.
> static inline struct
> aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
> {
> - unsigned long align = AESNI_ALIGN;
> -
> - if (align <= crypto_tfm_ctx_alignment())
> - align = 1;
> - return PTR_ALIGN(crypto_aead_ctx(tfm), align);
> + return (struct aesni_rfc4106_gcm_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
> }
Explicit casts from 'void *' are unnecessary.
> static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
> unsigned int keylen)
> {
> - struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
> + struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
> int err;
>
> err = xts_verify_key(tfm, key, keylen);
> @@ -893,20 +892,20 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
> keylen /= 2;
>
> /* first half of xts-key is for crypt */
> - err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
> + err = aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->crypt_ctx,
> key, keylen);
> if (err)
> return err;
>
> /* second half of xts-key is for tweak */
> - return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
> + return aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->tweak_ctx,
> key + keylen, keylen);
> }
To re-iterate what I said on v6, the runtime alignment to a 16-byte boundary
should happen when translating the raw crypto_skcipher_ctx() into the pointer to
the aes_xts_ctx. It should not happen when accessing each individual field in
the aes_xts_ctx.
Yet, this code is still doing runtime alignment when accessing each individual
field, as the second argument to aes_set_key_common() is 'void *raw_ctx' which
aes_set_key_common() runtime-aligns to crypto_aes_ctx.
We should keep everything consistent, which means making aes_set_key_common()
take a pointer to crypto_aes_ctx and not do the runtime alignment.
- Eric
On 6/4/2023 8:34 AM, Eric Biggers wrote:
>
> To re-iterate what I said on v6, the runtime alignment to a 16-byte boundary
> should happen when translating the raw crypto_skcipher_ctx() into the pointer to
> the aes_xts_ctx. It should not happen when accessing each individual field in
> the aes_xts_ctx.
>
> Yet, this code is still doing runtime alignment when accessing each individual
> field, as the second argument to aes_set_key_common() is 'void *raw_ctx' which
> aes_set_key_common() runtime-aligns to crypto_aes_ctx.
>
> We should keep everything consistent, which means making aes_set_key_common()
> take a pointer to crypto_aes_ctx and not do the runtime alignment.

Let me clarify what is the problem this patch tried to solve here. The
current struct aesni_xts_ctx is ugly. So, the main story is let's fix it
before using the code for AES-KL.

Then, the rework part may be applicable for code re-usability. That seems to
be okay to do here.

Fixing the runtime alignment entirely seems to be touching other code than
AES-XTS. Yes, that's ideal cleanup for consistency. But, it seems to be less
relevant in this series. I'd be happy to follow up on that improvement
though.

Thanks,
Chang
On Sun, Jun 04, 2023 at 03:02:32PM -0700, Chang S. Bae wrote:
> On 6/4/2023 8:34 AM, Eric Biggers wrote:
> >
> > To re-iterate what I said on v6, the runtime alignment to a 16-byte boundary
> > should happen when translating the raw crypto_skcipher_ctx() into the pointer to
> > the aes_xts_ctx. It should not happen when accessing each individual field in
> > the aes_xts_ctx.
> >
> > Yet, this code is still doing runtime alignment when accessing each individual
> > field, as the second argument to aes_set_key_common() is 'void *raw_ctx' which
> > aes_set_key_common() runtime-aligns to crypto_aes_ctx.
> >
> > We should keep everything consistent, which means making aes_set_key_common()
> > take a pointer to crypto_aes_ctx and not do the runtime alignment.
>
> Let me clarify what is the problem this patch tried to solve here. The
> current struct aesni_xts_ctx is ugly. So, the main story is let's fix it
> before using the code for AES-KL.
>
> Then, the rework part may be applicable for code re-usability. That seems to
> be okay to do here.
>
> Fixing the runtime alignment entirely seems to be touching other code than
> AES-XTS. Yes, that's ideal cleanup for consistency. But, it seems to be less
> relevant in this series. I'd be happy to follow up on that improvement
> though.
IMO the issue is that your patch makes the code (including the XTS code)
inconsistent because it makes it use a mix of both approaches: it aligns each
field individually, *and* it aligns the ctx up-front. I was hoping to switch
fully from the former approach to the latter approach, instead of switching from
the former approach to a mix of the two approaches as you are proposing.
The following on top of this patch is what I am asking for. I think it would be
appropriate to fold into this patch.
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 589648142c173..ad1ae7a88b59d 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -228,10 +228,10 @@ static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
return (struct aesni_xts_ctx *)aes_align_addr(crypto_skcipher_ctx(tfm));
}
-static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+static int aes_set_key_common(struct crypto_tfm *tfm,
+ struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
- struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
@@ -252,7 +252,8 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
- return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+ return aes_set_key_common(tfm, aes_ctx(crypto_tfm_ctx(tfm)),
+ in_key, key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -285,7 +286,7 @@ static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
return aes_set_key_common(crypto_skcipher_tfm(tfm),
- crypto_skcipher_ctx(tfm), key, len);
+ aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
On 6/4/2023 7:46 PM, Eric Biggers wrote:
>
> IMO the issue is that your patch makes the code (including the XTS code)
> inconsistent because it makes it use a mix of both approaches: it aligns each
> field individually, *and* it aligns the ctx up-front.
But, the code did this already:
common_rfc4106_set_key()
-> aesni_rfc4106_gcm_ctx_get(aead) // align ->ctx
-> aes_set_key_common()
-> aes_ctx() // here, this aligns again
And generic_gcmaes_set_key() as well.
Hmm, maybe a separate patch can spell out this issue more clearly for
the record, no?
Thanks,
Chang
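To make the chain above concrete, here is a small user-space sketch with the
kernel helpers modeled by local stand-ins (the names echo
aesni_rfc4106_gcm_ctx_get() and the aes_ctx() call inside
aes_set_key_common(), but PTR_ALIGN and AESNI_ALIGN are re-created here, so
this is an illustration under those assumptions, not the kernel code). It
shows why the second alignment is harmless but redundant: aligning a pointer
that is already 16-byte aligned returns the same pointer.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AESNI_ALIGN	16
#define ALIGN_UP(x, a)	(((x) + (uintptr_t)(a) - 1) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)	((void *)ALIGN_UP((uintptr_t)(p), (a)))

/* Stand-in for the ctx getter, e.g. aesni_rfc4106_gcm_ctx_get(). */
static void *ctx_get(void *raw)
{
	return PTR_ALIGN(raw, AESNI_ALIGN);
}

/* Stand-in for the aes_ctx() call inside aes_set_key_common(). */
static void *set_key_align(void *raw)
{
	return PTR_ALIGN(raw, AESNI_ALIGN);
}

int main(void)
{
	unsigned char raw_ctx[64];

	void *once  = ctx_get(raw_ctx + 5);	/* aligned at the getter */
	void *twice = set_key_align(once);	/* aligned again at set_key time */

	assert(once == twice);			/* the second pass is a no-op */
	printf("once=%p twice=%p\n", once, twice);
	return 0;
}

So the double alignment does not misbehave; it is merely redundant, which is
what the follow-up patch below cleans up.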
aes_set_key_common() performs runtime alignment to the void *raw_ctx
pointer. This facilitates consistent access to the 16-byte-aligned
address during key extension.
However, the alignment is already handled in the GCM-related setkey
functions before invoking the common function. Consequently, the
alignment in the common function is unnecessary for those functions.
To establish a consistent approach throughout the glue code, remove
the aes_ctx() call from its current location. Instead, place it at
each call site where the runtime alignment is currently absent.
Link: https://lore.kernel.org/lkml/20230605024623.GA4653@quark.localdomain/
Suggested-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: linux-crypto@vger.kernel.org
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
---
The need for this fix was discovered during Eric's review of the Key
Locker series [1]. Considering the upstream code also requires this
improvement, this is applicable regardless of the Key Locker enabling
[2].
[1] https://lore.kernel.org/lkml/20230605024623.GA4653@quark.localdomain/
[2] https://lore.kernel.org/lkml/f1093780-cdda-35ec-3ef1-e5fab4139bef@intel.com/
---
arch/x86/crypto/aesni-intel_glue.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..c4eea7e746e7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -229,10 +229,10 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
-static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+static int aes_set_key_common(struct crypto_tfm *tfm,
+ struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
- struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
@@ -253,7 +253,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
- return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+ return aes_set_key_common(tfm, aes_ctx(crypto_tfm_ctx(tfm)), in_key, key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -286,7 +286,7 @@ static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
return aes_set_key_common(crypto_skcipher_tfm(tfm),
- crypto_skcipher_ctx(tfm), key, len);
+ aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
@@ -893,13 +893,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+ err = aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_crypt_ctx),
key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+ return aes_set_key_common(crypto_skcipher_tfm(tfm), aes_ctx(ctx->raw_tweak_ctx),
key + keylen, keylen);
}
--
2.34.1
On Wed, Jun 21, 2023 at 05:06:53AM -0700, Chang S. Bae wrote:
> aes_set_key_common() performs runtime alignment to the void *raw_ctx
> pointer. This facilitates consistent access to the 16-byte-aligned
> address during key extension.
>
> However, the alignment is already handled in the GCM-related setkey
> functions before invoking the common function. Consequently, the
> alignment in the common function is unnecessary for those functions.
>
> To establish a consistent approach throughout the glue code, remove
> the aes_ctx() call from its current location. Instead, place it at
> each call site where the runtime alignment is currently absent.
>
> Link: https://lore.kernel.org/lkml/20230605024623.GA4653@quark.localdomain/
> Suggested-by: Eric Biggers <ebiggers@kernel.org>
> Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
> Cc: linux-crypto@vger.kernel.org
> Cc: x86@kernel.org
> Cc: linux-kernel@vger.kernel.org
> ---
> The need for this fix was discovered during Eric's review of the Key
> Locker series [1]. Considering the upstream code also requires this
> improvement, this is applicable regardless of the Key Locker enabling
> [2].
>
> [1] https://lore.kernel.org/lkml/20230605024623.GA4653@quark.localdomain/
> [2] https://lore.kernel.org/lkml/f1093780-cdda-35ec-3ef1-e5fab4139bef@intel.com/
> ---
> arch/x86/crypto/aesni-intel_glue.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)

Patch applied. Thanks.
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt