Using 64-bit arithmetic improves performance for xts-aes-128
when built with gcrypt:
Encrypt: 272 MB/s -> 355 MB/s
Decrypt: 275 MB/s -> 362 MB/s
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
---
crypto/xts.c | 84 ++++++++++++++++++++++++++++++++++++----------------
1 file changed, 58 insertions(+), 26 deletions(-)
diff --git a/crypto/xts.c b/crypto/xts.c
index bee23f890e..2e3430672c 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -31,6 +31,13 @@ typedef union {
uint64_t u[2];
} xts_uint128;
+static inline void xts_uint128_xor(xts_uint128 *D,
+ const xts_uint128 *S1,
+ const xts_uint128 *S2)
+{
+ D->u[0] = S1->u[0] ^ S2->u[0];
+ D->u[1] = S1->u[1] ^ S2->u[1];
+}
static void xts_mult_x(uint8_t *I)
{
@@ -60,25 +67,19 @@ static void xts_mult_x(uint8_t *I)
*/
static void xts_tweak_encdec(const void *ctx,
xts_cipher_func *func,
- const uint8_t *src,
- uint8_t *dst,
- uint8_t *iv)
+ const xts_uint128 *src,
+ xts_uint128 *dst,
+ xts_uint128 *iv)
{
- unsigned long x;
-
/* tweak encrypt block i */
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = src[x] ^ iv[x];
- }
+ xts_uint128_xor(dst, src, iv);
- func(ctx, XTS_BLOCK_SIZE, dst, dst);
+ func(ctx, XTS_BLOCK_SIZE, dst->b, dst->b);
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = dst[x] ^ iv[x];
- }
+ xts_uint128_xor(dst, dst, iv);
/* LFSR the tweak */
- xts_mult_x(iv);
+ xts_mult_x(iv->b);
}
@@ -110,20 +111,34 @@ void xts_decrypt(const void *datactx,
/* encrypt the iv */
encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
- for (i = 0; i < lim; i++) {
- xts_tweak_encdec(datactx, decfunc, src, dst, T.b);
-
- src += XTS_BLOCK_SIZE;
- dst += XTS_BLOCK_SIZE;
+ if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
+ QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
+ xts_uint128 *S = (xts_uint128 *)src;
+ xts_uint128 *D = (xts_uint128 *)dst;
+ for (i = 0; i < lim; i++, S++, D++) {
+ xts_tweak_encdec(datactx, decfunc, S, D, &T);
+ }
+ } else {
+ xts_uint128 S, D;
+
+ for (i = 0; i < lim; i++) {
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, decfunc, &S, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
+ src += XTS_BLOCK_SIZE;
+ dst += XTS_BLOCK_SIZE;
+ }
}
/* if length is not a multiple of XTS_BLOCK_SIZE then */
if (mo > 0) {
+ xts_uint128 S, D;
memcpy(&CC, &T, XTS_BLOCK_SIZE);
xts_mult_x(CC.b);
/* PP = tweak decrypt block m-1 */
- xts_tweak_encdec(datactx, decfunc, src, PP.b, CC.b);
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, decfunc, &S, &PP, &CC);
/* Pm = first length % XTS_BLOCK_SIZE bytes of PP */
for (i = 0; i < mo; i++) {
@@ -135,7 +150,8 @@ void xts_decrypt(const void *datactx,
}
/* Pm-1 = Tweak uncrypt CC */
- xts_tweak_encdec(datactx, decfunc, CC.b, dst, T.b);
+ xts_tweak_encdec(datactx, decfunc, &CC, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
}
/* Decrypt the iv back */
@@ -171,17 +187,32 @@ void xts_encrypt(const void *datactx,
/* encrypt the iv */
encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
- for (i = 0; i < lim; i++) {
- xts_tweak_encdec(datactx, encfunc, src, dst, T.b);
+ if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
+ QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
+ xts_uint128 *S = (xts_uint128 *)src;
+ xts_uint128 *D = (xts_uint128 *)dst;
+ for (i = 0; i < lim; i++, S++, D++) {
+ xts_tweak_encdec(datactx, encfunc, S, D, &T);
+ }
+ } else {
+ xts_uint128 S, D;
+
+ for (i = 0; i < lim; i++) {
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, encfunc, &S, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
- dst += XTS_BLOCK_SIZE;
- src += XTS_BLOCK_SIZE;
+ dst += XTS_BLOCK_SIZE;
+ src += XTS_BLOCK_SIZE;
+ }
}
/* if length is not a multiple of XTS_BLOCK_SIZE then */
if (mo > 0) {
+ xts_uint128 S, D;
/* CC = tweak encrypt block m-1 */
- xts_tweak_encdec(datactx, encfunc, src, CC.b, T.b);
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, encfunc, &S, &CC, &T);
/* Cm = first length % XTS_BLOCK_SIZE bytes of CC */
for (i = 0; i < mo; i++) {
@@ -194,7 +225,8 @@ void xts_encrypt(const void *datactx,
}
/* Cm-1 = Tweak encrypt PP */
- xts_tweak_encdec(datactx, encfunc, PP.b, dst, T.b);
+ xts_tweak_encdec(datactx, encfunc, &PP, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
}
/* Decrypt the iv back */
--
2.17.2
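
The core of the change is visible in the first hunk: the byte-at-a-time XOR of
each 16-byte XTS block is replaced by two 64-bit XORs through the xts_uint128
union. A self-contained sketch of the idea follows (the uint8_t view of the
union is inferred from how the patch uses the b member; XTS_BLOCK_SIZE is 16):

#include <stdint.h>

#define XTS_BLOCK_SIZE 16

/* Two views of one 16-byte XTS block: bytes for the cipher and
 * tweak code, two 64-bit words for the fast XOR path. */
typedef union {
    uint8_t b[XTS_BLOCK_SIZE];
    uint64_t u[2];
} xts_uint128;

/* Before the patch: sixteen single-byte XOR operations per block. */
static void xts_xor_bytewise(uint8_t *dst, const uint8_t *src,
                             const uint8_t *iv)
{
    for (unsigned x = 0; x < XTS_BLOCK_SIZE; x++) {
        dst[x] = src[x] ^ iv[x];
    }
}

/* After the patch: two 64-bit XOR operations per block. */
static inline void xts_uint128_xor(xts_uint128 *D,
                                   const xts_uint128 *S1,
                                   const xts_uint128 *S2)
{
    D->u[0] = S1->u[0] ^ S2->u[0];
    D->u[1] = S1->u[1] ^ S2->u[1];
}

Because the uint64_t view must not be applied to misaligned buffers, the patch
only casts src/dst to xts_uint128 when QEMU_PTR_IS_ALIGNED() reports 64-bit
alignment, and otherwise bounces each block through a local xts_uint128 with
memcpy().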
On Tue 16 Oct 2018 12:09:14 PM CEST, Daniel P. Berrangé wrote:
> @@ -110,20 +111,34 @@ void xts_decrypt(const void *datactx,
> /* encrypt the iv */
> encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
>
> - for (i = 0; i < lim; i++) {
> - xts_tweak_encdec(datactx, decfunc, src, dst, T.b);
> -
> - src += XTS_BLOCK_SIZE;
> - dst += XTS_BLOCK_SIZE;
> + if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
> + QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
> + xts_uint128 *S = (xts_uint128 *)src;
> + xts_uint128 *D = (xts_uint128 *)dst;
> + for (i = 0; i < lim; i++, S++, D++) {
> + xts_tweak_encdec(datactx, decfunc, S, D, &T);
> + }
> + } else {
> + xts_uint128 S, D;
> +
> + for (i = 0; i < lim; i++) {
> + memcpy(&S, src, XTS_BLOCK_SIZE);
> + xts_tweak_encdec(datactx, decfunc, &S, &D, &T);
> + memcpy(dst, &D, XTS_BLOCK_SIZE);
> + src += XTS_BLOCK_SIZE;
> + dst += XTS_BLOCK_SIZE;
> + }
The patch looks good to me, but a couple of comments:
- As far as I can see xts_tweak_encdec() works the same regardless of
whether src and dst point to the same address or not. As a matter of
fact both qcrypto_block_decrypt() and qcrypto_block_encrypt() do the
decryption and encryption in place, and as you can see the
qcrypto_cipher_*crypt() calls in crypto/block.c pass the same buffer
as input and output.
So instead of having S and D you should be fine with just one of them.
- I think this is just a matter of style preference, but in the first
for loop you can remove the comma operator (i++, S++, D++) and use
S[i] and D[i] instead in the line after that. I'm fine if you prefer
the current style, though.
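
To illustrate the first point: since xts_tweak_encdec() works in place, the
unaligned fallback loop could be reduced to something like this (a rough
sketch of that loop inside xts_decrypt(), not part of the posted patch):

/* Hypothetical simplification of the unaligned fallback path:
 * one local block suffices because xts_tweak_encdec() accepts
 * src == dst. Uses the same locals as xts_decrypt() in the patch. */
xts_uint128 B;

for (i = 0; i < lim; i++) {
    memcpy(&B, src, XTS_BLOCK_SIZE);
    xts_tweak_encdec(datactx, decfunc, &B, &B, &T);
    memcpy(dst, &B, XTS_BLOCK_SIZE);
    src += XTS_BLOCK_SIZE;
    dst += XTS_BLOCK_SIZE;
}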
Berto
On Tue, Oct 16, 2018 at 03:09:16PM +0200, Alberto Garcia wrote:
> On Tue 16 Oct 2018 12:09:14 PM CEST, Daniel P. Berrangé wrote:
>
> > @@ -110,20 +111,34 @@ void xts_decrypt(const void *datactx,
> > /* encrypt the iv */
> > encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
> >
> > - for (i = 0; i < lim; i++) {
> > - xts_tweak_encdec(datactx, decfunc, src, dst, T.b);
> > -
> > - src += XTS_BLOCK_SIZE;
> > - dst += XTS_BLOCK_SIZE;
> > + if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
> > + QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
> > + xts_uint128 *S = (xts_uint128 *)src;
> > + xts_uint128 *D = (xts_uint128 *)dst;
> > + for (i = 0; i < lim; i++, S++, D++) {
> > + xts_tweak_encdec(datactx, decfunc, S, D, &T);
> > + }
> > + } else {
> > + xts_uint128 S, D;
> > +
> > + for (i = 0; i < lim; i++) {
> > + memcpy(&S, src, XTS_BLOCK_SIZE);
> > + xts_tweak_encdec(datactx, decfunc, &S, &D, &T);
> > + memcpy(dst, &D, XTS_BLOCK_SIZE);
> > + src += XTS_BLOCK_SIZE;
> > + dst += XTS_BLOCK_SIZE;
> > + }
>
> The patch looks good to me, but a couple of comments:
>
> - As far as I can see xts_tweak_encdec() works the same regardless of
> whether src and dst point to the same address or not. As a matter of
> fact both qcrypto_block_decrypt() and qcrypto_block_encrypt() do the
> decryption and encryption in place, and as you can see the
> qcrypto_cipher_*crypt() calls in crypto/block.c pass the same buffer
> as input and output.
>
> So instead of having S and D you should be fine with just one of them.
Yes, I could do that in the 2nd loop.
>
> - I think this is just a matter of style preference, but in the first
> for loop you can remove the comma operator (i++, S++, D++) and use
> S[i] and D[i] instead in the line after that. I'm fine if you prefer
> the current style, though.
The syntax I used results in slightly more efficient asm code.
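
For reference, the two forms under discussion look roughly like this (a
sketch of the aligned loop in xts_decrypt(), not verbatim from the patch):

/* Form used in the patch: advance the block pointers directly. */
for (i = 0; i < lim; i++, S++, D++) {
    xts_tweak_encdec(datactx, decfunc, S, D, &T);
}

/* Indexed alternative: semantically equivalent, but may leave an
 * extra address computation per iteration depending on how well the
 * compiler strength-reduces the indexing. */
for (i = 0; i < lim; i++) {
    xts_tweak_encdec(datactx, decfunc, &S[i], &D[i], &T);
}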
Regards,
Daniel
--
|: https://berrange.com -o- https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o- https://fstop138.berrange.com :|
|: https://entangle-photo.org -o- https://www.instagram.com/dberrange :|