Support the XTS-AES-128 and XTS-AES-256 subfunctions
of the CPACF KM instruction under TCG.

The AES key and the initial tweak are fetched from the parameter
block addressed by GR1. After each block the tweak is advanced by
multiplication with alpha in GF(2^128), and the updated tweak is
written back to the parameter block on exit. At most 8K bytes are
processed per invocation; condition code 3 is returned when data
remains, so the guest re-executes the instruction.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
---
target/s390x/gen-features.c | 2 +
target/s390x/tcg/cpacf.h | 3 +
target/s390x/tcg/cpacf_aes.c | 103 +++++++++++++++++++++++++++++++
target/s390x/tcg/crypto_helper.c | 6 ++
4 files changed, 114 insertions(+)
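
Notes:

The per-block tweak update implemented below is the multiplication
by the primitive element alpha in GF(2^128) with the XTS
little-endian convention: shift the 16-byte tweak left by one bit
(byte 0 least significant) and fold the carry out of bit 127 back
in as the reduction polynomial 0x87. A minimal standalone sketch of
the same operation (the helper name is illustrative, not part of
this patch):

    #include <stdint.h>

    #define AES_BLOCK_SIZE 16

    /* Multiply the tweak by alpha in GF(2^128), XTS convention. */
    static void xts_mul_alpha(uint8_t t[AES_BLOCK_SIZE])
    {
        unsigned int carry = 0;
        int i;

        for (i = 0; i < AES_BLOCK_SIZE; i++) {
            unsigned int msb = t[i] >> 7; /* bit shifted out of byte i */

            t[i] = (uint8_t)((t[i] << 1) | carry);
            carry = msb;
        }
        if (carry) {
            t[0] ^= 0x87; /* x^128 == x^7 + x^2 + x + 1 */
        }
    }

Doubling {0x80, 0, ..., 0} yields {0x00, 0x01, 0, ...}, and a set
top bit of byte 15 wraps into byte 0 as 0x87;
aes_xts_prep_next_tweak() below computes the same result with the
final carry hoisted out of the loop.
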
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 1b6a874b90..f9b1a40c7c 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -924,6 +924,8 @@ static uint16_t qemu_MAX[] = {
S390_FEAT_KM_AES_128,
S390_FEAT_KM_AES_192,
S390_FEAT_KM_AES_256,
+ S390_FEAT_KM_XTS_AES_128,
+ S390_FEAT_KM_XTS_AES_256,
S390_FEAT_KMC_AES_128,
S390_FEAT_KMC_AES_192,
S390_FEAT_KMC_AES_256,
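
With these two feature bits in qemu_MAX, a KM query (function code 0)
issued by the guest reports the new subfunctions: in CPACF the query
bit position equals the function code, so bits 0x32 and 0x34 of the
returned status block are set. From a guest kernel this can be
checked with the arch/s390 cpacf helpers (shown as an illustration;
these names come from the Linux headers, not from this patch):

    cpacf_mask_t mask;

    cpacf_query(CPACF_KM, &mask);
    if (cpacf_test_func(&mask, CPACF_KM_XTS_128)) {
        /* KM-XTS-AES-128 is available */
    }
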
diff --git a/target/s390x/tcg/cpacf.h b/target/s390x/tcg/cpacf.h
index aa39a3c85a..8c1dbee67f 100644
--- a/target/s390x/tcg/cpacf.h
+++ b/target/s390x/tcg/cpacf.h
@@ -28,5 +28,8 @@ int cpacf_aes_ctr(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint64_t *ctr_ptr, uint32_t type, uint8_t fc, uint8_t mod);
int cpacf_aes_pcc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint8_t fc);
+int cpacf_aes_xts(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod);
#endif
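
The dst_ptr, src_ptr and src_len arguments of the new prototype map
to the R1, R2 and R2 + 1 general registers of KM, and param_addr to
GR1 (see the crypto_helper.c hunk below). The parameter block layout
implied by the code is the key followed by the tweak; sketched as a
struct for the 256-bit variant (the name is illustrative, the
128-bit variant has a 16-byte key):

    struct km_xts_aes_256_param {
        uint8_t key[32];   /* AES key */
        uint8_t tweak[16]; /* XTS parameter, written back on exit */
    };
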
diff --git a/target/s390x/tcg/cpacf_aes.c b/target/s390x/tcg/cpacf_aes.c
index 1076322e30..f067939f25 100644
--- a/target/s390x/tcg/cpacf_aes.c
+++ b/target/s390x/tcg/cpacf_aes.c
@@ -346,3 +346,106 @@ int cpacf_aes_pcc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
return 0;
}
+
+static void aes_xts_prep_next_tweak(uint8_t tweak[AES_BLOCK_SIZE])
+{
+ uint8_t carry;
+ int i;
+
+ carry = tweak[AES_BLOCK_SIZE - 1] >> 7; /* carry out of bit 127 */
+
+ for (i = AES_BLOCK_SIZE - 1; i > 0; i--) {
+ tweak[i] = (uint8_t)((tweak[i] << 1) | (tweak[i - 1] >> 7));
+ }
+
+ tweak[0] = (uint8_t)(tweak[0] << 1);
+ tweak[0] ^= (uint8_t)(0x87 & (uint8_t)(-(int8_t)carry));
+}
+
+int cpacf_aes_xts(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod)
+{
+ enum { MAX_BLOCKS_PER_RUN = 8192 / AES_BLOCK_SIZE }; /* 8K per invocation */
+ uint8_t buf1[AES_BLOCK_SIZE], buf2[AES_BLOCK_SIZE];
+ uint64_t addr, len = *src_len, processed = 0;
+ uint8_t key[32], tweak[AES_BLOCK_SIZE];
+ int i, keysize, data_reg_len = 64;
+ AES_KEY exkey;
+
+ g_assert(type == S390_FEAT_TYPE_KM);
+
+ switch (fc) {
+ case 0x32: /* CPACF_KM_XTS_128 */
+ keysize = 16;
+ break;
+ case 0x34: /* CPACF_KM_XTS_256 */
+ keysize = 32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ len = (uint32_t)len;
+ data_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24;
+ }
+
+ /* the operand length must be a multiple of the AES block size */
+ if (!QEMU_IS_ALIGNED(len, AES_BLOCK_SIZE)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ /* fetch key from param block */
+ for (i = 0; i < keysize; i++) {
+ addr = wrap_address(env, param_addr + i);
+ key[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+
+ /* expand key */
+ if (mod) {
+ AES_set_decrypt_key(key, keysize * 8, &exkey);
+ } else {
+ AES_set_encrypt_key(key, keysize * 8, &exkey);
+ }
+
+ /* fetch tweak from param block */
+ for (i = 0; i < AES_BLOCK_SIZE; i++) {
+ addr = wrap_address(env, param_addr + keysize + i);
+ tweak[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+
+ /* process at most MAX_BLOCKS_PER_RUN AES blocks per invocation */
+ for (i = 0; i < MAX_BLOCKS_PER_RUN && len >= AES_BLOCK_SIZE; i++) {
+ /* fetch one AES block into buf1 */
+ aes_read_block(env, *src_ptr + processed, buf1, ra);
+ /* buf1 xor tweak => buf2 */
+ aes_xor(buf1, tweak, buf2);
+ if (mod) {
+ /* decrypt buf2 => buf1 */
+ AES_decrypt(buf2, buf1, &exkey);
+ } else {
+ /* encrypt buf2 => buf1 */
+ AES_encrypt(buf2, buf1, &exkey);
+ }
+ /* buf1 xor tweak => buf2 */
+ aes_xor(buf1, tweak, buf2);
+ /* advance the tweak for the next block: multiply by alpha in GF(2^128) */
+ aes_xts_prep_next_tweak(tweak);
+ /* write out this processed block from buf2 */
+ aes_write_block(env, *dst_ptr + processed, buf2, ra);
+ len -= AES_BLOCK_SIZE, processed += AES_BLOCK_SIZE;
+ }
+
+ /* update tweak in param block */
+ for (i = 0; i < AES_BLOCK_SIZE; i++) {
+ addr = wrap_address(env, param_addr + keysize + i);
+ cpu_stb_data_ra(env, addr, tweak[i], ra);
+ }
+
+ *src_ptr = deposit64(*src_ptr, 0, data_reg_len, *src_ptr + processed);
+ *dst_ptr = deposit64(*dst_ptr, 0, data_reg_len, *dst_ptr + processed);
+ *src_len -= processed;
+
+ return !len ? 0 : 3; /* cc 3: partial completion */
+}
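
cpacf_aes_xts() processes at most 8K bytes (MAX_BLOCKS_PER_RUN
blocks) per invocation and returns 3 when data remains, mirroring
the CPU-determined amount of the real instruction. Because the
operand registers are advanced before returning, re-execution picks
up where the previous run stopped; conceptually (a hypothetical
driver loop, not code from this patch):

    int cc;

    do {
        cc = cpacf_aes_xts(env, ra, env->regs[1],
                           &env->regs[r1], &env->regs[r2],
                           &env->regs[r2 + 1],
                           S390_FEAT_TYPE_KM, fc, mod);
    } while (cc == 3);
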
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index a7f4e135e1..7129b38703 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -101,6 +101,12 @@ static int cpacf_km(CPUS390XState *env, uintptr_t ra, uint32_t r1,
&env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
S390_FEAT_TYPE_KM, fc, mod);
break;
+ case 0x32: /* CPACF_KM_XTS_128 */
+ case 0x34: /* CPACF_KM_XTS_256 */
+ rc = cpacf_aes_xts(env, ra, env->regs[1],
+ &env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
+ S390_FEAT_TYPE_KM, fc, mod);
+ break;
default:
g_assert_not_reached();
}
--
2.43.0
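
P.S.: In 24- and 31-bit addressing mode only the low 24 or 32 bits
of the operand registers are updated, which is what the deposit64()
calls at the end of cpacf_aes_xts() implement. A worked example for
31-bit mode (the values are illustrative):

    uint64_t r2 = 0xdeadbeeffffffff0ULL;

    /* advance past one 16-byte block; only bits 0..31 change */
    r2 = deposit64(r2, 0, 32, r2 + 0x10);
    /* r2 == 0xdeadbeef00000000: the low word wrapped to zero,
     * the high word is preserved */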