Support the subfunctions CPACF_KM_PXTS_128 and CPACF_KM_PXTS_256
for the cpacf km instruction.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
---
target/s390x/gen-features.c | 2 +
target/s390x/tcg/cpacf.h | 3 +
target/s390x/tcg/cpacf_aes.c | 102 +++++++++++++++++++++++++++++++
target/s390x/tcg/crypto_helper.c | 6 ++
4 files changed, 113 insertions(+)
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 126bacb281..c4c59c3504 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -929,6 +929,8 @@ static uint16_t qemu_MAX[] = {
S390_FEAT_KM_EAES_256,
S390_FEAT_KM_XTS_AES_128,
S390_FEAT_KM_XTS_AES_256,
+ S390_FEAT_KM_XTS_EAES_128,
+ S390_FEAT_KM_XTS_EAES_256,
S390_FEAT_KMC_AES_128,
S390_FEAT_KMC_AES_192,
S390_FEAT_KMC_AES_256,
diff --git a/target/s390x/tcg/cpacf.h b/target/s390x/tcg/cpacf.h
index 9d0801b217..47dfa6bef7 100644
--- a/target/s390x/tcg/cpacf.h
+++ b/target/s390x/tcg/cpacf.h
@@ -44,5 +44,8 @@ int cpacf_paes_ctr(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint64_t *ctr_ptr, uint32_t type, uint8_t fc, uint8_t mod);
int cpacf_paes_pcc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint8_t fc);
+int cpacf_paes_xts(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod);
#endif
diff --git a/target/s390x/tcg/cpacf_aes.c b/target/s390x/tcg/cpacf_aes.c
index c4406f4a34..0892413e74 100644
--- a/target/s390x/tcg/cpacf_aes.c
+++ b/target/s390x/tcg/cpacf_aes.c
@@ -865,3 +865,105 @@ int cpacf_paes_pcc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
return 0;
}
+
+int cpacf_paes_xts(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod)
+{
+ enum { MAX_BLOCKS_PER_RUN = 8192 / AES_BLOCK_SIZE }; /* partial-completion limit (cc 3) */
+ uint8_t buf1[AES_BLOCK_SIZE], buf2[AES_BLOCK_SIZE];
+ uint8_t key[32], wkvp[32], tweak[AES_BLOCK_SIZE];
+ uint64_t addr, len = *src_len, processed = 0; /* byte counts */
+ int i, keysize, data_reg_len = 64;
+ AES_KEY exkey;
+
+ g_assert(type == S390_FEAT_TYPE_KM); /* XTS is only wired up for KM */
+
+ switch (fc) {
+ case 0x3a: /* CPACF_KM_PXTS_128 */
+ keysize = 16;
+ break;
+ case 0x3c: /* CPACF_KM_PXTS_256 */
+ keysize = 32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!(env->psw.mask & PSW_MASK_64)) { /* 24/31-bit addressing mode */
+ len = (uint32_t)len;
+ data_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24;
+ }
+
+ /* length has to be properly aligned. */
+ if (!QEMU_IS_ALIGNED(len, AES_BLOCK_SIZE)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ /* fetch and check wkvp from param block */
+ for (i = 0; i < sizeof(wkvp); i++) {
+ addr = wrap_address(env, param_addr + keysize + i);
+ wkvp[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+ if (memcmp(wkvp, protkey_wkvp, sizeof(wkvp))) {
+ /* wkvp mismatch -> return with cc 1 */
+ return 1;
+ }
+
+ /* fetch protected key from param block */
+ for (i = 0; i < keysize; i++) {
+ addr = wrap_address(env, param_addr + i);
+ key[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+ /* 'decrypt' the protected key */
+ for (i = 0; i < keysize; i++) {
+ key[i] ^= protkey_xor_pattern[i];
+ }
+
+ /* expand key */
+ if (mod) { /* modifier bit set -> decipher */
+ AES_set_decrypt_key(key, keysize * 8, &exkey);
+ } else {
+ AES_set_encrypt_key(key, keysize * 8, &exkey);
+ }
+
+ /* fetch tweak from param block */
+ for (i = 0; i < AES_BLOCK_SIZE; i++) {
+ addr = wrap_address(env, param_addr + keysize + sizeof(wkvp) + i);
+ tweak[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+
+ /* process up to MAX_BLOCKS_PER_RUN aes blocks */
+ for (i = 0; i < MAX_BLOCKS_PER_RUN && len >= AES_BLOCK_SIZE; i++) {
+ /* fetch one AES block into buf1 */
+ aes_read_block(env, *src_ptr + processed, buf1, ra);
+ /* buf1 xor tweak => buf2 */
+ aes_xor(buf1, tweak, buf2);
+ if (mod) {
+ /* decrypt buf2 => buf1 */
+ AES_decrypt(buf2, buf1, &exkey);
+ } else {
+ /* encrypt buf2 => buf1 */
+ AES_encrypt(buf2, buf1, &exkey);
+ }
+ /* buf1 xor tweak => buf2 */
+ aes_xor(buf1, tweak, buf2);
+ /* prep tweak for next round */
+ aes_xts_prep_next_tweak(tweak);
+ /* write out this processed block from buf2 */
+ aes_write_block(env, *dst_ptr + processed, buf2, ra);
+ len -= AES_BLOCK_SIZE, processed += AES_BLOCK_SIZE;
+ }
+
+ /* update tweak in param block */
+ for (i = 0; i < AES_BLOCK_SIZE; i++) {
+ addr = wrap_address(env, param_addr + keysize + sizeof(wkvp) + i);
+ cpu_stb_data_ra(env, addr, tweak[i], ra);
+ }
+
+ /* advance address registers within the addressing-mode width only */
+ *src_ptr = deposit64(*src_ptr, 0, data_reg_len, *src_ptr + processed);
+ *dst_ptr = deposit64(*dst_ptr, 0, data_reg_len, *dst_ptr + processed);
+ *src_len -= processed;
+
+ return !len ? 0 : 3; /* cc 0: complete, cc 3: partial, re-drive */
+}
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index 3ff8331993..1dd61cac5e 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -114,6 +114,12 @@ static int cpacf_km(CPUS390XState *env, uintptr_t ra, uint32_t r1,
&env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
S390_FEAT_TYPE_KM, fc, mod);
break;
+ case 0x3a: /* CPACF_KM_PXTS_128 */
+ case 0x3c: /* CPACF_KM_PXTS_256 */
+ rc = cpacf_paes_xts(env, ra, env->regs[1],
+ &env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
+ S390_FEAT_TYPE_KM, fc, mod);
+ break;
default:
g_assert_not_reached();
}
--
2.43.0