Support the subfunctions CPACF_KMC_PAES_128, CPACF_KMC_PAES_192
and CPACF_KMC_PAES_256 for the CPACF KMC instruction.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
---
target/s390x/gen-features.c | 3 +
target/s390x/tcg/cpacf.h | 3 +
target/s390x/tcg/cpacf_aes.c | 105 +++++++++++++++++++++++++++++++
target/s390x/tcg/crypto_helper.c | 7 +++
4 files changed, 118 insertions(+)
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 71e0e41d6e..074c53aecd 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -932,6 +932,9 @@ static uint16_t qemu_MAX[] = {
S390_FEAT_KMC_AES_128,
S390_FEAT_KMC_AES_192,
S390_FEAT_KMC_AES_256,
+ S390_FEAT_KMC_EAES_128,
+ S390_FEAT_KMC_EAES_192,
+ S390_FEAT_KMC_EAES_256,
S390_FEAT_KMCTR_AES_128,
S390_FEAT_KMCTR_AES_192,
S390_FEAT_KMCTR_AES_256,
diff --git a/target/s390x/tcg/cpacf.h b/target/s390x/tcg/cpacf.h
index 4c1e0ee58d..920c1f50fb 100644
--- a/target/s390x/tcg/cpacf.h
+++ b/target/s390x/tcg/cpacf.h
@@ -36,5 +36,8 @@ int cpacf_aes_pckmo(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
int cpacf_paes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
uint32_t type, uint8_t fc, uint8_t mod);
+int cpacf_paes_cbc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod);
#endif
diff --git a/target/s390x/tcg/cpacf_aes.c b/target/s390x/tcg/cpacf_aes.c
index 6f909e433d..c0edc57572 100644
--- a/target/s390x/tcg/cpacf_aes.c
+++ b/target/s390x/tcg/cpacf_aes.c
@@ -597,3 +597,108 @@ int cpacf_paes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
return !len ? 0 : 3;
}
+
+int cpacf_paes_cbc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+                   uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+                   uint32_t type, uint8_t fc, uint8_t mod)
+{
+    enum { MAX_BLOCKS_PER_RUN = 8192 / AES_BLOCK_SIZE };
+    uint8_t in[AES_BLOCK_SIZE], out[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE];
+    uint8_t key[32], wkvp[32], iv[AES_BLOCK_SIZE];
+    uint64_t addr, len = *src_len, processed = 0;
+    int i, keysize, data_reg_len = 64;
+    AES_KEY exkey;
+
+    g_assert(type == S390_FEAT_TYPE_KMC);
+
+    switch (fc) {
+    case 0x1a: /* CPACF_KMC_PAES_128 */
+        keysize = 16;
+        break;
+    case 0x1b: /* CPACF_KMC_PAES_192 */
+        keysize = 24;
+        break;
+    case 0x1c: /* CPACF_KMC_PAES_256 */
+        keysize = 32;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        len = (uint32_t)len;
+        data_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24;
+    }
+
+    /* operand length must be a multiple of the AES block size */
+    if (!QEMU_IS_ALIGNED(len, AES_BLOCK_SIZE)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    /* fetch and check wkvp from param block */
+    for (i = 0; i < sizeof(wkvp); i++) {
+        addr = wrap_address(env, param_addr + AES_BLOCK_SIZE + keysize + i);
+        wkvp[i] = cpu_ldub_data_ra(env, addr, ra);
+    }
+    if (memcmp(wkvp, protkey_wkvp, sizeof(wkvp))) {
+        /* wkvp mismatch -> return with cc 1 */
+        return 1;
+    }
+
+    /* fetch iv from param block */
+    for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        addr = wrap_address(env, param_addr + i);
+        iv[i] = cpu_ldub_data_ra(env, addr, ra);
+    }
+
+    /* fetch protected key from param block */
+    for (i = 0; i < keysize; i++) {
+        addr = wrap_address(env, param_addr + AES_BLOCK_SIZE + i);
+        key[i] = cpu_ldub_data_ra(env, addr, ra);
+    }
+    /* 'decrypt' the protected key */
+    for (i = 0; i < keysize; i++) {
+        key[i] ^= protkey_xor_pattern[i];
+    }
+
+    /* expand key */
+    if (mod) {
+        AES_set_decrypt_key(key, keysize * 8, &exkey);
+    } else {
+        AES_set_encrypt_key(key, keysize * 8, &exkey);
+    }
+
+    /* process up to MAX_BLOCKS_PER_RUN aes blocks */
+    for (i = 0; i < MAX_BLOCKS_PER_RUN && len >= AES_BLOCK_SIZE; i++) {
+        aes_read_block(env, *src_ptr + processed, in, ra);
+        if (mod) {
+            /* decrypt in => buf */
+            AES_decrypt(in, buf, &exkey);
+            /* buf xor iv => out */
+            aes_xor(buf, iv, out);
+            /* prep iv for next round */
+            memcpy(iv, in, AES_BLOCK_SIZE);
+        } else {
+            /* in xor iv => buf */
+            aes_xor(in, iv, buf);
+            /* encrypt buf => out */
+            AES_encrypt(buf, out, &exkey);
+            /* prep iv for next round */
+            memcpy(iv, out, AES_BLOCK_SIZE);
+        }
+        aes_write_block(env, *dst_ptr + processed, out, ra);
+        len -= AES_BLOCK_SIZE; processed += AES_BLOCK_SIZE;
+    }
+
+    /* update iv in param block */
+    for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        addr = wrap_address(env, param_addr + i);
+        cpu_stb_data_ra(env, addr, iv[i], ra);
+    }
+
+    *src_ptr = deposit64(*src_ptr, 0, data_reg_len, *src_ptr + processed);
+    *dst_ptr = deposit64(*dst_ptr, 0, data_reg_len, *dst_ptr + processed);
+    *src_len -= processed;
+
+    return !len ? 0 : 3;
+}
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index 146f970713..a69f4edf74 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -134,6 +134,13 @@ static int cpacf_kmc(CPUS390XState *env, uintptr_t ra, uint32_t r1,
&env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
S390_FEAT_TYPE_KMC, fc, mod);
break;
+ case 0x1a: /* CPACF_KMC_PAES_128 */
+ case 0x1b: /* CPACF_KMC_PAES_192 */
+ case 0x1c: /* CPACF_KMC_PAES_256 */
+ rc = cpacf_paes_cbc(env, ra, env->regs[1],
+ &env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
+ S390_FEAT_TYPE_KMC, fc, mod);
+ break;
default:
g_assert_not_reached();
}
--
2.43.0