Support the subfunctions CPACF_KMC_AES_128, CPACF_KMC_AES_192 and
CPACF_KMC_AES_256 of the CPACF KMC (cipher message with chaining)
instruction.
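
KMC with these function codes implements AES in CBC mode: on encipher
each plaintext block is XORed with the chaining value (initially the
IV taken from the parameter block) before it is encrypted, i.e.
C_0 = IV and C_i = E_K(P_i XOR C_{i-1}); on decipher, accordingly,
P_i = D_K(C_i) XOR C_{i-1}. The parameter block holds the 16-byte
chaining value followed by the AES key. The chaining value is written
back after each run, so a partial completion (CC 3) is resumed simply
by re-executing the instruction.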
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
---
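
For reference, a guest-side smoke test could exercise the new function
codes via the Linux kernel's cpacf wrappers, along the lines of the
sketch below (a minimal sketch only, assuming kernel context with
arch/s390/include/asm/cpacf.h and crypto/aes.h; buffer setup and error
handling are omitted):

    struct {
        u8 iv[AES_BLOCK_SIZE];     /* chaining value, updated by KMC */
        u8 key[AES_KEYSIZE_128];   /* AES-128 key */
    } param;

    memcpy(param.iv, iv, sizeof(param.iv));
    memcpy(param.key, key, sizeof(param.key));

    /* query KMC for the AES-128 function code, then encipher */
    if (cpacf_query_func(CPACF_KMC, CPACF_KMC_AES_128)) {
        /* or CPACF_KMC_AES_128 | CPACF_DECRYPT to decipher */
        cpacf_kmc(CPACF_KMC_AES_128, &param, out, in, len);
    }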
 target/s390x/gen-features.c      |   3 +
 target/s390x/tcg/cpacf.h         |   3 +
 target/s390x/tcg/cpacf_aes.c     | 100 +++++++++++++++++++++++++++++++
 target/s390x/tcg/crypto_helper.c |  23 +++++++
 4 files changed, 129 insertions(+)

diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index a35d1fd2f9..9c0c0b229f 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -924,6 +924,9 @@ static uint16_t qemu_MAX[] = {
     S390_FEAT_KM_AES_128,
     S390_FEAT_KM_AES_192,
     S390_FEAT_KM_AES_256,
+    S390_FEAT_KMC_AES_128,
+    S390_FEAT_KMC_AES_192,
+    S390_FEAT_KMC_AES_256,
 };
 
 /****** END FEATURE DEFS ******/
diff --git a/target/s390x/tcg/cpacf.h b/target/s390x/tcg/cpacf.h
index 06423abc00..18686991f7 100644
--- a/target/s390x/tcg/cpacf.h
+++ b/target/s390x/tcg/cpacf.h
@@ -20,5 +20,8 @@ int cpacf_sha512(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
 int cpacf_aes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
                   uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
                   uint32_t type, uint8_t fc, uint8_t mod);
+int cpacf_aes_cbc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+                  uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+                  uint32_t type, uint8_t fc, uint8_t mod);
 
 #endif
diff --git a/target/s390x/tcg/cpacf_aes.c b/target/s390x/tcg/cpacf_aes.c
index 6d234d8ce9..df3d05db41 100644
--- a/target/s390x/tcg/cpacf_aes.c
+++ b/target/s390x/tcg/cpacf_aes.c
@@ -106,3 +106,103 @@ int cpacf_aes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
 
     return !len ? 0 : 3;
 }
+
+static void aes_xor(const uint8_t *src1, const uint8_t *src2, uint8_t *dst)
+{
+    int i;
+
+    for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        dst[i] = src1[i] ^ src2[i];
+    }
+}
+
+int cpacf_aes_cbc(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+                  uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+                  uint32_t type, uint8_t fc, uint8_t mod)
+{
+    enum { MAX_BLOCKS_PER_RUN = 8192 / AES_BLOCK_SIZE };
+    uint8_t in[AES_BLOCK_SIZE], out[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE];
+    uint64_t addr, len = *src_len, processed = 0;
+    int i, keysize, data_reg_len = 64;
+    uint8_t key[32], iv[AES_BLOCK_SIZE];
+    AES_KEY exkey;
+
+    g_assert(type == S390_FEAT_TYPE_KMC);
+
+    switch (fc) {
+    case 0x12: /* CPACF_KMC_AES_128 */
+        keysize = 16;
+        break;
+    case 0x13: /* CPACF_KMC_AES_192 */
+        keysize = 24;
+        break;
+    case 0x14: /* CPACF_KMC_AES_256 */
+        keysize = 32;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        len = (uint32_t)len;
+        data_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24;
+    }
+
+    /* The length has to be a multiple of the AES block size. */
+    if (!QEMU_IS_ALIGNED(len, AES_BLOCK_SIZE)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    /* fetch iv from param block */
+    for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        addr = wrap_address(env, param_addr + i);
+        iv[i] = cpu_ldub_data_ra(env, addr, ra);
+    }
+
+    /* fetch key from param block */
+    for (i = 0; i < keysize; i++) {
+        addr = wrap_address(env, param_addr + AES_BLOCK_SIZE + i);
+        key[i] = cpu_ldub_data_ra(env, addr, ra);
+    }
+
+    /* expand key */
+    if (mod) {
+        AES_set_decrypt_key(key, keysize * 8, &exkey);
+    } else {
+        AES_set_encrypt_key(key, keysize * 8, &exkey);
+    }
+
+    /* process up to MAX_BLOCKS_PER_RUN AES blocks */
+    for (i = 0; i < MAX_BLOCKS_PER_RUN && len >= AES_BLOCK_SIZE; i++) {
+        aes_read_block(env, *src_ptr + processed, in, ra);
+        if (mod) {
+            /* decrypt in => buf */
+            AES_decrypt(in, buf, &exkey);
+            /* buf xor iv => out */
+            aes_xor(buf, iv, out);
+            /* prep iv for next round */
+            memcpy(iv, in, AES_BLOCK_SIZE);
+        } else {
+            /* in xor iv => buf */
+            aes_xor(in, iv, buf);
+            /* encrypt buf => out */
+            AES_encrypt(buf, out, &exkey);
+            /* prep iv for next round */
+            memcpy(iv, out, AES_BLOCK_SIZE);
+        }
+        aes_write_block(env, *dst_ptr + processed, out, ra);
+        len -= AES_BLOCK_SIZE, processed += AES_BLOCK_SIZE;
+    }
+
+    /* update iv in param block */
+    for (i = 0; i < AES_BLOCK_SIZE; i++) {
+        addr = wrap_address(env, param_addr + i);
+        cpu_stb_data_ra(env, addr, iv[i], ra);
+    }
+
+    *src_ptr = deposit64(*src_ptr, 0, data_reg_len, *src_ptr + processed);
+    *dst_ptr = deposit64(*dst_ptr, 0, data_reg_len, *dst_ptr + processed);
+    *src_len -= processed;
+
+    return !len ? 0 : 3;
+}
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index dff119176e..df01c1c54e 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -108,6 +108,26 @@ static int cpacf_km(CPUS390XState *env, uintptr_t ra, uint32_t r1,
     return rc;
 }
 
+static int cpacf_kmc(CPUS390XState *env, uintptr_t ra, uint32_t r1,
+                     uint32_t r2, uint32_t r3, uint8_t fc, uint8_t mod)
+{
+    int rc = 0;
+
+    switch (fc) {
+    case 0x12: /* CPACF_KMC_AES_128 */
+    case 0x13: /* CPACF_KMC_AES_192 */
+    case 0x14: /* CPACF_KMC_AES_256 */
+        rc = cpacf_aes_cbc(env, ra, env->regs[1],
+                           &env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
+                           S390_FEAT_TYPE_KMC, fc, mod);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    return rc;
+}
+
 static int cpacf_ppno(CPUS390XState *env, uintptr_t ra,
                       uint32_t r1, uint32_t r2, uint32_t r3, uint8_t fc)
 {
@@ -175,6 +195,9 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
     case S390_FEAT_TYPE_KM:
         rc = cpacf_km(env, ra, r1, r2, r3, fc, mod);
         break;
+    case S390_FEAT_TYPE_KMC:
+        rc = cpacf_kmc(env, ra, r1, r2, r3, fc, mod);
+        break;
     default:
         g_assert_not_reached();
     }
--
2.43.0