Add support for the subfunctions CPACF_KM_AES_128, CPACF_KM_AES_192
and CPACF_KM_AES_256 of the CPACF KM (cipher message) instruction.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
---
target/s390x/gen-features.c | 3 +
target/s390x/tcg/cpacf.h | 5 ++
target/s390x/tcg/cpacf_aes.c | 108 +++++++++++++++++++++++++++++++
target/s390x/tcg/crypto_helper.c | 24 +++++++
target/s390x/tcg/meson.build | 1 +
5 files changed, 141 insertions(+)
create mode 100644 target/s390x/tcg/cpacf_aes.c
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 5cf5b92c37..a35d1fd2f9 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -921,6 +921,9 @@ static uint16_t qemu_MAX[] = {
S390_FEAT_KLMD_SHA_256,
S390_FEAT_KLMD_SHA_512,
S390_FEAT_PRNO_TRNG,
+ S390_FEAT_KM_AES_128,
+ S390_FEAT_KM_AES_192,
+ S390_FEAT_KM_AES_256,
};
/****** END FEATURE DEFS ******/
diff --git a/target/s390x/tcg/cpacf.h b/target/s390x/tcg/cpacf.h
index 79f05e1e14..06423abc00 100644
--- a/target/s390x/tcg/cpacf.h
+++ b/target/s390x/tcg/cpacf.h
@@ -16,4 +16,9 @@ int cpacf_sha256(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
int cpacf_sha512(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
uint64_t *message_reg, uint64_t *len_reg, uint32_t type);
+/* from cpacf_aes.c */
+int cpacf_aes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod);
+
#endif
diff --git a/target/s390x/tcg/cpacf_aes.c b/target/s390x/tcg/cpacf_aes.c
new file mode 100644
index 0000000000..6d234d8ce9
--- /dev/null
+++ b/target/s390x/tcg/cpacf_aes.c
@@ -0,0 +1,108 @@
+/*
+ * s390 cpacf aes
+ *
+ * Authors:
+ * Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "s390x-internal.h"
+#include "tcg_s390x.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "crypto/aes.h"
+#include "cpacf.h"
+
+static void aes_read_block(CPUS390XState *env, uint64_t addr,
+ uint8_t *a, uintptr_t ra)
+{
+ uint64_t _addr;
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE; i++, addr += 1) {
+ _addr = wrap_address(env, addr);
+ a[i] = cpu_ldub_data_ra(env, _addr, ra);
+ }
+}
+
+static void aes_write_block(CPUS390XState *env, uint64_t addr,
+ uint8_t *a, uintptr_t ra)
+{
+ uint64_t _addr;
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE; i++, addr += 1) {
+ _addr = wrap_address(env, addr);
+ cpu_stb_data_ra(env, _addr, a[i], ra);
+ }
+}
+
+int cpacf_aes_ecb(CPUS390XState *env, uintptr_t ra, uint64_t param_addr,
+ uint64_t *dst_ptr, uint64_t *src_ptr, uint64_t *src_len,
+ uint32_t type, uint8_t fc, uint8_t mod)
+{
+ enum { MAX_BLOCKS_PER_RUN = 8192 / AES_BLOCK_SIZE };
+ uint8_t in[AES_BLOCK_SIZE], out[AES_BLOCK_SIZE];
+ uint64_t addr, len = *src_len, processed = 0;
+ int i, keysize, data_reg_len = 64;
+ uint8_t key[32];
+ AES_KEY exkey;
+
+ g_assert(type == S390_FEAT_TYPE_KM);
+ switch (fc) {
+ case 0x12: /* CPACF_KM_AES_128 */
+ keysize = 16;
+ break;
+ case 0x13: /* CPACF_KM_AES_192 */
+ keysize = 24;
+ break;
+ case 0x14: /* CPACF_KM_AES_256 */
+ keysize = 32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ len = (uint32_t)len;
+ data_reg_len = (env->psw.mask & PSW_MASK_32) ? 32 : 24;
+ }
+
+ /* length has to be properly aligned. */
+ if (!QEMU_IS_ALIGNED(len, AES_BLOCK_SIZE)) {
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+ }
+
+ /* fetch key from param block */
+ for (i = 0; i < keysize; i++) {
+ addr = wrap_address(env, param_addr + i);
+ key[i] = cpu_ldub_data_ra(env, addr, ra);
+ }
+
+ /* expand key */
+ if (mod) {
+ AES_set_decrypt_key(key, keysize * 8, &exkey);
+ } else {
+ AES_set_encrypt_key(key, keysize * 8, &exkey);
+ }
+
+ /* process up to MAX_BLOCKS_PER_RUN aes blocks */
+ for (i = 0; i < MAX_BLOCKS_PER_RUN && len >= AES_BLOCK_SIZE; i++) {
+ aes_read_block(env, *src_ptr + processed, in, ra);
+ if (mod) {
+ AES_decrypt(in, out, &exkey);
+ } else {
+ AES_encrypt(in, out, &exkey);
+ }
+ aes_write_block(env, *dst_ptr + processed, out, ra);
+ len -= AES_BLOCK_SIZE, processed += AES_BLOCK_SIZE;
+ }
+
+ *src_ptr = deposit64(*src_ptr, 0, data_reg_len, *src_ptr + processed);
+ *dst_ptr = deposit64(*dst_ptr, 0, data_reg_len, *dst_ptr + processed);
+ *src_len -= processed;
+
+ return !len ? 0 : 3;
+}
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index b69dbb61a6..dff119176e 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -87,6 +87,27 @@ static int cpacf_klmd(CPUS390XState *env, const uintptr_t ra,
return rc;
}
+
+static int cpacf_km(CPUS390XState *env, uintptr_t ra, uint32_t r1,
+ uint32_t r2, uint32_t r3, uint8_t fc, uint8_t mod)
+{
+ int rc = 0;
+
+ switch (fc) {
+ case 0x12: /* CPACF_KM_AES_128 */
+ case 0x13: /* CPACF_KM_AES_192 */
+ case 0x14: /* CPACF_KM_AES_256 */
+ rc = cpacf_aes_ecb(env, ra, env->regs[1],
+ &env->regs[r1], &env->regs[r2], &env->regs[r2 + 1],
+ S390_FEAT_TYPE_KM, fc, mod);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return rc;
+}
+
static int cpacf_ppno(CPUS390XState *env, uintptr_t ra,
uint32_t r1, uint32_t r2, uint32_t r3, uint8_t fc)
{
@@ -151,6 +172,9 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
case S390_FEAT_TYPE_PPNO:
rc = cpacf_ppno(env, ra, r1, r2, r3, fc);
break;
+ case S390_FEAT_TYPE_KM:
+ rc = cpacf_km(env, ra, r1, r2, r3, fc, mod);
+ break;
default:
g_assert_not_reached();
}
diff --git a/target/s390x/tcg/meson.build b/target/s390x/tcg/meson.build
index 4115f4c704..da948471ea 100644
--- a/target/s390x/tcg/meson.build
+++ b/target/s390x/tcg/meson.build
@@ -1,5 +1,6 @@
s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
'cc_helper.c',
+ 'cpacf_aes.c',
'cpacf_sha256.c',
'cpacf_sha512.c',
'crypto_helper.c',
--
2.43.0