From: Peter Maydell
To: qemu-devel@nongnu.org
Date: Mon, 8 Oct 2018 14:59:50 +0100
Message-Id: <20181008140004.12612-20-peter.maydell@linaro.org>
In-Reply-To: <20181008140004.12612-1-peter.maydell@linaro.org>
References: <20181008140004.12612-1-peter.maydell@linaro.org>
Subject: [Qemu-devel] [PULL 19/33] target/arm: Pass TCGMemOpIdx to sve memory helpers

From: Richard Henderson

There is quite a lot of code required to compute cpu_mmu_index, or even
put together the full TCGMemOpIdx.  This can easily be done at
translation time.

Reviewed-by: Peter Maydell
Tested-by: Laurent Desnogues
Signed-off-by: Richard Henderson
Message-id: 20181005175350.30752-16-richard.henderson@linaro.org
Signed-off-by: Peter Maydell
---
 target/arm/internals.h     |   5 ++
 target/arm/sve_helper.c    | 138 +++++++++++++++++++------------------
 target/arm/translate-sve.c |  67 +++++++++++-------
 3 files changed, 121 insertions(+), 89 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index dc9357766c9..24c0444c8d2 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -796,4 +796,9 @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
     }
 }
 
+/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3.
+ * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits.
+ */
+#define MEMOPIDX_SHIFT  8
+
 #endif
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 7756c0b0989..8cbc6516ab5 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "cpu.h"
+#include "internals.h"
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
 #include "exec/helper-proto.h"
@@ -3990,7 +3991,7 @@ typedef intptr_t sve_ld1_host_fn(void *vd, void *vg, void *host,
  * The controlling predicate is known to be true.
  */
 typedef void sve_ld1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
-                            target_ulong vaddr, int mmu_idx, uintptr_t ra);
+                            target_ulong vaddr, TCGMemOpIdx oi, uintptr_t ra);
 typedef sve_ld1_tlb_fn sve_st1_tlb_fn;
 
 /*
@@ -4017,16 +4018,15 @@ static intptr_t sve_##NAME##_host(void *vd, void *vg, void *host, \
 #ifdef CONFIG_SOFTMMU
 #define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
-                             target_ulong addr, int mmu_idx, uintptr_t ra) \
+                             target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
 { \
-    TCGMemOpIdx oi = make_memop_idx(ctz32(sizeof(TYPEM)) | MOEND, mmu_idx); \
     TYPEM val = TLB(env, addr, oi, ra); \
     *(TYPEE *)(vd + H(reg_off)) = val; \
 }
 #else
 #define DO_LD_TLB(NAME, H, TYPEE, TYPEM, HOST, MOEND, TLB) \
 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
-                             target_ulong addr, int mmu_idx, uintptr_t ra) \
+                             target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
 { \
     TYPEM val = HOST(g2h(addr)); \
     *(TYPEE *)(vd + H(reg_off)) = val; \
 }
@@ -4154,11 +4154,13 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
                       sve_ld1_host_fn *host_fn,
                       sve_ld1_tlb_fn *tlb_fn)
 {
-    void *vd = &env->vfp.zregs[simd_data(desc)];
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int mmu_idx = get_mmuidx(oi);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+    void *vd = &env->vfp.zregs[rd];
     const int diffsz = esz - msz;
     const intptr_t reg_max = simd_oprsz(desc);
     const intptr_t mem_max = reg_max >> diffsz;
-    const int mmu_idx = cpu_mmu_index(env, false);
     ARMVectorReg scratch;
     void *host;
     intptr_t split, reg_off, mem_off;
@@ -4232,7 +4234,7 @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
          * on I/O memory, it may succeed but not bring in the TLB entry.
          * But even then we have still made forward progress.
          */
-        tlb_fn(env, &scratch, reg_off, addr + mem_off, mmu_idx, retaddr);
+        tlb_fn(env, &scratch, reg_off, addr + mem_off, oi, retaddr);
         reg_off += 1 << esz;
     }
 #endif
@@ -4293,9 +4295,9 @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
                       uint32_t desc, int size, uintptr_t ra,
                       sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     ARMVectorReg scratch[2] = { };
 
     set_helper_retaddr(ra);
@@ -4303,8 +4305,8 @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
-                tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
+                tlb_fn(env, &scratch[0], i, addr, oi, ra);
+                tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
             }
             i += size, pg >>= size;
             addr += 2 * size;
@@ -4321,9 +4323,9 @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
                       uint32_t desc, int size, uintptr_t ra,
                       sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     ARMVectorReg scratch[3] = { };
 
     set_helper_retaddr(ra);
@@ -4331,9 +4333,9 @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
-                tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
-                tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
+                tlb_fn(env, &scratch[0], i, addr, oi, ra);
+                tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
+                tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra);
             }
             i += size, pg >>= size;
             addr += 3 * size;
@@ -4351,9 +4353,9 @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
                       uint32_t desc, int size, uintptr_t ra,
                       sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     ARMVectorReg scratch[4] = { };
 
     set_helper_retaddr(ra);
@@ -4361,10 +4363,10 @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, &scratch[0], i, addr, mmu_idx, ra);
-                tlb_fn(env, &scratch[1], i, addr + size, mmu_idx, ra);
-                tlb_fn(env, &scratch[2], i, addr + 2 * size, mmu_idx, ra);
-                tlb_fn(env, &scratch[3], i, addr + 3 * size, mmu_idx, ra);
+                tlb_fn(env, &scratch[0], i, addr, oi, ra);
+                tlb_fn(env, &scratch[1], i, addr + size, oi, ra);
+                tlb_fn(env, &scratch[2], i, addr + 2 * size, oi, ra);
+                tlb_fn(env, &scratch[3], i, addr + 3 * size, oi, ra);
             }
             i += size, pg >>= size;
             addr += 4 * size;
@@ -4459,11 +4461,13 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
                         sve_ld1_host_fn *host_fn,
                         sve_ld1_tlb_fn *tlb_fn)
 {
-    void *vd = &env->vfp.zregs[simd_data(desc)];
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int mmu_idx = get_mmuidx(oi);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+    void *vd = &env->vfp.zregs[rd];
     const int diffsz = esz - msz;
     const intptr_t reg_max = simd_oprsz(desc);
     const intptr_t mem_max = reg_max >> diffsz;
-    const int mmu_idx = cpu_mmu_index(env, false);
     intptr_t split, reg_off, mem_off;
     void *host;
 
@@ -4515,7 +4519,7 @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
      * Perform one normal read, which will fault or not.
      * But it is likely to bring the page into the tlb.
      */
-    tlb_fn(env, vd, reg_off, addr + mem_off, mmu_idx, retaddr);
+    tlb_fn(env, vd, reg_off, addr + mem_off, oi, retaddr);
 
     /* After any fault, zero any leading predicated false elts.  */
     swap_memzero(vd, reg_off);
@@ -4544,7 +4548,8 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
                         uint32_t desc, const int esz, const int msz,
                         sve_ld1_host_fn *host_fn)
 {
-    void *vd = &env->vfp.zregs[simd_data(desc)];
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
+    void *vd = &env->vfp.zregs[rd];
     const int diffsz = esz - msz;
     const intptr_t reg_max = simd_oprsz(desc);
     const intptr_t mem_max = reg_max >> diffsz;
@@ -4677,15 +4682,14 @@ DO_LDFF1_LDNF1_2(dd, 3, 3)
 #ifdef CONFIG_SOFTMMU
 #define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \
 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
-                             target_ulong addr, int mmu_idx, uintptr_t ra) \
+                             target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
 { \
-    TCGMemOpIdx oi = make_memop_idx(ctz32(sizeof(TYPEM)) | MOEND, mmu_idx); \
     TLB(env, addr, *(TYPEM *)(vd + H(reg_off)), oi, ra); \
 }
 #else
 #define DO_ST_TLB(NAME, H, TYPEM, HOST, MOEND, TLB) \
 static void sve_##NAME##_tlb(CPUARMState *env, void *vd, intptr_t reg_off, \
-                             target_ulong addr, int mmu_idx, uintptr_t ra) \
+                             target_ulong addr, TCGMemOpIdx oi, uintptr_t ra) \
 { \
     HOST(g2h(addr), *(TYPEM *)(vd + H(reg_off))); \
 }
@@ -4724,9 +4728,9 @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
                       const int esize, const int msize,
                       sve_st1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     void *vd = &env->vfp.zregs[rd];
 
     set_helper_retaddr(ra);
@@ -4734,7 +4738,7 @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, vd, i, addr, mmu_idx, ra);
+                tlb_fn(env, vd, i, addr, oi, ra);
             }
             i += esize, pg >>= esize;
             addr += msize;
@@ -4748,9 +4752,9 @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
                       const int esize, const int msize,
                       sve_st1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     void *d1 = &env->vfp.zregs[rd];
     void *d2 = &env->vfp.zregs[(rd + 1) & 31];
 
@@ -4759,8 +4763,8 @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, d1, i, addr, mmu_idx, ra);
-                tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
+                tlb_fn(env, d1, i, addr, oi, ra);
+                tlb_fn(env, d2, i, addr + msize, oi, ra);
             }
             i += esize, pg >>= esize;
             addr += 2 * msize;
@@ -4774,9 +4778,9 @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
                       const int esize, const int msize,
                       sve_st1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     void *d1 = &env->vfp.zregs[rd];
     void *d2 = &env->vfp.zregs[(rd + 1) & 31];
     void *d3 = &env->vfp.zregs[(rd + 2) & 31];
@@ -4786,9 +4790,9 @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, d1, i, addr, mmu_idx, ra);
-                tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
-                tlb_fn(env, d3, i, addr + 2 * msize, mmu_idx, ra);
+                tlb_fn(env, d1, i, addr, oi, ra);
+                tlb_fn(env, d2, i, addr + msize, oi, ra);
+                tlb_fn(env, d3, i, addr + 2 * msize, oi, ra);
             }
             i += esize, pg >>= esize;
             addr += 3 * msize;
@@ -4802,9 +4806,9 @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
                       const int esize, const int msize,
                       sve_st1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned rd = simd_data(desc);
     void *d1 = &env->vfp.zregs[rd];
     void *d2 = &env->vfp.zregs[(rd + 1) & 31];
     void *d3 = &env->vfp.zregs[(rd + 2) & 31];
@@ -4815,10 +4819,10 @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
         uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
         do {
             if (pg & 1) {
-                tlb_fn(env, d1, i, addr, mmu_idx, ra);
-                tlb_fn(env, d2, i, addr + msize, mmu_idx, ra);
-                tlb_fn(env, d3, i, addr + 2 * msize, mmu_idx, ra);
-                tlb_fn(env, d4, i, addr + 3 * msize, mmu_idx, ra);
+                tlb_fn(env, d1, i, addr, oi, ra);
+                tlb_fn(env, d2, i, addr + msize, oi, ra);
+                tlb_fn(env, d3, i, addr + 2 * msize, oi, ra);
+                tlb_fn(env, d4, i, addr + 3 * msize, oi, ra);
             }
             i += esize, pg >>= esize;
             addr += 4 * msize;
@@ -4916,9 +4920,9 @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
                        target_ulong base, uint32_t desc, uintptr_t ra,
                        zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned scale = simd_data(desc);
     ARMVectorReg scratch = { };
 
     set_helper_retaddr(ra);
@@ -4927,7 +4931,7 @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
         do {
             if (likely(pg & 1)) {
                 target_ulong off = off_fn(vm, i);
-                tlb_fn(env, &scratch, i, base + (off << scale), mmu_idx, ra);
+                tlb_fn(env, &scratch, i, base + (off << scale), oi, ra);
             }
             i += 4, pg >>= 4;
         } while (i & 15);
@@ -4942,9 +4946,9 @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
                        target_ulong base, uint32_t desc, uintptr_t ra,
                        zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc) / 8;
-    unsigned scale = simd_data(desc);
     ARMVectorReg scratch = { };
 
     set_helper_retaddr(ra);
@@ -4952,7 +4956,7 @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
         uint8_t pg = *(uint8_t *)(vg + H1(i));
         if (likely(pg & 1)) {
             target_ulong off = off_fn(vm, i * 8);
-            tlb_fn(env, &scratch, i * 8, base + (off << scale), mmu_idx, ra);
+            tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra);
         }
     }
     set_helper_retaddr(0);
@@ -5058,7 +5062,7 @@ typedef bool sve_ld1_nf_fn(CPUARMState *env, void *vd, intptr_t reg_off,
 #ifdef CONFIG_SOFTMMU
 #define DO_LD_NF(NAME, H, TYPEE, TYPEM, HOST) \
 static bool sve_ld##NAME##_nf(CPUARMState *env, void *vd, intptr_t reg_off, \
-                              target_ulong addr, int mmu_idx) \
+                              target_ulong addr, int mmu_idx) \
 { \
     target_ulong next_page = -(addr | TARGET_PAGE_MASK); \
     if (likely(next_page - addr >= sizeof(TYPEM))) { \
@@ -5117,9 +5121,10 @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
                                 zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
                                 sve_ld1_nf_fn *nonfault_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int mmu_idx = get_mmuidx(oi);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t reg_off, reg_max = simd_oprsz(desc);
-    unsigned scale = simd_data(desc);
     target_ulong addr;
 
     /* Skip to the first true predicate.  */
@@ -5129,7 +5134,7 @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
         set_helper_retaddr(ra);
         addr = off_fn(vm, reg_off);
         addr = base + (addr << scale);
-        tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
+        tlb_fn(env, vd, reg_off, addr, oi, ra);
 
         /* The rest of the reads will be non-faulting.  */
         set_helper_retaddr(0);
@@ -5158,9 +5163,10 @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
                                 zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn,
                                 sve_ld1_nf_fn *nonfault_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int mmu_idx = get_mmuidx(oi);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t reg_off, reg_max = simd_oprsz(desc);
-    unsigned scale = simd_data(desc);
     target_ulong addr;
 
     /* Skip to the first true predicate.  */
@@ -5170,7 +5176,7 @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
         set_helper_retaddr(ra);
         addr = off_fn(vm, reg_off);
         addr = base + (addr << scale);
-        tlb_fn(env, vd, reg_off, addr, mmu_idx, ra);
+        tlb_fn(env, vd, reg_off, addr, oi, ra);
 
         /* The rest of the reads will be non-faulting.  */
         set_helper_retaddr(0);
@@ -5282,9 +5288,9 @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
                        target_ulong base, uint32_t desc, uintptr_t ra,
                        zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc);
-    unsigned scale = simd_data(desc);
 
     set_helper_retaddr(ra);
     for (i = 0; i < oprsz; ) {
@@ -5292,7 +5298,7 @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
         do {
             if (likely(pg & 1)) {
                 target_ulong off = off_fn(vm, i);
-                tlb_fn(env, vd, i, base + (off << scale), mmu_idx, ra);
+                tlb_fn(env, vd, i, base + (off << scale), oi, ra);
             }
             i += 4, pg >>= 4;
         } while (i & 15);
@@ -5304,16 +5310,16 @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
                        target_ulong base, uint32_t desc, uintptr_t ra,
                        zreg_off_fn *off_fn, sve_ld1_tlb_fn *tlb_fn)
 {
-    const int mmu_idx = cpu_mmu_index(env, false);
+    const TCGMemOpIdx oi = extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT);
+    const int scale = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 2);
     intptr_t i, oprsz = simd_oprsz(desc) / 8;
-    unsigned scale = simd_data(desc);
 
     set_helper_retaddr(ra);
     for (i = 0; i < oprsz; i++) {
         uint8_t pg = *(uint8_t *)(vg + H1(i));
         if (likely(pg & 1)) {
             target_ulong off = off_fn(vm, i * 8);
-            tlb_fn(env, vd, i * 8, base + (off << scale), mmu_idx, ra);
+            tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra);
         }
     }
     set_helper_retaddr(0);
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 888a968ddc5..fe7aebdc19f 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -4600,25 +4600,34 @@ static const uint8_t dtype_esz[16] = {
     3, 2, 1, 3
 };
 
+static TCGMemOpIdx sve_memopidx(DisasContext *s, int dtype)
+{
+    return make_memop_idx(s->be_data | dtype_mop[dtype], get_mem_index(s));
+}
+
 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
-                       gen_helper_gvec_mem *fn)
+                       int dtype, gen_helper_gvec_mem *fn)
 {
     unsigned vsz = vec_full_reg_size(s);
     TCGv_ptr t_pg;
-    TCGv_i32 desc;
+    TCGv_i32 t_desc;
+    int desc;
 
     /* For e.g. LD4, there are not enough arguments to pass all 4
      * registers as pointers, so encode the regno into the data field.
      * For consistency, do this even for LD1.
      */
-    desc = tcg_const_i32(simd_desc(vsz, vsz, zt));
+    desc = sve_memopidx(s, dtype);
+    desc |= zt << MEMOPIDX_SHIFT;
+    desc = simd_desc(vsz, vsz, desc);
+    t_desc = tcg_const_i32(desc);
     t_pg = tcg_temp_new_ptr();
 
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
-    fn(cpu_env, t_pg, addr, desc);
+    fn(cpu_env, t_pg, addr, t_desc);
 
     tcg_temp_free_ptr(t_pg);
-    tcg_temp_free_i32(desc);
+    tcg_temp_free_i32(t_desc);
 }
 
 static void do_ld_zpa(DisasContext *s, int zt, int pg,
@@ -4681,7 +4690,7 @@ static void do_ld_zpa(DisasContext *s, int zt, int pg,
      * accessible via the instruction encoding.
      */
     assert(fn != NULL);
-    do_mem_zpa(s, zt, pg, addr, fn);
+    do_mem_zpa(s, zt, pg, addr, dtype, fn);
 }
 
 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
@@ -4763,7 +4772,8 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a, uint32_t insn)
         TCGv_i64 addr = new_tmp_a64(s);
         tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
         tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
-        do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
+        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
+                   fns[s->be_data == MO_BE][a->dtype]);
     }
     return true;
 }
@@ -4821,7 +4831,8 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a, uint32_t insn)
         TCGv_i64 addr = new_tmp_a64(s);
 
         tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
-        do_mem_zpa(s, a->rd, a->pg, addr, fns[s->be_data == MO_BE][a->dtype]);
+        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype,
+                   fns[s->be_data == MO_BE][a->dtype]);
     }
     return true;
 }
@@ -4836,11 +4847,14 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
     };
     unsigned vsz = vec_full_reg_size(s);
     TCGv_ptr t_pg;
-    TCGv_i32 desc;
-    int poff;
+    TCGv_i32 t_desc;
+    int desc, poff;
 
     /* Load the first quadword using the normal predicated load helpers.  */
-    desc = tcg_const_i32(simd_desc(16, 16, zt));
+    desc = sve_memopidx(s, msz_dtype(msz));
+    desc |= zt << MEMOPIDX_SHIFT;
+    desc = simd_desc(16, 16, desc);
+    t_desc = tcg_const_i32(desc);
 
     poff = pred_full_reg_offset(s, pg);
     if (vsz > 16) {
@@ -4864,10 +4878,10 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int msz)
     t_pg = tcg_temp_new_ptr();
     tcg_gen_addi_ptr(t_pg, cpu_env, poff);
 
-    fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, desc);
+    fns[s->be_data == MO_BE][msz](cpu_env, t_pg, addr, t_desc);
 
     tcg_temp_free_ptr(t_pg);
-    tcg_temp_free_i32(desc);
+    tcg_temp_free_i32(t_desc);
 
     /* Replicate that first quadword.  */
     if (vsz > 16) {
@@ -5019,7 +5033,7 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
         fn = fn_multiple[be][nreg - 1][msz];
     }
     assert(fn != NULL);
-    do_mem_zpa(s, zt, pg, addr, fn);
+    do_mem_zpa(s, zt, pg, addr, msz_dtype(msz), fn);
 }
 
 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a, uint32_t insn)
@@ -5057,24 +5071,31 @@ static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a, uint32_t insn)
  *** SVE gather loads / scatter stores
  */
 
-static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, int scale,
-                       TCGv_i64 scalar, gen_helper_gvec_mem_scatter *fn)
+static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
+                       int scale, TCGv_i64 scalar, int msz,
+                       gen_helper_gvec_mem_scatter *fn)
 {
     unsigned vsz = vec_full_reg_size(s);
-    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, scale));
     TCGv_ptr t_zm = tcg_temp_new_ptr();
     TCGv_ptr t_pg = tcg_temp_new_ptr();
     TCGv_ptr t_zt = tcg_temp_new_ptr();
+    TCGv_i32 t_desc;
+    int desc;
+
+    desc = sve_memopidx(s, msz_dtype(msz));
+    desc |= scale << MEMOPIDX_SHIFT;
+    desc = simd_desc(vsz, vsz, desc);
+    t_desc = tcg_const_i32(desc);
 
     tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
     tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
     tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
-    fn(cpu_env, t_zt, t_pg, t_zm, scalar, desc);
+    fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);
 
     tcg_temp_free_ptr(t_zt);
     tcg_temp_free_ptr(t_zm);
     tcg_temp_free_ptr(t_pg);
-    tcg_temp_free_i32(desc);
+    tcg_temp_free_i32(t_desc);
 }
 
 /* Indexed by [be][ff][xs][u][msz].  */
@@ -5263,7 +5284,7 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a, uint32_t insn)
     assert(fn != NULL);
 
     do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
-               cpu_reg_sp(s, a->rn), fn);
+               cpu_reg_sp(s, a->rn), a->msz, fn);
     return true;
 }
 
@@ -5294,7 +5315,7 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a, uint32_t insn)
      * by loading the immediate into the scalar parameter.
      */
     imm = tcg_const_i64(a->imm << a->msz);
-    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
+    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
     tcg_temp_free_i64(imm);
     return true;
 }
@@ -5369,7 +5390,7 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a, uint32_t insn)
         g_assert_not_reached();
     }
     do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
-               cpu_reg_sp(s, a->rn), fn);
+               cpu_reg_sp(s, a->rn), a->msz, fn);
     return true;
 }
 
@@ -5400,7 +5421,7 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a, uint32_t insn)
      * by loading the immediate into the scalar parameter.
      */
    imm = tcg_const_i64(a->imm << a->msz);
-    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, fn);
+    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, fn);
    tcg_temp_free_i64(imm);
    return true;
 }
-- 
2.19.0
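
[Editor's note, not part of the patch: the following minimal, self-contained C sketch illustrates the desc packing scheme this patch introduces -- a TCGMemOpIdx in the low MEMOPIDX_SHIFT bits of the simd_desc data field, with the register number (or scale, for gather/scatter) above it. The SIMD_DATA_SHIFT value and the simd_desc() stand-in are assumptions made only for this demo; the extract32() semantics, MEMOPIDX_SHIFT, and the 5-bit register field mirror the patch.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MEMOPIDX_SHIFT  8    /* from the patch: a TCGMemOpIdx fits in 8 bits */
#define SIMD_DATA_SHIFT 10   /* assumed value, for this sketch only */

/* Same semantics as QEMU's extract32(): take @length bits starting at @start. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t oi = 0x35;   /* example TCGMemOpIdx: MemOp in the low bits, mmu_idx above */
    uint32_t zt = 17;     /* destination vector register number, 0..31 */

    /* Translation time: pack both values into the simd_desc data field. */
    uint32_t data = oi | (zt << MEMOPIDX_SHIFT);
    uint32_t desc = data << SIMD_DATA_SHIFT;   /* stand-in for simd_desc(vsz, vsz, data) */

    /* Run time: the helper recovers both values with two extract32() calls. */
    assert(extract32(desc, SIMD_DATA_SHIFT, MEMOPIDX_SHIFT) == oi);
    assert(extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5) == zt);
    printf("oi=0x%02x rd=%u\n", oi, zt);
    return 0;
}

This is why the helpers above no longer need cpu_mmu_index() or make_memop_idx() at run time: the translator has already folded the complete memory-op index into the descriptor.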