1
The following changes since commit c95bd5ff1660883d15ad6e0005e4c8571604f51a:
1
From: Alistair Francis <alistair.francis@wdc.com>
2
2
3
Merge remote-tracking branch 'remotes/philmd/tags/mips-fixes-20210322' into staging (2021-03-22 14:26:13 +0000)
3
The following changes since commit d52dff5d8048d4982437db9606c27bb4127cf9d0:
4
5
Merge remote-tracking branch 'remotes/marcandre/tags/clip-pull-request' into staging (2021-08-31 14:38:15 +0100)
4
6
5
are available in the Git repository at:
7
are available in the Git repository at:
6
8
7
git@github.com:alistair23/qemu.git tags/pull-riscv-to-apply-20210322-2
9
git@github.com:alistair23/qemu.git tags/pull-riscv-to-apply-20210901-2
8
10
9
for you to fetch changes up to 9a27f69bd668d9d71674407badc412ce1231c7d5:
11
for you to fetch changes up to 8e034ae44dba6291beb07f7f2a932c1e5ab83e98:
10
12
11
target/riscv: Prevent lost illegal instruction exceptions (2021-03-22 21:54:40 -0400)
13
target/riscv: Use {get,dest}_gpr for RVV (2021-09-01 11:59:12 +1000)
12
14
13
----------------------------------------------------------------
15
----------------------------------------------------------------
14
RISC-V PR for 6.0
16
First RISC-V PR for QEMU 6.2
15
17
16
This PR includes:
18
- Add a config for Shakti UART
17
- Fix for vector CSR access
19
- Fixup virt flash node
18
- Improvements to the Ibex UART device
20
- Don't override users supplied ISA version
19
- PMP improvements and bug fixes
21
- Fixup some CSR accesses
20
- Hypervisor extension bug fixes
22
- Use g_strjoinv() for virt machine PLIC string config
21
- ramfb support for the virt machine
23
- Fix an overflow in the SiFive CLINT
22
- Fast read support for SST flash
24
- Add 64-bit register access helpers
23
- Improvements to the microchip_pfsoc machine
25
- Replace tcg_const_* with direct constant usage
24
26
25
----------------------------------------------------------------
27
----------------------------------------------------------------
26
Alexander Wagner (1):
28
Bin Meng (2):
27
hw/char: disable ibex uart receive if the buffer is full
29
hw/riscv: virt: Move flash node to root
30
target/riscv: Correct a comment in riscv_csrrw()
28
31
29
Asherah Connor (2):
32
David Hoppenbrouwers (1):
30
hw/riscv: Add fw_cfg support to virt
33
hw/intc/sifive_clint: Fix muldiv64 overflow in sifive_clint_write_timecmp()
31
hw/riscv: allow ramfb on virt
32
34
33
Bin Meng (3):
35
Joe Komlodi (2):
34
hw/block: m25p80: Support fast read for SST flashes
36
hw/core/register: Add more 64-bit utilities
35
hw/riscv: microchip_pfsoc: Map EMMC/SD mux register
37
hw/registerfields: Use 64-bit bitfield for FIELD_DP64
36
docs/system: riscv: Add documentation for 'microchip-icicle-kit' machine
37
38
38
Frank Chang (1):
39
LIU Zhiwei (2):
39
target/riscv: fix vs() to return proper error code
40
target/riscv: Don't wrongly override isa version
41
target/riscv: Add User CSRs read-only check
40
42
41
Georg Kotheimer (6):
43
Peter Maydell (1):
42
target/riscv: Adjust privilege level for HLV(X)/HSV instructions
44
hw/riscv/virt.c: Assemble plic_hart_config string with g_strjoinv()
43
target/riscv: Make VSTIP and VSEIP read-only in hip
44
target/riscv: Use background registers also for MSTATUS_MPV
45
target/riscv: Fix read and write accesses to vsip and vsie
46
target/riscv: Add proper two-stage lookup exception detection
47
target/riscv: Prevent lost illegal instruction exceptions
48
45
49
Jim Shu (3):
46
Richard Henderson (24):
50
target/riscv: propagate PMP permission to TLB page
47
target/riscv: Use tcg_constant_*
51
target/riscv: add log of PMP permission checking
48
tests/tcg/riscv64: Add test for division
52
target/riscv: flush TLB pages if PMP permission has been changed
49
target/riscv: Clean up division helpers
50
target/riscv: Add DisasContext to gen_get_gpr, gen_set_gpr
51
target/riscv: Introduce DisasExtend and new helpers
52
target/riscv: Add DisasExtend to gen_arith*
53
target/riscv: Remove gen_arith_div*
54
target/riscv: Use gen_arith for mulh and mulhu
55
target/riscv: Move gen_* helpers for RVM
56
target/riscv: Move gen_* helpers for RVB
57
target/riscv: Add DisasExtend to gen_unary
58
target/riscv: Use DisasExtend in shift operations
59
target/riscv: Use extracts for sraiw and srliw
60
target/riscv: Use get_gpr in branches
61
target/riscv: Use {get, dest}_gpr for integer load/store
62
target/riscv: Fix rmw_sip, rmw_vsip, rmw_hsip vs write-only operation
63
target/riscv: Fix hgeie, hgeip
64
target/riscv: Reorg csr instructions
65
target/riscv: Use {get,dest}_gpr for RVA
66
target/riscv: Use gen_shift_imm_fn for slli_uw
67
target/riscv: Use {get,dest}_gpr for RVF
68
target/riscv: Use {get,dest}_gpr for RVD
69
target/riscv: Tidy trans_rvh.c.inc
70
target/riscv: Use {get,dest}_gpr for RVV
53
71
54
docs/system/riscv/microchip-icicle-kit.rst | 89 ++++++++++++++
72
Vijai Kumar K (1):
55
docs/system/target-riscv.rst | 1 +
73
hw/char: Add config for shakti uart
56
include/hw/char/ibex_uart.h | 4 +
57
include/hw/riscv/microchip_pfsoc.h | 1 +
58
include/hw/riscv/virt.h | 2 +
59
target/riscv/cpu.h | 4 +
60
target/riscv/pmp.h | 4 +-
61
hw/block/m25p80.c | 3 +
62
hw/char/ibex_uart.c | 23 +++-
63
hw/riscv/microchip_pfsoc.c | 6 +
64
hw/riscv/virt.c | 33 ++++++
65
target/riscv/cpu.c | 1 +
66
target/riscv/cpu_helper.c | 144 +++++++++++++++--------
67
target/riscv/csr.c | 77 +++++++------
68
target/riscv/pmp.c | 84 ++++++++++----
69
target/riscv/translate.c | 179 +----------------------------
70
hw/riscv/Kconfig | 1 +
71
17 files changed, 367 insertions(+), 289 deletions(-)
72
create mode 100644 docs/system/riscv/microchip-icicle-kit.rst
73
74
75
include/hw/register.h | 8 +
76
include/hw/registerfields.h | 10 +-
77
target/riscv/helper.h | 6 +-
78
target/riscv/insn32.decode | 1 +
79
hw/core/register.c | 12 +
80
hw/intc/sifive_clint.c | 25 +-
81
hw/riscv/virt.c | 35 +-
82
target/riscv/cpu.c | 14 +-
83
target/riscv/csr.c | 59 ++-
84
target/riscv/op_helper.c | 18 +-
85
target/riscv/translate.c | 689 +++++++-------------------------
86
tests/tcg/riscv64/test-div.c | 58 +++
87
target/riscv/insn_trans/trans_rva.c.inc | 49 +--
88
target/riscv/insn_trans/trans_rvb.c.inc | 366 +++++++++++++----
89
target/riscv/insn_trans/trans_rvd.c.inc | 127 +++---
90
target/riscv/insn_trans/trans_rvf.c.inc | 149 ++++---
91
target/riscv/insn_trans/trans_rvh.c.inc | 266 +++---------
92
target/riscv/insn_trans/trans_rvi.c.inc | 372 +++++++++--------
93
target/riscv/insn_trans/trans_rvm.c.inc | 193 +++++++--
94
target/riscv/insn_trans/trans_rvv.c.inc | 149 +++----
95
hw/char/Kconfig | 3 +
96
hw/char/meson.build | 2 +-
97
hw/riscv/Kconfig | 5 +-
98
tests/tcg/riscv64/Makefile.target | 5 +
99
24 files changed, 1240 insertions(+), 1381 deletions(-)
100
create mode 100644 tests/tcg/riscv64/test-div.c
101
create mode 100644 tests/tcg/riscv64/Makefile.target
102
diff view generated by jsdifflib
New patch
1
From: Vijai Kumar K <vijai@behindbytes.com>
1
2
3
Use a dedicated UART config(CONFIG_SHAKTI_UART) to select
4
shakti uart.
5
6
Signed-off-by: Vijai Kumar K <vijai@behindbytes.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20210731190229.137483-1-vijai@behindbytes.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
hw/char/Kconfig | 3 +++
12
hw/char/meson.build | 2 +-
13
hw/riscv/Kconfig | 5 +----
14
3 files changed, 5 insertions(+), 5 deletions(-)
15
16
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
17
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/char/Kconfig
19
+++ b/hw/char/Kconfig
20
@@ -XXX,XX +XXX,XX @@ config SIFIVE_UART
21
22
config GOLDFISH_TTY
23
bool
24
+
25
+config SHAKTI_UART
26
+ bool
27
diff --git a/hw/char/meson.build b/hw/char/meson.build
28
index XXXXXXX..XXXXXXX 100644
29
--- a/hw/char/meson.build
30
+++ b/hw/char/meson.build
31
@@ -XXX,XX +XXX,XX @@ softmmu_ss.add(when: 'CONFIG_SERIAL', if_true: files('serial.c'))
32
softmmu_ss.add(when: 'CONFIG_SERIAL_ISA', if_true: files('serial-isa.c'))
33
softmmu_ss.add(when: 'CONFIG_SERIAL_PCI', if_true: files('serial-pci.c'))
34
softmmu_ss.add(when: 'CONFIG_SERIAL_PCI_MULTI', if_true: files('serial-pci-multi.c'))
35
-softmmu_ss.add(when: 'CONFIG_SHAKTI', if_true: files('shakti_uart.c'))
36
+softmmu_ss.add(when: 'CONFIG_SHAKTI_UART', if_true: files('shakti_uart.c'))
37
softmmu_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-console.c'))
38
softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen_console.c'))
39
softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_uartlite.c'))
40
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
41
index XXXXXXX..XXXXXXX 100644
42
--- a/hw/riscv/Kconfig
43
+++ b/hw/riscv/Kconfig
44
@@ -XXX,XX +XXX,XX @@ config OPENTITAN
45
select IBEX
46
select UNIMP
47
48
-config SHAKTI
49
- bool
50
-
51
config SHAKTI_C
52
bool
53
select UNIMP
54
- select SHAKTI
55
+ select SHAKTI_UART
56
select SIFIVE_CLINT
57
select SIFIVE_PLIC
58
59
--
60
2.31.1
61
62
diff view generated by jsdifflib
1
From: Asherah Connor <ashe@kivikakk.ee>
1
From: Bin Meng <bmeng.cn@gmail.com>
2
2
3
Allow ramfb on virt. This lets `-device ramfb' work.
3
The flash is not inside the SoC, so it's inappropriate to put it
4
under the /soc node. Move it to root instead.
4
5
5
Signed-off-by: Asherah Connor <ashe@kivikakk.ee>
6
Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20210318235041.17175-3-ashe@kivikakk.ee
9
Message-id: 20210807035641.22449-1-bmeng.cn@gmail.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
---
11
hw/riscv/virt.c | 3 +++
12
hw/riscv/virt.c | 2 +-
12
1 file changed, 3 insertions(+)
13
1 file changed, 1 insertion(+), 1 deletion(-)
13
14
14
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
15
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/hw/riscv/virt.c
17
--- a/hw/riscv/virt.c
17
+++ b/hw/riscv/virt.c
18
+++ b/hw/riscv/virt.c
18
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
19
#include "sysemu/sysemu.h"
20
qemu_fdt_setprop_cell(fdt, name, "interrupts", RTC_IRQ);
20
#include "hw/pci/pci.h"
21
g_free(name);
21
#include "hw/pci-host/gpex.h"
22
22
+#include "hw/display/ramfb.h"
23
- name = g_strdup_printf("/soc/flash@%" PRIx64, flashbase);
23
24
+ name = g_strdup_printf("/flash@%" PRIx64, flashbase);
24
static const MemMapEntry virt_memmap[] = {
25
qemu_fdt_add_subnode(mc->fdt, name);
25
[VIRT_DEBUG] = { 0x0, 0x100 },
26
qemu_fdt_setprop_string(mc->fdt, name, "compatible", "cfi-flash");
26
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
27
qemu_fdt_setprop_sized_cells(mc->fdt, name, "reg",
27
mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props;
28
mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id;
29
mc->numa_mem_supported = true;
30
+
31
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
32
}
33
34
static const TypeInfo virt_machine_typeinfo = {
35
--
28
--
36
2.30.1
29
2.31.1
37
30
38
31
diff view generated by jsdifflib
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Bin Meng <bmeng.cn@gmail.com>
2
2
3
vs() should return -RISCV_EXCP_ILLEGAL_INST instead of -1 if rvv feature
3
When privilege check fails, RISCV_EXCP_ILLEGAL_INST is returned,
4
is not enabled.
4
not -1 (RISCV_EXCP_NONE).
5
5
6
If -1 is returned, exception will be raised and cs->exception_index will
6
Signed-off-by: Bin Meng <bmeng.cn@gmail.com>
7
be set to the negative return value. The exception will then be treated
8
as an instruction access fault instead of illegal instruction fault.
9
10
Signed-off-by: Frank Chang <frank.chang@sifive.com>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-id: 20210223065935.20208-1-frank.chang@sifive.com
8
Message-id: 20210807141025.31808-1-bmeng.cn@gmail.com
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
10
---
16
target/riscv/csr.c | 2 +-
11
target/riscv/csr.c | 2 +-
17
1 file changed, 1 insertion(+), 1 deletion(-)
12
1 file changed, 1 insertion(+), 1 deletion(-)
18
13
19
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
14
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/csr.c
16
--- a/target/riscv/csr.c
22
+++ b/target/riscv/csr.c
17
+++ b/target/riscv/csr.c
23
@@ -XXX,XX +XXX,XX @@ static int vs(CPURISCVState *env, int csrno)
18
@@ -XXX,XX +XXX,XX @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
24
if (env->misa & RVV) {
19
target_ulong old_value;
25
return 0;
20
RISCVCPU *cpu = env_archcpu(env);
26
}
21
27
- return -1;
22
- /* check privileges and return -1 if check fails */
28
+ return -RISCV_EXCP_ILLEGAL_INST;
23
+ /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
29
}
24
#if !defined(CONFIG_USER_ONLY)
30
25
int effective_priv = env->priv;
31
static int ctr(CPURISCVState *env, int csrno)
26
int read_only = get_field(csrno, 0xC00) == 3;
32
--
27
--
33
2.30.1
28
2.31.1
34
29
35
30
diff view generated by jsdifflib
1
From: Jim Shu <cwshu@andestech.com>
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
2
3
Like MMU translation, add qemu log of PMP permission checking for
3
For some cpu, the isa version has already been set in cpu init function.
4
debugging.
4
Thus only override the isa version when isa version is not set, or
5
users set different isa version explicitly by cpu parameters.
5
6
6
Signed-off-by: Jim Shu <cwshu@andestech.com>
7
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
8
Message-id: 1613916082-19528-3-git-send-email-cwshu@andestech.com
9
Message-id: 20210811144612.68674-1-zhiwei_liu@c-sky.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
---
11
target/riscv/cpu_helper.c | 12 ++++++++++++
12
target/riscv/cpu.c | 14 ++++++++------
12
1 file changed, 12 insertions(+)
13
1 file changed, 8 insertions(+), 6 deletions(-)
13
14
14
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
15
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu_helper.c
17
--- a/target/riscv/cpu.c
17
+++ b/target/riscv/cpu_helper.c
18
+++ b/target/riscv/cpu.c
18
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
19
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
19
if (ret == TRANSLATE_SUCCESS) {
20
RISCVCPU *cpu = RISCV_CPU(dev);
20
ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
21
CPURISCVState *env = &cpu->env;
21
size, access_type, mode);
22
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
22
+
23
- int priv_version = PRIV_VERSION_1_11_0;
23
+ qemu_log_mask(CPU_LOG_MMU,
24
- int bext_version = BEXT_VERSION_0_93_0;
24
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
25
- int vext_version = VEXT_VERSION_0_07_1;
25
+ " %d tlb_size " TARGET_FMT_lu "\n",
26
+ int priv_version = 0;
26
+ __func__, pa, ret, prot_pmp, tlb_size);
27
target_ulong target_misa = env->misa;
27
+
28
Error *local_err = NULL;
28
prot &= prot_pmp;
29
29
}
30
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
30
31
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
32
if (ret == TRANSLATE_SUCCESS) {
33
ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
34
size, access_type, mode);
35
+
36
+ qemu_log_mask(CPU_LOG_MMU,
37
+ "%s PMP address=" TARGET_FMT_plx " ret %d prot"
38
+ " %d tlb_size " TARGET_FMT_lu "\n",
39
+ __func__, pa, ret, prot_pmp, tlb_size);
40
+
41
prot &= prot_pmp;
42
}
31
}
43
}
32
}
33
34
- set_priv_version(env, priv_version);
35
- set_bext_version(env, bext_version);
36
- set_vext_version(env, vext_version);
37
+ if (priv_version) {
38
+ set_priv_version(env, priv_version);
39
+ } else if (!env->priv_ver) {
40
+ set_priv_version(env, PRIV_VERSION_1_11_0);
41
+ }
42
43
if (cpu->cfg.mmu) {
44
set_feature(env, RISCV_FEATURE_MMU);
45
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
46
target_misa |= RVH;
47
}
48
if (cpu->cfg.ext_b) {
49
+ int bext_version = BEXT_VERSION_0_93_0;
50
target_misa |= RVB;
51
52
if (cpu->cfg.bext_spec) {
53
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
54
set_bext_version(env, bext_version);
55
}
56
if (cpu->cfg.ext_v) {
57
+ int vext_version = VEXT_VERSION_0_07_1;
58
target_misa |= RVV;
59
if (!is_power_of_2(cpu->cfg.vlen)) {
60
error_setg(errp,
44
--
61
--
45
2.30.1
62
2.31.1
46
63
47
64
diff view generated by jsdifflib
1
From: Alexander Wagner <alexander.wagner@ulal.de>
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
2
3
Not disabling the UART leads to QEMU overwriting the UART receive buffer with
3
For U-mode CSRs, read-only check is also needed.
4
the newest received byte. The rx_level variable is added to allow the use of
5
the existing OpenTitan driver libraries.
6
4
7
Signed-off-by: Alexander Wagner <alexander.wagner@ulal.de>
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
9
Message-id: 20210309152130.13038-1-alexander.wagner@ulal.de
7
Message-id: 20210810014552.4884-1-zhiwei_liu@c-sky.com
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
9
---
12
include/hw/char/ibex_uart.h | 4 ++++
10
target/riscv/csr.c | 8 +++++---
13
hw/char/ibex_uart.c | 23 ++++++++++++++++++-----
11
1 file changed, 5 insertions(+), 3 deletions(-)
14
2 files changed, 22 insertions(+), 5 deletions(-)
15
12
16
diff --git a/include/hw/char/ibex_uart.h b/include/hw/char/ibex_uart.h
13
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
17
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
18
--- a/include/hw/char/ibex_uart.h
15
--- a/target/riscv/csr.c
19
+++ b/include/hw/char/ibex_uart.h
16
+++ b/target/riscv/csr.c
20
@@ -XXX,XX +XXX,XX @@ REG32(FIFO_CTRL, 0x1c)
17
@@ -XXX,XX +XXX,XX @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
21
FIELD(FIFO_CTRL, RXILVL, 2, 3)
18
RISCVException ret;
22
FIELD(FIFO_CTRL, TXILVL, 5, 2)
19
target_ulong old_value;
23
REG32(FIFO_STATUS, 0x20)
20
RISCVCPU *cpu = env_archcpu(env);
24
+ FIELD(FIFO_STATUS, TXLVL, 0, 5)
21
+ int read_only = get_field(csrno, 0xC00) == 3;
25
+ FIELD(FIFO_STATUS, RXLVL, 16, 5)
22
26
REG32(OVRD, 0x24)
23
/* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
27
REG32(VAL, 0x28)
24
#if !defined(CONFIG_USER_ONLY)
28
REG32(TIMEOUT_CTRL, 0x2c)
25
int effective_priv = env->priv;
29
@@ -XXX,XX +XXX,XX @@ struct IbexUartState {
26
- int read_only = get_field(csrno, 0xC00) == 3;
30
uint8_t tx_fifo[IBEX_UART_TX_FIFO_SIZE];
27
31
uint32_t tx_level;
28
if (riscv_has_ext(env, RVH) &&
32
29
env->priv == PRV_S &&
33
+ uint32_t rx_level;
30
@@ -XXX,XX +XXX,XX @@ RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
34
+
31
effective_priv++;
35
QEMUTimer *fifo_trigger_handle;
36
uint64_t char_tx_time;
37
38
diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/hw/char/ibex_uart.c
41
+++ b/hw/char/ibex_uart.c
42
@@ -XXX,XX +XXX,XX @@ static int ibex_uart_can_receive(void *opaque)
43
{
44
IbexUartState *s = opaque;
45
46
- if (s->uart_ctrl & R_CTRL_RX_ENABLE_MASK) {
47
+ if ((s->uart_ctrl & R_CTRL_RX_ENABLE_MASK)
48
+ && !(s->uart_status & R_STATUS_RXFULL_MASK)) {
49
return 1;
50
}
32
}
51
33
52
@@ -XXX,XX +XXX,XX @@ static void ibex_uart_receive(void *opaque, const uint8_t *buf, int size)
34
- if ((write_mask && read_only) ||
53
35
- (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) {
54
s->uart_status &= ~R_STATUS_RXIDLE_MASK;
36
+ if (!env->debugger && (effective_priv < get_field(csrno, 0x300))) {
55
s->uart_status &= ~R_STATUS_RXEMPTY_MASK;
37
return RISCV_EXCP_ILLEGAL_INST;
56
+ /* The RXFULL is set after receiving a single byte
38
}
57
+ * as the FIFO buffers are not yet implemented.
39
#endif
58
+ */
40
+ if (write_mask && read_only) {
59
+ s->uart_status |= R_STATUS_RXFULL_MASK;
41
+ return RISCV_EXCP_ILLEGAL_INST;
60
+ s->rx_level += 1;
42
+ }
61
43
62
if (size > rx_fifo_level) {
44
/* ensure the CSR extension is enabled. */
63
s->uart_intr_state |= R_INTR_STATE_RX_WATERMARK_MASK;
45
if (!cpu->cfg.ext_icsr) {
64
@@ -XXX,XX +XXX,XX @@ static void ibex_uart_reset(DeviceState *dev)
65
s->uart_timeout_ctrl = 0x00000000;
66
67
s->tx_level = 0;
68
+ s->rx_level = 0;
69
70
s->char_tx_time = (NANOSECONDS_PER_SECOND / 230400) * 10;
71
72
@@ -XXX,XX +XXX,XX @@ static uint64_t ibex_uart_read(void *opaque, hwaddr addr,
73
74
case R_RDATA:
75
retvalue = s->uart_rdata;
76
- if (s->uart_ctrl & R_CTRL_RX_ENABLE_MASK) {
77
+ if ((s->uart_ctrl & R_CTRL_RX_ENABLE_MASK) && (s->rx_level > 0)) {
78
qemu_chr_fe_accept_input(&s->chr);
79
80
- s->uart_status |= R_STATUS_RXIDLE_MASK;
81
- s->uart_status |= R_STATUS_RXEMPTY_MASK;
82
+ s->rx_level -= 1;
83
+ s->uart_status &= ~R_STATUS_RXFULL_MASK;
84
+ if (s->rx_level == 0) {
85
+ s->uart_status |= R_STATUS_RXIDLE_MASK;
86
+ s->uart_status |= R_STATUS_RXEMPTY_MASK;
87
+ }
88
}
89
break;
90
case R_WDATA:
91
@@ -XXX,XX +XXX,XX @@ static uint64_t ibex_uart_read(void *opaque, hwaddr addr,
92
case R_FIFO_STATUS:
93
retvalue = s->uart_fifo_status;
94
95
- retvalue |= s->tx_level & 0x1F;
96
+ retvalue |= (s->rx_level & 0x1F) << R_FIFO_STATUS_RXLVL_SHIFT;
97
+ retvalue |= (s->tx_level & 0x1F) << R_FIFO_STATUS_TXLVL_SHIFT;
98
99
qemu_log_mask(LOG_UNIMP,
100
"%s: RX fifos are not supported\n", __func__);
101
@@ -XXX,XX +XXX,XX @@ static void ibex_uart_write(void *opaque, hwaddr addr,
102
s->uart_fifo_ctrl = value;
103
104
if (value & R_FIFO_CTRL_RXRST_MASK) {
105
+ s->rx_level = 0;
106
qemu_log_mask(LOG_UNIMP,
107
"%s: RX fifos are not supported\n", __func__);
108
}
109
--
46
--
110
2.30.1
47
2.31.1
111
48
112
49
diff view generated by jsdifflib
1
From: Asherah Connor <ashe@kivikakk.ee>
1
From: Peter Maydell <peter.maydell@linaro.org>
2
2
3
Provides fw_cfg for the virt machine on riscv. This enables
3
In the riscv virt machine init function, We assemble a string
4
using e.g. ramfb later.
4
plic_hart_config which is a comma-separated list of N copies of the
5
VIRT_PLIC_HART_CONFIG string. The code that does this has a
6
misunderstanding of the strncat() length argument. If the source
7
string is too large strncat() will write a maximum of length+1 bytes
8
(length bytes from the source string plus a trailing NUL), but the
9
code here assumes that it will write only length bytes at most.
5
10
6
Signed-off-by: Asherah Connor <ashe@kivikakk.ee>
11
This isn't an actual bug because the code has correctly precalculated
7
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
12
the amount of memory it needs to allocate so that it will never be
13
too small (i.e. we could have used plain old strcat()), but it does
14
mean that the code looks like it has a guard against accidental
15
overrun when it doesn't.
16
17
Rewrite the string handling here to use the glib g_strjoinv()
18
function, which means we don't need to do careful accountancy of
19
string lengths, and makes it clearer that what we're doing is
20
"create a comma-separated string".
21
22
Fixes: Coverity 1460752
23
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20210318235041.17175-2-ashe@kivikakk.ee
26
Message-id: 20210812144647.10516-1-peter.maydell@linaro.org
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
28
---
12
include/hw/riscv/virt.h | 2 ++
29
hw/riscv/virt.c | 33 ++++++++++++++++++++-------------
13
hw/riscv/virt.c | 30 ++++++++++++++++++++++++++++++
30
1 file changed, 20 insertions(+), 13 deletions(-)
14
hw/riscv/Kconfig | 1 +
15
3 files changed, 33 insertions(+)
16
31
17
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/hw/riscv/virt.h
20
+++ b/include/hw/riscv/virt.h
21
@@ -XXX,XX +XXX,XX @@ struct RISCVVirtState {
22
RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
23
DeviceState *plic[VIRT_SOCKETS_MAX];
24
PFlashCFI01 *flash[2];
25
+ FWCfgState *fw_cfg;
26
27
int fdt_size;
28
};
29
@@ -XXX,XX +XXX,XX @@ enum {
30
VIRT_PLIC,
31
VIRT_UART0,
32
VIRT_VIRTIO,
33
+ VIRT_FW_CFG,
34
VIRT_FLASH,
35
VIRT_DRAM,
36
VIRT_PCIE_MMIO,
37
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
32
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
38
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
39
--- a/hw/riscv/virt.c
34
--- a/hw/riscv/virt.c
40
+++ b/hw/riscv/virt.c
35
+++ b/hw/riscv/virt.c
41
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry virt_memmap[] = {
36
@@ -XXX,XX +XXX,XX @@ static FWCfgState *create_fw_cfg(const MachineState *mc)
42
[VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
37
return fw_cfg;
43
[VIRT_UART0] = { 0x10000000, 0x100 },
44
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
45
+ [VIRT_FW_CFG] = { 0x10100000, 0x18 },
46
[VIRT_FLASH] = { 0x20000000, 0x4000000 },
47
[VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 },
48
[VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 },
49
@@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
50
return dev;
51
}
38
}
52
39
53
+static FWCfgState *create_fw_cfg(const MachineState *mc)
40
+/*
41
+ * Return the per-socket PLIC hart topology configuration string
42
+ * (caller must free with g_free())
43
+ */
44
+static char *plic_hart_config_string(int hart_count)
54
+{
45
+{
55
+ hwaddr base = virt_memmap[VIRT_FW_CFG].base;
46
+ g_autofree const char **vals = g_new(const char *, hart_count + 1);
56
+ hwaddr size = virt_memmap[VIRT_FW_CFG].size;
47
+ int i;
57
+ FWCfgState *fw_cfg;
58
+ char *nodename;
59
+
48
+
60
+ fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16,
49
+ for (i = 0; i < hart_count; i++) {
61
+ &address_space_memory);
50
+ vals[i] = VIRT_PLIC_HART_CONFIG;
62
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)mc->smp.cpus);
51
+ }
52
+ vals[i] = NULL;
63
+
53
+
64
+ nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
54
+ /* g_strjoinv() obliges us to cast away const here */
65
+ qemu_fdt_add_subnode(mc->fdt, nodename);
55
+ return g_strjoinv(",", (char **)vals);
66
+ qemu_fdt_setprop_string(mc->fdt, nodename,
67
+ "compatible", "qemu,fw-cfg-mmio");
68
+ qemu_fdt_setprop_sized_cells(mc->fdt, nodename, "reg",
69
+ 2, base, 2, size);
70
+ qemu_fdt_setprop(mc->fdt, nodename, "dma-coherent", NULL, 0);
71
+ g_free(nodename);
72
+ return fw_cfg;
73
+}
56
+}
74
+
57
+
75
static void virt_machine_init(MachineState *machine)
58
static void virt_machine_init(MachineState *machine)
76
{
59
{
77
const MemMapEntry *memmap = virt_memmap;
60
const MemMapEntry *memmap = virt_memmap;
78
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
61
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
79
start_addr = virt_memmap[VIRT_FLASH].base;
62
MemoryRegion *main_mem = g_new(MemoryRegion, 1);
80
}
63
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
81
64
char *plic_hart_config, *soc_name;
82
+ /*
65
- size_t plic_hart_config_len;
83
+ * Init fw_cfg. Must be done before riscv_load_fdt, otherwise the device
66
target_ulong start_addr = memmap[VIRT_DRAM].base;
84
+ * tree cannot be altered and we get FDT_ERR_NOSPACE.
67
target_ulong firmware_end_addr, kernel_start_addr;
85
+ */
68
uint32_t fdt_load_addr;
86
+ s->fw_cfg = create_fw_cfg(machine);
69
uint64_t kernel_entry;
87
+ rom_set_fw(s->fw_cfg);
70
DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
88
+
71
- int i, j, base_hartid, hart_count;
89
/* Compute the fdt load address in dram */
72
+ int i, base_hartid, hart_count;
90
fdt_load_addr = riscv_load_fdt(memmap[VIRT_DRAM].base,
73
91
machine->ram_size, machine->fdt);
74
/* Check socket count limit */
92
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
75
if (VIRT_SOCKETS_MAX < riscv_socket_count(machine)) {
93
index XXXXXXX..XXXXXXX 100644
76
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
94
--- a/hw/riscv/Kconfig
77
SIFIVE_CLINT_TIMEBASE_FREQ, true);
95
+++ b/hw/riscv/Kconfig
78
96
@@ -XXX,XX +XXX,XX @@ config RISCV_VIRT
79
/* Per-socket PLIC hart topology configuration string */
97
select SIFIVE_PLIC
80
- plic_hart_config_len =
98
select SIFIVE_TEST
81
- (strlen(VIRT_PLIC_HART_CONFIG) + 1) * hart_count;
99
select VIRTIO_MMIO
82
- plic_hart_config = g_malloc0(plic_hart_config_len);
100
+ select FW_CFG_DMA
83
- for (j = 0; j < hart_count; j++) {
101
84
- if (j != 0) {
102
config SIFIVE_E
85
- strncat(plic_hart_config, ",", plic_hart_config_len);
103
bool
86
- }
87
- strncat(plic_hart_config, VIRT_PLIC_HART_CONFIG,
88
- plic_hart_config_len);
89
- plic_hart_config_len -= (strlen(VIRT_PLIC_HART_CONFIG) + 1);
90
- }
91
+ plic_hart_config = plic_hart_config_string(hart_count);
92
93
/* Per-socket PLIC */
94
s->plic[i] = sifive_plic_create(
104
--
95
--
105
2.30.1
96
2.31.1
106
97
107
98
diff view generated by jsdifflib
New patch
1
From: David Hoppenbrouwers <david@salt-inc.org>
1
2
3
`muldiv64` would overflow in cases where the final 96-bit value does not
4
fit in a `uint64_t`. This would result in small values that cause an
5
interrupt to be triggered much sooner than intended.
6
7
The overflow can be detected in most cases by checking if the new value is
8
smaller than the previous value. If the final result is larger than
9
`diff` it is either correct or it doesn't matter as it is effectively
10
infinite anyways.
11
12
`next` is an `uint64_t` value, but `timer_mod` takes an `int64_t`. This
13
resulted in high values such as `UINT64_MAX` being converted to `-1`,
14
which caused an immediate timer interrupt.
15
16
By limiting `next` to `INT64_MAX` no overflow will happen while the
17
timer will still be effectively set to "infinitely" far in the future.
18
19
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/493
20
Signed-off-by: David Hoppenbrouwers <david@salt-inc.org>
21
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
22
Message-id: 20210827152324.5201-1-david@salt-inc.org
23
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
---
25
hw/intc/sifive_clint.c | 25 +++++++++++++++++++++++--
26
1 file changed, 23 insertions(+), 2 deletions(-)
27
28
diff --git a/hw/intc/sifive_clint.c b/hw/intc/sifive_clint.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/hw/intc/sifive_clint.c
31
+++ b/hw/intc/sifive_clint.c
32
@@ -XXX,XX +XXX,XX @@ static void sifive_clint_write_timecmp(RISCVCPU *cpu, uint64_t value,
33
riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0));
34
diff = cpu->env.timecmp - rtc_r;
35
/* back to ns (note args switched in muldiv64) */
36
- next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
37
- muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
38
+ uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
39
+
40
+ /*
41
+ * check if ns_diff overflowed and check if the addition would potentially
42
+ * overflow
43
+ */
44
+ if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) ||
45
+ ns_diff > INT64_MAX) {
46
+ next = INT64_MAX;
47
+ } else {
48
+ /*
49
+ * as it is very unlikely qemu_clock_get_ns will return a value
50
+ * greater than INT64_MAX, no additional check is needed for an
51
+ * unsigned integer overflow.
52
+ */
53
+ next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff;
54
+ /*
55
+ * if ns_diff is INT64_MAX next may still be outside the range
56
+ * of a signed integer.
57
+ */
58
+ next = MIN(next, INT64_MAX);
59
+ }
60
+
61
timer_mod(cpu->env.timer, next);
62
}
63
64
--
65
2.31.1
66
67
diff view generated by jsdifflib
New patch
1
From: Joe Komlodi <joe.komlodi@xilinx.com>
1
2
3
We already have some utilities to handle 64-bit wide registers, so this just
4
adds some more for:
5
- Initializing 64-bit registers
6
- Extracting and depositing to an array of 64-bit registers
7
8
Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 1626805903-162860-2-git-send-email-joe.komlodi@xilinx.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
include/hw/register.h | 8 ++++++++
14
include/hw/registerfields.h | 8 ++++++++
15
hw/core/register.c | 12 ++++++++++++
16
3 files changed, 28 insertions(+)
17
18
diff --git a/include/hw/register.h b/include/hw/register.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/register.h
21
+++ b/include/hw/register.h
22
@@ -XXX,XX +XXX,XX @@ RegisterInfoArray *register_init_block32(DeviceState *owner,
23
bool debug_enabled,
24
uint64_t memory_size);
25
26
+RegisterInfoArray *register_init_block64(DeviceState *owner,
27
+ const RegisterAccessInfo *rae,
28
+ int num, RegisterInfo *ri,
29
+ uint64_t *data,
30
+ const MemoryRegionOps *ops,
31
+ bool debug_enabled,
32
+ uint64_t memory_size);
33
+
34
/**
35
* This function should be called to cleanup the registers that were initialized
36
* when calling register_init_block32(). This function should only be called
37
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/include/hw/registerfields.h
40
+++ b/include/hw/registerfields.h
41
@@ -XXX,XX +XXX,XX @@
42
enum { A_ ## reg = (addr) }; \
43
enum { R_ ## reg = (addr) / 2 };
44
45
+#define REG64(reg, addr) \
46
+ enum { A_ ## reg = (addr) }; \
47
+ enum { R_ ## reg = (addr) / 8 };
48
+
49
/* Define SHIFT, LENGTH and MASK constants for a field within a register */
50
51
/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
52
@@ -XXX,XX +XXX,XX @@
53
/* Extract a field from an array of registers */
54
#define ARRAY_FIELD_EX32(regs, reg, field) \
55
FIELD_EX32((regs)[R_ ## reg], reg, field)
56
+#define ARRAY_FIELD_EX64(regs, reg, field) \
57
+ FIELD_EX64((regs)[R_ ## reg], reg, field)
58
59
/* Deposit a register field.
60
* Assigning values larger then the target field will result in
61
@@ -XXX,XX +XXX,XX @@
62
/* Deposit a field to array of registers. */
63
#define ARRAY_FIELD_DP32(regs, reg, field, val) \
64
(regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val);
65
+#define ARRAY_FIELD_DP64(regs, reg, field, val) \
66
+ (regs)[R_ ## reg] = FIELD_DP64((regs)[R_ ## reg], reg, field, val);
67
68
#endif
69
diff --git a/hw/core/register.c b/hw/core/register.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/hw/core/register.c
72
+++ b/hw/core/register.c
73
@@ -XXX,XX +XXX,XX @@ RegisterInfoArray *register_init_block32(DeviceState *owner,
74
data, ops, debug_enabled, memory_size, 32);
75
}
76
77
+RegisterInfoArray *register_init_block64(DeviceState *owner,
78
+ const RegisterAccessInfo *rae,
79
+ int num, RegisterInfo *ri,
80
+ uint64_t *data,
81
+ const MemoryRegionOps *ops,
82
+ bool debug_enabled,
83
+ uint64_t memory_size)
84
+{
85
+ return register_init_block(owner, rae, num, ri, (void *)
86
+ data, ops, debug_enabled, memory_size, 64);
87
+}
88
+
89
void register_finalize_block(RegisterInfoArray *r_array)
90
{
91
object_unparent(OBJECT(&r_array->mem));
92
--
93
2.31.1
94
95
diff view generated by jsdifflib
New patch
1
From: Joe Komlodi <joe.komlodi@xilinx.com>
1
2
3
If we have a field that's wider than 32-bits, we need a data type wide enough to
4
be able to create the bitfield used to deposit the value.
5
6
Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 1626805903-162860-3-git-send-email-joe.komlodi@xilinx.com
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
include/hw/registerfields.h | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
15
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/registerfields.h
18
+++ b/include/hw/registerfields.h
19
@@ -XXX,XX +XXX,XX @@
20
_d; })
21
#define FIELD_DP64(storage, reg, field, val) ({ \
22
struct { \
23
- unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
24
+ uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
25
} _v = { .v = val }; \
26
uint64_t _d; \
27
_d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
28
--
29
2.31.1
30
31
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Replace uses of tcg_const_* with the allocate and free close together.
4
5
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210823195529.560295-2-richard.henderson@linaro.org
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/translate.c | 36 ++++----------
13
target/riscv/insn_trans/trans_rvf.c.inc | 3 +-
14
target/riscv/insn_trans/trans_rvv.c.inc | 65 +++++++++----------------
15
3 files changed, 34 insertions(+), 70 deletions(-)
16
17
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/translate.c
20
+++ b/target/riscv/translate.c
21
@@ -XXX,XX +XXX,XX @@ static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
22
*/
23
static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
24
{
25
- TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
26
- TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);
27
+ TCGv_i64 t_max = tcg_constant_i64(0xffffffff00000000ull);
28
+ TCGv_i64 t_nan = tcg_constant_i64(0xffffffff7fc00000ull);
29
30
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
31
- tcg_temp_free_i64(t_max);
32
- tcg_temp_free_i64(t_nan);
33
}
34
35
static void generate_exception(DisasContext *ctx, int excp)
36
{
37
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
38
- TCGv_i32 helper_tmp = tcg_const_i32(excp);
39
- gen_helper_raise_exception(cpu_env, helper_tmp);
40
- tcg_temp_free_i32(helper_tmp);
41
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
42
ctx->base.is_jmp = DISAS_NORETURN;
43
}
44
45
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
46
{
47
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
48
tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
49
- TCGv_i32 helper_tmp = tcg_const_i32(excp);
50
- gen_helper_raise_exception(cpu_env, helper_tmp);
51
- tcg_temp_free_i32(helper_tmp);
52
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
53
ctx->base.is_jmp = DISAS_NORETURN;
54
}
55
56
static void gen_exception_debug(void)
57
{
58
- TCGv_i32 helper_tmp = tcg_const_i32(EXCP_DEBUG);
59
- gen_helper_raise_exception(cpu_env, helper_tmp);
60
- tcg_temp_free_i32(helper_tmp);
61
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
62
}
63
64
/* Wrapper around tcg_gen_exit_tb that handles single stepping */
65
@@ -XXX,XX +XXX,XX @@ static void gen_div(TCGv ret, TCGv source1, TCGv source2)
66
*/
67
cond1 = tcg_temp_new();
68
cond2 = tcg_temp_new();
69
- zeroreg = tcg_const_tl(0);
70
+ zeroreg = tcg_constant_tl(0);
71
resultopt1 = tcg_temp_new();
72
73
tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
74
@@ -XXX,XX +XXX,XX @@ static void gen_div(TCGv ret, TCGv source1, TCGv source2)
75
76
tcg_temp_free(cond1);
77
tcg_temp_free(cond2);
78
- tcg_temp_free(zeroreg);
79
tcg_temp_free(resultopt1);
80
}
81
82
@@ -XXX,XX +XXX,XX @@ static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
83
TCGv cond1, zeroreg, resultopt1;
84
cond1 = tcg_temp_new();
85
86
- zeroreg = tcg_const_tl(0);
87
+ zeroreg = tcg_constant_tl(0);
88
resultopt1 = tcg_temp_new();
89
90
tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
91
@@ -XXX,XX +XXX,XX @@ static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
92
tcg_gen_divu_tl(ret, source1, source2);
93
94
tcg_temp_free(cond1);
95
- tcg_temp_free(zeroreg);
96
tcg_temp_free(resultopt1);
97
}
98
99
@@ -XXX,XX +XXX,XX @@ static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
100
101
cond1 = tcg_temp_new();
102
cond2 = tcg_temp_new();
103
- zeroreg = tcg_const_tl(0);
104
+ zeroreg = tcg_constant_tl(0);
105
resultopt1 = tcg_temp_new();
106
107
tcg_gen_movi_tl(resultopt1, 1L);
108
@@ -XXX,XX +XXX,XX @@ static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
109
110
tcg_temp_free(cond1);
111
tcg_temp_free(cond2);
112
- tcg_temp_free(zeroreg);
113
tcg_temp_free(resultopt1);
114
}
115
116
@@ -XXX,XX +XXX,XX @@ static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
117
{
118
TCGv cond1, zeroreg, resultopt1;
119
cond1 = tcg_temp_new();
120
- zeroreg = tcg_const_tl(0);
121
+ zeroreg = tcg_constant_tl(0);
122
resultopt1 = tcg_temp_new();
123
124
tcg_gen_movi_tl(resultopt1, (target_ulong)1);
125
@@ -XXX,XX +XXX,XX @@ static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
126
source1);
127
128
tcg_temp_free(cond1);
129
- tcg_temp_free(zeroreg);
130
tcg_temp_free(resultopt1);
131
}
132
133
@@ -XXX,XX +XXX,XX @@ static inline void mark_fs_dirty(DisasContext *ctx) { }
134
135
static void gen_set_rm(DisasContext *ctx, int rm)
136
{
137
- TCGv_i32 t0;
138
-
139
if (ctx->frm == rm) {
140
return;
141
}
142
ctx->frm = rm;
143
- t0 = tcg_const_i32(rm);
144
- gen_helper_set_rounding_mode(cpu_env, t0);
145
- tcg_temp_free_i32(t0);
146
+ gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
147
}
148
149
static int ex_plus_1(DisasContext *ctx, int nf)
150
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
151
index XXXXXXX..XXXXXXX 100644
152
--- a/target/riscv/insn_trans/trans_rvf.c.inc
153
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
154
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
155
* Replace bit 31 in rs1 with inverse in rs2.
156
* This formulation retains the nanboxing of rs1.
157
*/
158
- mask = tcg_const_i64(~MAKE_64BIT_MASK(31, 1));
159
+ mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
160
tcg_gen_nor_i64(rs2, rs2, mask);
161
tcg_gen_and_i64(rs1, mask, rs1);
162
tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
163
164
- tcg_temp_free_i64(mask);
165
tcg_temp_free_i64(rs2);
166
}
167
tcg_temp_free_i64(rs1);
168
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
169
index XXXXXXX..XXXXXXX 100644
170
--- a/target/riscv/insn_trans/trans_rvv.c.inc
171
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
172
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
173
/* Using x0 as the rs1 register specifier, encodes an infinite AVL */
174
if (a->rs1 == 0) {
175
/* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
176
- s1 = tcg_const_tl(RV_VLEN_MAX);
177
+ s1 = tcg_constant_tl(RV_VLEN_MAX);
178
} else {
179
s1 = tcg_temp_new();
180
gen_get_gpr(s1, a->rs1);
181
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
182
return false;
183
}
184
185
- s2 = tcg_const_tl(a->zimm);
186
+ s2 = tcg_constant_tl(a->zimm);
187
dst = tcg_temp_new();
188
189
/* Using x0 as the rs1 register specifier, encodes an infinite AVL */
190
if (a->rs1 == 0) {
191
/* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
192
- s1 = tcg_const_tl(RV_VLEN_MAX);
193
+ s1 = tcg_constant_tl(RV_VLEN_MAX);
194
} else {
195
s1 = tcg_temp_new();
196
gen_get_gpr(s1, a->rs1);
197
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
198
ctx->base.is_jmp = DISAS_NORETURN;
199
200
tcg_temp_free(s1);
201
- tcg_temp_free(s2);
202
tcg_temp_free(dst);
203
return true;
204
}
205
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
206
* The first part is vlen in bytes, encoded in maxsz of simd_desc.
207
* The second part is lmul, encoded in data of simd_desc.
208
*/
209
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
210
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
211
212
gen_get_gpr(base, rs1);
213
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
214
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
215
tcg_temp_free_ptr(dest);
216
tcg_temp_free_ptr(mask);
217
tcg_temp_free(base);
218
- tcg_temp_free_i32(desc);
219
gen_set_label(over);
220
return true;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
223
mask = tcg_temp_new_ptr();
224
base = tcg_temp_new();
225
stride = tcg_temp_new();
226
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
227
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
228
229
gen_get_gpr(base, rs1);
230
gen_get_gpr(stride, rs2);
231
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
232
tcg_temp_free_ptr(mask);
233
tcg_temp_free(base);
234
tcg_temp_free(stride);
235
- tcg_temp_free_i32(desc);
236
gen_set_label(over);
237
return true;
238
}
239
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
240
mask = tcg_temp_new_ptr();
241
index = tcg_temp_new_ptr();
242
base = tcg_temp_new();
243
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
244
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
245
246
gen_get_gpr(base, rs1);
247
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
248
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
249
tcg_temp_free_ptr(mask);
250
tcg_temp_free_ptr(index);
251
tcg_temp_free(base);
252
- tcg_temp_free_i32(desc);
253
gen_set_label(over);
254
return true;
255
}
256
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
257
dest = tcg_temp_new_ptr();
258
mask = tcg_temp_new_ptr();
259
base = tcg_temp_new();
260
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
261
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
262
263
gen_get_gpr(base, rs1);
264
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
265
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
266
tcg_temp_free_ptr(dest);
267
tcg_temp_free_ptr(mask);
268
tcg_temp_free(base);
269
- tcg_temp_free_i32(desc);
270
gen_set_label(over);
271
return true;
272
}
273
@@ -XXX,XX +XXX,XX @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
274
mask = tcg_temp_new_ptr();
275
index = tcg_temp_new_ptr();
276
base = tcg_temp_new();
277
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
278
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
279
280
gen_get_gpr(base, rs1);
281
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
282
@@ -XXX,XX +XXX,XX @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
283
tcg_temp_free_ptr(mask);
284
tcg_temp_free_ptr(index);
285
tcg_temp_free(base);
286
- tcg_temp_free_i32(desc);
287
gen_set_label(over);
288
return true;
289
}
290
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
291
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
292
data = FIELD_DP32(data, VDATA, VM, vm);
293
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
294
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
295
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
296
297
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
298
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
299
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
300
tcg_temp_free_ptr(mask);
301
tcg_temp_free_ptr(src2);
302
tcg_temp_free(src1);
303
- tcg_temp_free_i32(desc);
304
gen_set_label(over);
305
return true;
306
}
307
@@ -XXX,XX +XXX,XX @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
308
mask = tcg_temp_new_ptr();
309
src2 = tcg_temp_new_ptr();
310
if (zx) {
311
- src1 = tcg_const_tl(imm);
312
+ src1 = tcg_constant_tl(imm);
313
} else {
314
- src1 = tcg_const_tl(sextract64(imm, 0, 5));
315
+ src1 = tcg_constant_tl(sextract64(imm, 0, 5));
316
}
317
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
318
data = FIELD_DP32(data, VDATA, VM, vm);
319
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
320
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
321
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
322
323
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
324
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
325
@@ -XXX,XX +XXX,XX @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
326
tcg_temp_free_ptr(dest);
327
tcg_temp_free_ptr(mask);
328
tcg_temp_free_ptr(src2);
329
- tcg_temp_free(src1);
330
- tcg_temp_free_i32(desc);
331
gen_set_label(over);
332
return true;
333
}
334
@@ -XXX,XX +XXX,XX @@ GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)
335
static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
336
int64_t c, uint32_t oprsz, uint32_t maxsz)
337
{
338
- TCGv_i64 tmp = tcg_const_i64(c);
339
+ TCGv_i64 tmp = tcg_constant_i64(c);
340
tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
341
- tcg_temp_free_i64(tmp);
342
}
343
344
GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)
345
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
346
tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
347
MAXSZ(s), MAXSZ(s), s1);
348
} else {
349
- TCGv_i32 desc ;
350
+ TCGv_i32 desc;
351
TCGv_i64 s1_i64 = tcg_temp_new_i64();
352
TCGv_ptr dest = tcg_temp_new_ptr();
353
uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
354
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
355
};
356
357
tcg_gen_ext_tl_i64(s1_i64, s1);
358
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
359
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
360
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
361
fns[s->sew](dest, s1_i64, cpu_env, desc);
362
363
tcg_temp_free_ptr(dest);
364
- tcg_temp_free_i32(desc);
365
tcg_temp_free_i64(s1_i64);
366
}
367
368
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
369
TCGLabel *over = gen_new_label();
370
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
371
372
- s1 = tcg_const_i64(simm);
373
+ s1 = tcg_constant_i64(simm);
374
dest = tcg_temp_new_ptr();
375
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
376
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
377
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
378
fns[s->sew](dest, s1, cpu_env, desc);
379
380
tcg_temp_free_ptr(dest);
381
- tcg_temp_free_i32(desc);
382
- tcg_temp_free_i64(s1);
383
gen_set_label(over);
384
}
385
return true;
386
@@ -XXX,XX +XXX,XX @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
387
dest = tcg_temp_new_ptr();
388
mask = tcg_temp_new_ptr();
389
src2 = tcg_temp_new_ptr();
390
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
391
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
392
393
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
394
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
395
@@ -XXX,XX +XXX,XX @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
396
tcg_temp_free_ptr(dest);
397
tcg_temp_free_ptr(mask);
398
tcg_temp_free_ptr(src2);
399
- tcg_temp_free_i32(desc);
400
gen_set_label(over);
401
return true;
402
}
403
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
404
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
405
406
dest = tcg_temp_new_ptr();
407
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
408
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
409
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
410
fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
411
412
tcg_temp_free_ptr(dest);
413
- tcg_temp_free_i32(desc);
414
gen_set_label(over);
415
}
416
return true;
417
@@ -XXX,XX +XXX,XX @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
418
mask = tcg_temp_new_ptr();
419
src2 = tcg_temp_new_ptr();
420
dst = tcg_temp_new();
421
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
422
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
423
424
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
425
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
426
@@ -XXX,XX +XXX,XX @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
427
tcg_temp_free_ptr(mask);
428
tcg_temp_free_ptr(src2);
429
tcg_temp_free(dst);
430
- tcg_temp_free_i32(desc);
431
return true;
432
}
433
return false;
434
@@ -XXX,XX +XXX,XX @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
435
mask = tcg_temp_new_ptr();
436
src2 = tcg_temp_new_ptr();
437
dst = tcg_temp_new();
438
- desc = tcg_const_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
439
+ desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
440
441
tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
442
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
443
@@ -XXX,XX +XXX,XX @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
444
tcg_temp_free_ptr(mask);
445
tcg_temp_free_ptr(src2);
446
tcg_temp_free(dst);
447
- tcg_temp_free_i32(desc);
448
return true;
449
}
450
return false;
451
@@ -XXX,XX +XXX,XX @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
452
tcg_temp_free_i32(ofs);
453
454
/* Flush out-of-range indexing to zero. */
455
- t_vlmax = tcg_const_i64(vlmax);
456
- t_zero = tcg_const_i64(0);
457
+ t_vlmax = tcg_constant_i64(vlmax);
458
+ t_zero = tcg_constant_i64(0);
459
tcg_gen_extu_tl_i64(t_idx, idx);
460
461
tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
462
t_vlmax, dest, t_zero);
463
464
- tcg_temp_free_i64(t_vlmax);
465
- tcg_temp_free_i64(t_zero);
466
tcg_temp_free_i64(t_idx);
467
}
468
469
--
470
2.31.1
471
472
diff view generated by jsdifflib
From: Bin Meng <bin.meng@windriver.com>

This adds the documentation to describe what is supported for the
'microchip-icicle-kit' machine, and how to boot the machine in QEMU.

Signed-off-by: Bin Meng <bin.meng@windriver.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20210322075248.136255-2-bmeng.cn@gmail.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
docs/system/riscv/microchip-icicle-kit.rst | 89 ++++++++++++++++++++++
docs/system/target-riscv.rst | 1 +
2 files changed, 90 insertions(+)
create mode 100644 docs/system/riscv/microchip-icicle-kit.rst

diff --git a/docs/system/riscv/microchip-icicle-kit.rst b/docs/system/riscv/microchip-icicle-kit.rst
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/docs/system/riscv/microchip-icicle-kit.rst
@@ -XXX,XX +XXX,XX @@
+Microchip PolarFire SoC Icicle Kit (``microchip-icicle-kit``)
+=============================================================
+
+Microchip PolarFire SoC Icicle Kit integrates a PolarFire SoC, with one
+SiFive's E51 plus four U54 cores and many on-chip peripherals and an FPGA.
+
+For more details about Microchip PolarFire SoC, please see:
+https://www.microsemi.com/product-directory/soc-fpgas/5498-polarfire-soc-fpga
+
+The Icicle Kit board information can be found here:
+https://www.microsemi.com/existing-parts/parts/152514
+
+Supported devices
+-----------------
+
+The ``microchip-icicle-kit`` machine supports the following devices:
+
+ * 1 E51 core
+ * 4 U54 cores
+ * Core Level Interruptor (CLINT)
+ * Platform-Level Interrupt Controller (PLIC)
+ * L2 Loosely Integrated Memory (L2-LIM)
+ * DDR memory controller
+ * 5 MMUARTs
+ * 1 DMA controller
+ * 2 GEM Ethernet controllers
+ * 1 SDHC storage controller
+
+Boot options
+------------
+
+The ``microchip-icicle-kit`` machine can start using the standard -bios
+functionality for loading its BIOS image, aka Hart Software Services (HSS_).
+HSS loads the second stage bootloader U-Boot from an SD card. It does not
+support direct kernel loading via the -kernel option. One has to load kernel
+from U-Boot.
+
+The memory is set to 1537 MiB by default which is the minimum required high
+memory size by HSS. A sanity check on ram size is performed in the machine
+init routine to prompt user to increase the RAM size to > 1537 MiB when less
+than 1537 MiB ram is detected.
+
+Boot the machine
+----------------
+
+HSS 2020.12 release is tested at the time of writing. To build an HSS image
+that can be booted by the ``microchip-icicle-kit`` machine, type the following
+in the HSS source tree:
+
+.. code-block:: bash
+
+ $ export CROSS_COMPILE=riscv64-linux-
+ $ cp boards/mpfs-icicle-kit-es/def_config .config
+ $ make BOARD=mpfs-icicle-kit-es
+
+Download the official SD card image released by Microchip and prepare it for
+QEMU usage:
+
+.. code-block:: bash
+
+ $ wget ftp://ftpsoc.microsemi.com/outgoing/core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
+ $ gunzip core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic.gz
+ $ qemu-img resize core-image-minimal-dev-icicle-kit-es-sd-20201009141623.rootfs.wic 4G
+
+Then we can boot the machine by:
+
+.. code-block:: bash
+
+ $ qemu-system-riscv64 -M microchip-icicle-kit -smp 5 \
+ -bios path/to/hss.bin -sd path/to/sdcard.img \
+ -nic user,model=cadence_gem \
+ -nic tap,ifname=tap,model=cadence_gem,script=no \
+ -display none -serial stdio \
+ -chardev socket,id=serial1,path=serial1.sock,server=on,wait=on \
+ -serial chardev:serial1
+
+With above command line, current terminal session will be used for the first
+serial port. Open another terminal window, and use `minicom` to connect the
+second serial port.
+
+.. code-block:: bash
+
+ $ minicom -D unix\#serial1.sock
+
+HSS output is on the first serial port (stdio) and U-Boot outputs on the
+second serial port. U-Boot will automatically load the Linux kernel from
+the SD card image.
+
+.. _HSS: https://github.com/polarfire-soc/hart-software-services
diff --git a/docs/system/target-riscv.rst b/docs/system/target-riscv.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/system/target-riscv.rst
+++ b/docs/system/target-riscv.rst
@@ -XXX,XX +XXX,XX @@ undocumented; you can get a complete list by running
.. toctree::
:maxdepth: 1

+ riscv/microchip-icicle-kit
riscv/sifive_u

RISC-V CPU features
--
2.30.1


From: Richard Henderson <richard.henderson@linaro.org>

Tested-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210823195529.560295-3-richard.henderson@linaro.org
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
tests/tcg/riscv64/test-div.c | 58 +++++++++++++++++++++++++++++++
tests/tcg/riscv64/Makefile.target | 5 +++
2 files changed, 63 insertions(+)
create mode 100644 tests/tcg/riscv64/test-div.c
create mode 100644 tests/tcg/riscv64/Makefile.target

diff --git a/tests/tcg/riscv64/test-div.c b/tests/tcg/riscv64/test-div.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/riscv64/test-div.c
@@ -XXX,XX +XXX,XX @@
+#include <assert.h>
+#include <limits.h>
+
+struct TestS {
+    long x, y, q, r;
+};
+
+static struct TestS test_s[] = {
+    { 4, 2, 2, 0 },                  /* normal cases */
+    { 9, 7, 1, 2 },
+    { 0, 0, -1, 0 },                 /* div by zero cases */
+    { 9, 0, -1, 9 },
+    { LONG_MIN, -1, LONG_MIN, 0 },   /* overflow case */
+};
+
+struct TestU {
+    unsigned long x, y, q, r;
+};
+
+static struct TestU test_u[] = {
+    { 4, 2, 2, 0 },                  /* normal cases */
+    { 9, 7, 1, 2 },
+    { 0, 0, ULONG_MAX, 0 },          /* div by zero cases */
+    { 9, 0, ULONG_MAX, 9 },
+};
+
+#define ARRAY_SIZE(X) (sizeof(X) / sizeof(*(X)))
+
+int main (void)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(test_s); i++) {
+        long q, r;
+
+        asm("div %0, %2, %3\n\t"
+            "rem %1, %2, %3"
+            : "=&r" (q), "=r" (r)
+            : "r" (test_s[i].x), "r" (test_s[i].y));
+
+        assert(q == test_s[i].q);
+        assert(r == test_s[i].r);
+    }
+
+    for (i = 0; i < ARRAY_SIZE(test_u); i++) {
+        unsigned long q, r;
+
+        asm("divu %0, %2, %3\n\t"
+            "remu %1, %2, %3"
+            : "=&r" (q), "=r" (r)
+            : "r" (test_u[i].x), "r" (test_u[i].y));
+
+        assert(q == test_u[i].q);
+        assert(r == test_u[i].r);
+    }
+
+    return 0;
+}
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/riscv64/Makefile.target
@@ -XXX,XX +XXX,XX @@
+# -*- Mode: makefile -*-
+# RISC-V specific tweaks
+
+VPATH += $(SRC_PATH)/tests/tcg/riscv64
+TESTS += test-div
--
2.31.1

diff view generated by jsdifflib
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Utilize the condition in the movcond more; this allows some of
4
the setcond that were feeding into movcond to be removed.
5
Do not write into source1 and source2. Re-name "condN" to "tempN"
6
and use the temporaries for more than holding conditions.
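
For reference, a minimal C sketch (not part of the patch) of the results
the generated sequence must reproduce, assuming a two's-complement
64-bit long as on RV64; these match the cases in tests/tcg/riscv64/test-div.c:

#include <limits.h>

static long riscv_div(long a, long b)
{
    if (b == 0) {
        return -1;                  /* division by zero */
    }
    if (a == LONG_MIN && b == -1) {
        return LONG_MIN;            /* signed overflow */
    }
    return a / b;
}

static long riscv_rem(long a, long b)
{
    if (b == 0) {
        return a;                   /* remainder of division by zero */
    }
    if (a == LONG_MIN && b == -1) {
        return 0;                   /* signed overflow */
    }
    return a % b;
}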
7
8
Tested-by: Bin Meng <bmeng.cn@gmail.com>
9
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210823195529.560295-4-richard.henderson@linaro.org
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/translate.c | 174 ++++++++++++++++++++-------------------
16
1 file changed, 91 insertions(+), 83 deletions(-)
17
18
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/translate.c
21
+++ b/target/riscv/translate.c
22
@@ -XXX,XX +XXX,XX @@ static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
23
24
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
25
{
26
- TCGv cond1, cond2, zeroreg, resultopt1;
27
+ TCGv temp1, temp2, zero, one, mone, min;
28
+
29
+ temp1 = tcg_temp_new();
30
+ temp2 = tcg_temp_new();
31
+ zero = tcg_constant_tl(0);
32
+ one = tcg_constant_tl(1);
33
+ mone = tcg_constant_tl(-1);
34
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
35
+
36
/*
37
- * Handle by altering args to tcg_gen_div to produce req'd results:
38
- * For overflow: want source1 in source1 and 1 in source2
39
- * For div by zero: want -1 in source1 and 1 in source2 -> -1 result
40
+ * If overflow, set temp2 to 1, else source2.
41
+ * This produces the required result of min.
42
*/
43
- cond1 = tcg_temp_new();
44
- cond2 = tcg_temp_new();
45
- zeroreg = tcg_constant_tl(0);
46
- resultopt1 = tcg_temp_new();
47
-
48
- tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
49
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)(~0L));
50
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
51
- ((target_ulong)1) << (TARGET_LONG_BITS - 1));
52
- tcg_gen_and_tl(cond1, cond1, cond2); /* cond1 = overflow */
53
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, 0); /* cond2 = div 0 */
54
- /* if div by zero, set source1 to -1, otherwise don't change */
55
- tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond2, zeroreg, source1,
56
- resultopt1);
57
- /* if overflow or div by zero, set source2 to 1, else don't change */
58
- tcg_gen_or_tl(cond1, cond1, cond2);
59
- tcg_gen_movi_tl(resultopt1, (target_ulong)1);
60
- tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
61
- resultopt1);
62
- tcg_gen_div_tl(ret, source1, source2);
63
-
64
- tcg_temp_free(cond1);
65
- tcg_temp_free(cond2);
66
- tcg_temp_free(resultopt1);
67
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
68
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
69
+ tcg_gen_and_tl(temp1, temp1, temp2);
70
+ tcg_gen_movcond_tl(TCG_COND_NE, temp2, temp1, zero, one, source2);
71
+
72
+ /*
73
+ * If div by zero, set temp1 to -1 and temp2 to 1 to
74
+ * produce the required result of -1.
75
+ */
76
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, mone, source1);
77
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2);
78
+
79
+ tcg_gen_div_tl(ret, temp1, temp2);
80
+
81
+ tcg_temp_free(temp1);
82
+ tcg_temp_free(temp2);
83
}
84
85
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
86
{
87
- TCGv cond1, zeroreg, resultopt1;
88
- cond1 = tcg_temp_new();
89
+ TCGv temp1, temp2, zero, one, max;
90
91
- zeroreg = tcg_constant_tl(0);
92
- resultopt1 = tcg_temp_new();
93
+ temp1 = tcg_temp_new();
94
+ temp2 = tcg_temp_new();
95
+ zero = tcg_constant_tl(0);
96
+ one = tcg_constant_tl(1);
97
+ max = tcg_constant_tl(~0);
98
99
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
100
- tcg_gen_movi_tl(resultopt1, (target_ulong)-1);
101
- tcg_gen_movcond_tl(TCG_COND_EQ, source1, cond1, zeroreg, source1,
102
- resultopt1);
103
- tcg_gen_movi_tl(resultopt1, (target_ulong)1);
104
- tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
105
- resultopt1);
106
- tcg_gen_divu_tl(ret, source1, source2);
107
+ /*
108
+ * If div by zero, set temp1 to max and temp2 to 1 to
109
+ * produce the required result of max.
110
+ */
111
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1);
112
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
113
+ tcg_gen_divu_tl(ret, temp1, temp2);
114
115
- tcg_temp_free(cond1);
116
- tcg_temp_free(resultopt1);
117
+ tcg_temp_free(temp1);
118
+ tcg_temp_free(temp2);
119
}
120
121
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
122
{
123
- TCGv cond1, cond2, zeroreg, resultopt1;
124
-
125
- cond1 = tcg_temp_new();
126
- cond2 = tcg_temp_new();
127
- zeroreg = tcg_constant_tl(0);
128
- resultopt1 = tcg_temp_new();
129
-
130
- tcg_gen_movi_tl(resultopt1, 1L);
131
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond2, source2, (target_ulong)-1);
132
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source1,
133
- (target_ulong)1 << (TARGET_LONG_BITS - 1));
134
- tcg_gen_and_tl(cond2, cond1, cond2); /* cond1 = overflow */
135
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0); /* cond2 = div 0 */
136
- /* if overflow or div by zero, set source2 to 1, else don't change */
137
- tcg_gen_or_tl(cond2, cond1, cond2);
138
- tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond2, zeroreg, source2,
139
- resultopt1);
140
- tcg_gen_rem_tl(resultopt1, source1, source2);
141
- /* if div by zero, just return the original dividend */
142
- tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
143
- source1);
144
-
145
- tcg_temp_free(cond1);
146
- tcg_temp_free(cond2);
147
- tcg_temp_free(resultopt1);
148
+ TCGv temp1, temp2, zero, one, mone, min;
149
+
150
+ temp1 = tcg_temp_new();
151
+ temp2 = tcg_temp_new();
152
+ zero = tcg_constant_tl(0);
153
+ one = tcg_constant_tl(1);
154
+ mone = tcg_constant_tl(-1);
155
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
156
+
157
+ /*
158
+ * If overflow, set temp1 to 0, else source1.
159
+ * This avoids a possible host trap, and produces the required result of 0.
160
+ */
161
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
162
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
163
+ tcg_gen_and_tl(temp1, temp1, temp2);
164
+ tcg_gen_movcond_tl(TCG_COND_NE, temp1, temp1, zero, zero, source1);
165
+
166
+ /*
167
+ * If div by zero, set temp2 to 1, else source2.
168
+ * This avoids a possible host trap, but produces an incorrect result.
169
+ */
170
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
171
+
172
+ tcg_gen_rem_tl(temp1, temp1, temp2);
173
+
174
+ /* If div by zero, the required result is the original dividend. */
175
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1);
176
+
177
+ tcg_temp_free(temp1);
178
+ tcg_temp_free(temp2);
179
}
180
181
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
182
{
183
- TCGv cond1, zeroreg, resultopt1;
184
- cond1 = tcg_temp_new();
185
- zeroreg = tcg_constant_tl(0);
186
- resultopt1 = tcg_temp_new();
187
-
188
- tcg_gen_movi_tl(resultopt1, (target_ulong)1);
189
- tcg_gen_setcondi_tl(TCG_COND_EQ, cond1, source2, 0);
190
- tcg_gen_movcond_tl(TCG_COND_EQ, source2, cond1, zeroreg, source2,
191
- resultopt1);
192
- tcg_gen_remu_tl(resultopt1, source1, source2);
193
- /* if div by zero, just return the original dividend */
194
- tcg_gen_movcond_tl(TCG_COND_EQ, ret, cond1, zeroreg, resultopt1,
195
- source1);
196
-
197
- tcg_temp_free(cond1);
198
- tcg_temp_free(resultopt1);
199
+ TCGv temp, zero, one;
200
+
201
+ temp = tcg_temp_new();
202
+ zero = tcg_constant_tl(0);
203
+ one = tcg_constant_tl(1);
204
+
205
+ /*
206
+ * If div by zero, set temp to 1, else source2.
207
+ * This avoids a possible host trap, but produces an incorrect result.
208
+ */
209
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp, source2, zero, one, source2);
210
+
211
+ tcg_gen_remu_tl(temp, source1, temp);
212
+
213
+ /* If div by zero, the required result is the original dividend. */
214
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp);
215
+
216
+ tcg_temp_free(temp);
217
}
218
219
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
220
--
221
2.31.1
222
223
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
We will require the context to handle RV64 word operations.
4
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20210823195529.560295-5-richard.henderson@linaro.org
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/translate.c | 58 ++++++++++++-------------
13
target/riscv/insn_trans/trans_rva.c.inc | 18 ++++----
14
target/riscv/insn_trans/trans_rvb.c.inc | 4 +-
15
target/riscv/insn_trans/trans_rvd.c.inc | 32 +++++++-------
16
target/riscv/insn_trans/trans_rvf.c.inc | 32 +++++++-------
17
target/riscv/insn_trans/trans_rvh.c.inc | 52 +++++++++++-----------
18
target/riscv/insn_trans/trans_rvi.c.inc | 44 +++++++++----------
19
target/riscv/insn_trans/trans_rvm.c.inc | 12 ++---
20
target/riscv/insn_trans/trans_rvv.c.inc | 36 +++++++--------
21
9 files changed, 144 insertions(+), 144 deletions(-)
22
23
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/riscv/translate.c
26
+++ b/target/riscv/translate.c
27
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
28
/* Wrapper for getting reg values - need to check of reg is zero since
29
* cpu_gpr[0] is not actually allocated
30
*/
31
-static inline void gen_get_gpr(TCGv t, int reg_num)
32
+static void gen_get_gpr(DisasContext *ctx, TCGv t, int reg_num)
33
{
34
if (reg_num == 0) {
35
tcg_gen_movi_tl(t, 0);
36
@@ -XXX,XX +XXX,XX @@ static inline void gen_get_gpr(TCGv t, int reg_num)
37
* since we usually avoid calling the OP_TYPE_gen function if we see a write to
38
* $zero
39
*/
40
-static inline void gen_set_gpr(int reg_num_dst, TCGv t)
41
+static void gen_set_gpr(DisasContext *ctx, int reg_num_dst, TCGv t)
42
{
43
if (reg_num_dst != 0) {
44
tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
45
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
46
TCGv source1;
47
source1 = tcg_temp_new();
48
49
- gen_get_gpr(source1, a->rs1);
50
+ gen_get_gpr(ctx, source1, a->rs1);
51
52
(*func)(source1, source1, a->imm);
53
54
- gen_set_gpr(a->rd, source1);
55
+ gen_set_gpr(ctx, a->rd, source1);
56
tcg_temp_free(source1);
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
60
source1 = tcg_temp_new();
61
source2 = tcg_temp_new();
62
63
- gen_get_gpr(source1, a->rs1);
64
+ gen_get_gpr(ctx, source1, a->rs1);
65
tcg_gen_movi_tl(source2, a->imm);
66
67
(*func)(source1, source1, source2);
68
69
- gen_set_gpr(a->rd, source1);
70
+ gen_set_gpr(ctx, a->rd, source1);
71
tcg_temp_free(source1);
72
tcg_temp_free(source2);
73
return true;
74
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
75
source1 = tcg_temp_new();
76
source2 = tcg_temp_new();
77
78
- gen_get_gpr(source1, a->rs1);
79
- gen_get_gpr(source2, a->rs2);
80
+ gen_get_gpr(ctx, source1, a->rs1);
81
+ gen_get_gpr(ctx, source2, a->rs2);
82
tcg_gen_ext32s_tl(source1, source1);
83
tcg_gen_ext32s_tl(source2, source2);
84
85
(*func)(source1, source1, source2);
86
87
tcg_gen_ext32s_tl(source1, source1);
88
- gen_set_gpr(a->rd, source1);
89
+ gen_set_gpr(ctx, a->rd, source1);
90
tcg_temp_free(source1);
91
tcg_temp_free(source2);
92
return true;
93
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
94
source1 = tcg_temp_new();
95
source2 = tcg_temp_new();
96
97
- gen_get_gpr(source1, a->rs1);
98
- gen_get_gpr(source2, a->rs2);
99
+ gen_get_gpr(ctx, source1, a->rs1);
100
+ gen_get_gpr(ctx, source2, a->rs2);
101
tcg_gen_ext32u_tl(source1, source1);
102
tcg_gen_ext32u_tl(source2, source2);
103
104
(*func)(source1, source1, source2);
105
106
tcg_gen_ext32s_tl(source1, source1);
107
- gen_set_gpr(a->rd, source1);
108
+ gen_set_gpr(ctx, a->rd, source1);
109
tcg_temp_free(source1);
110
tcg_temp_free(source2);
111
return true;
112
@@ -XXX,XX +XXX,XX @@ static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
113
TCGv source1 = tcg_temp_new();
114
TCGv source2;
115
116
- gen_get_gpr(source1, a->rs1);
117
+ gen_get_gpr(ctx, source1, a->rs1);
118
119
if (a->shamt == (TARGET_LONG_BITS - 8)) {
120
/* rev8, byte swaps */
121
@@ -XXX,XX +XXX,XX @@ static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
122
tcg_temp_free(source2);
123
}
124
125
- gen_set_gpr(a->rd, source1);
126
+ gen_set_gpr(ctx, a->rd, source1);
127
tcg_temp_free(source1);
128
return true;
129
}
130
@@ -XXX,XX +XXX,XX @@ static bool gen_arith(DisasContext *ctx, arg_r *a,
131
source1 = tcg_temp_new();
132
source2 = tcg_temp_new();
133
134
- gen_get_gpr(source1, a->rs1);
135
- gen_get_gpr(source2, a->rs2);
136
+ gen_get_gpr(ctx, source1, a->rs1);
137
+ gen_get_gpr(ctx, source2, a->rs2);
138
139
(*func)(source1, source1, source2);
140
141
- gen_set_gpr(a->rd, source1);
142
+ gen_set_gpr(ctx, a->rd, source1);
143
tcg_temp_free(source1);
144
tcg_temp_free(source2);
145
return true;
146
@@ -XXX,XX +XXX,XX @@ static bool gen_shift(DisasContext *ctx, arg_r *a,
147
TCGv source1 = tcg_temp_new();
148
TCGv source2 = tcg_temp_new();
149
150
- gen_get_gpr(source1, a->rs1);
151
- gen_get_gpr(source2, a->rs2);
152
+ gen_get_gpr(ctx, source1, a->rs1);
153
+ gen_get_gpr(ctx, source2, a->rs2);
154
155
tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
156
(*func)(source1, source1, source2);
157
158
- gen_set_gpr(a->rd, source1);
159
+ gen_set_gpr(ctx, a->rd, source1);
160
tcg_temp_free(source1);
161
tcg_temp_free(source2);
162
return true;
163
@@ -XXX,XX +XXX,XX @@ static bool gen_shifti(DisasContext *ctx, arg_shift *a,
164
TCGv source1 = tcg_temp_new();
165
TCGv source2 = tcg_temp_new();
166
167
- gen_get_gpr(source1, a->rs1);
168
+ gen_get_gpr(ctx, source1, a->rs1);
169
170
tcg_gen_movi_tl(source2, a->shamt);
171
(*func)(source1, source1, source2);
172
173
- gen_set_gpr(a->rd, source1);
174
+ gen_set_gpr(ctx, a->rd, source1);
175
tcg_temp_free(source1);
176
tcg_temp_free(source2);
177
return true;
178
@@ -XXX,XX +XXX,XX @@ static bool gen_shiftw(DisasContext *ctx, arg_r *a,
179
TCGv source1 = tcg_temp_new();
180
TCGv source2 = tcg_temp_new();
181
182
- gen_get_gpr(source1, a->rs1);
183
- gen_get_gpr(source2, a->rs2);
184
+ gen_get_gpr(ctx, source1, a->rs1);
185
+ gen_get_gpr(ctx, source2, a->rs2);
186
187
tcg_gen_andi_tl(source2, source2, 31);
188
(*func)(source1, source1, source2);
189
tcg_gen_ext32s_tl(source1, source1);
190
191
- gen_set_gpr(a->rd, source1);
192
+ gen_set_gpr(ctx, a->rd, source1);
193
tcg_temp_free(source1);
194
tcg_temp_free(source2);
195
return true;
196
@@ -XXX,XX +XXX,XX @@ static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
197
TCGv source1 = tcg_temp_new();
198
TCGv source2 = tcg_temp_new();
199
200
- gen_get_gpr(source1, a->rs1);
201
+ gen_get_gpr(ctx, source1, a->rs1);
202
tcg_gen_movi_tl(source2, a->shamt);
203
204
(*func)(source1, source1, source2);
205
tcg_gen_ext32s_tl(source1, source1);
206
207
- gen_set_gpr(a->rd, source1);
208
+ gen_set_gpr(ctx, a->rd, source1);
209
tcg_temp_free(source1);
210
tcg_temp_free(source2);
211
return true;
212
@@ -XXX,XX +XXX,XX @@ static bool gen_unary(DisasContext *ctx, arg_r2 *a,
213
{
214
TCGv source = tcg_temp_new();
215
216
- gen_get_gpr(source, a->rs1);
217
+ gen_get_gpr(ctx, source, a->rs1);
218
219
(*func)(source, source);
220
221
- gen_set_gpr(a->rd, source);
222
+ gen_set_gpr(ctx, a->rd, source);
223
tcg_temp_free(source);
224
return true;
225
}
226
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
227
index XXXXXXX..XXXXXXX 100644
228
--- a/target/riscv/insn_trans/trans_rva.c.inc
229
+++ b/target/riscv/insn_trans/trans_rva.c.inc
230
@@ -XXX,XX +XXX,XX @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
231
{
232
TCGv src1 = tcg_temp_new();
233
/* Put addr in load_res, data in load_val. */
234
- gen_get_gpr(src1, a->rs1);
235
+ gen_get_gpr(ctx, src1, a->rs1);
236
if (a->rl) {
237
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
238
}
239
@@ -XXX,XX +XXX,XX @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
240
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
241
}
242
tcg_gen_mov_tl(load_res, src1);
243
- gen_set_gpr(a->rd, load_val);
244
+ gen_set_gpr(ctx, a->rd, load_val);
245
246
tcg_temp_free(src1);
247
return true;
248
@@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
249
TCGLabel *l1 = gen_new_label();
250
TCGLabel *l2 = gen_new_label();
251
252
- gen_get_gpr(src1, a->rs1);
253
+ gen_get_gpr(ctx, src1, a->rs1);
254
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
255
256
- gen_get_gpr(src2, a->rs2);
257
+ gen_get_gpr(ctx, src2, a->rs2);
258
/*
259
* Note that the TCG atomic primitives are SC,
260
* so we can ignore AQ/RL along this path.
261
@@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
262
tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
263
ctx->mem_idx, mop);
264
tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
265
- gen_set_gpr(a->rd, dat);
266
+ gen_set_gpr(ctx, a->rd, dat);
267
tcg_gen_br(l2);
268
269
gen_set_label(l1);
270
@@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
271
*/
272
tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
273
tcg_gen_movi_tl(dat, 1);
274
- gen_set_gpr(a->rd, dat);
275
+ gen_set_gpr(ctx, a->rd, dat);
276
277
gen_set_label(l2);
278
/*
279
@@ -XXX,XX +XXX,XX @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
280
TCGv src1 = tcg_temp_new();
281
TCGv src2 = tcg_temp_new();
282
283
- gen_get_gpr(src1, a->rs1);
284
- gen_get_gpr(src2, a->rs2);
285
+ gen_get_gpr(ctx, src1, a->rs1);
286
+ gen_get_gpr(ctx, src2, a->rs2);
287
288
(*func)(src2, src1, src2, ctx->mem_idx, mop);
289
290
- gen_set_gpr(a->rd, src2);
291
+ gen_set_gpr(ctx, a->rd, src2);
292
tcg_temp_free(src1);
293
tcg_temp_free(src2);
294
return true;
295
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
296
index XXXXXXX..XXXXXXX 100644
297
--- a/target/riscv/insn_trans/trans_rvb.c.inc
298
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
299
@@ -XXX,XX +XXX,XX @@ static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
300
REQUIRE_EXT(ctx, RVB);
301
302
TCGv source1 = tcg_temp_new();
303
- gen_get_gpr(source1, a->rs1);
304
+ gen_get_gpr(ctx, source1, a->rs1);
305
306
if (a->shamt < 32) {
307
tcg_gen_deposit_z_tl(source1, source1, a->shamt, 32);
308
@@ -XXX,XX +XXX,XX @@ static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
309
tcg_gen_shli_tl(source1, source1, a->shamt);
310
}
311
312
- gen_set_gpr(a->rd, source1);
313
+ gen_set_gpr(ctx, a->rd, source1);
314
tcg_temp_free(source1);
315
return true;
316
}
317
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/riscv/insn_trans/trans_rvd.c.inc
320
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
321
@@ -XXX,XX +XXX,XX @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
322
REQUIRE_FPU;
323
REQUIRE_EXT(ctx, RVD);
324
TCGv t0 = tcg_temp_new();
325
- gen_get_gpr(t0, a->rs1);
326
+ gen_get_gpr(ctx, t0, a->rs1);
327
tcg_gen_addi_tl(t0, t0, a->imm);
328
329
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ);
330
@@ -XXX,XX +XXX,XX @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
331
REQUIRE_FPU;
332
REQUIRE_EXT(ctx, RVD);
333
TCGv t0 = tcg_temp_new();
334
- gen_get_gpr(t0, a->rs1);
335
+ gen_get_gpr(ctx, t0, a->rs1);
336
tcg_gen_addi_tl(t0, t0, a->imm);
337
338
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ);
339
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
340
341
TCGv t0 = tcg_temp_new();
342
gen_helper_feq_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
343
- gen_set_gpr(a->rd, t0);
344
+ gen_set_gpr(ctx, a->rd, t0);
345
tcg_temp_free(t0);
346
347
return true;
348
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
349
350
TCGv t0 = tcg_temp_new();
351
gen_helper_flt_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
352
- gen_set_gpr(a->rd, t0);
353
+ gen_set_gpr(ctx, a->rd, t0);
354
tcg_temp_free(t0);
355
356
return true;
357
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
358
359
TCGv t0 = tcg_temp_new();
360
gen_helper_fle_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
361
- gen_set_gpr(a->rd, t0);
362
+ gen_set_gpr(ctx, a->rd, t0);
363
tcg_temp_free(t0);
364
365
return true;
366
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
367
368
TCGv t0 = tcg_temp_new();
369
gen_helper_fclass_d(t0, cpu_fpr[a->rs1]);
370
- gen_set_gpr(a->rd, t0);
371
+ gen_set_gpr(ctx, a->rd, t0);
372
tcg_temp_free(t0);
373
return true;
374
}
375
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
376
TCGv t0 = tcg_temp_new();
377
gen_set_rm(ctx, a->rm);
378
gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[a->rs1]);
379
- gen_set_gpr(a->rd, t0);
380
+ gen_set_gpr(ctx, a->rd, t0);
381
tcg_temp_free(t0);
382
383
return true;
384
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
385
TCGv t0 = tcg_temp_new();
386
gen_set_rm(ctx, a->rm);
387
gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[a->rs1]);
388
- gen_set_gpr(a->rd, t0);
389
+ gen_set_gpr(ctx, a->rd, t0);
390
tcg_temp_free(t0);
391
392
return true;
393
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
394
REQUIRE_EXT(ctx, RVD);
395
396
TCGv t0 = tcg_temp_new();
397
- gen_get_gpr(t0, a->rs1);
398
+ gen_get_gpr(ctx, t0, a->rs1);
399
400
gen_set_rm(ctx, a->rm);
401
gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, t0);
402
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
403
REQUIRE_EXT(ctx, RVD);
404
405
TCGv t0 = tcg_temp_new();
406
- gen_get_gpr(t0, a->rs1);
407
+ gen_get_gpr(ctx, t0, a->rs1);
408
409
gen_set_rm(ctx, a->rm);
410
gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, t0);
411
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
412
TCGv t0 = tcg_temp_new();
413
gen_set_rm(ctx, a->rm);
414
gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[a->rs1]);
415
- gen_set_gpr(a->rd, t0);
416
+ gen_set_gpr(ctx, a->rd, t0);
417
tcg_temp_free(t0);
418
return true;
419
}
420
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
421
TCGv t0 = tcg_temp_new();
422
gen_set_rm(ctx, a->rm);
423
gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[a->rs1]);
424
- gen_set_gpr(a->rd, t0);
425
+ gen_set_gpr(ctx, a->rd, t0);
426
tcg_temp_free(t0);
427
return true;
428
}
429
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a)
430
REQUIRE_EXT(ctx, RVD);
431
432
#ifdef TARGET_RISCV64
433
- gen_set_gpr(a->rd, cpu_fpr[a->rs1]);
434
+ gen_set_gpr(ctx, a->rd, cpu_fpr[a->rs1]);
435
return true;
436
#else
437
qemu_build_not_reached();
438
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
439
REQUIRE_EXT(ctx, RVD);
440
441
TCGv t0 = tcg_temp_new();
442
- gen_get_gpr(t0, a->rs1);
443
+ gen_get_gpr(ctx, t0, a->rs1);
444
445
gen_set_rm(ctx, a->rm);
446
gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, t0);
447
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
448
REQUIRE_EXT(ctx, RVD);
449
450
TCGv t0 = tcg_temp_new();
451
- gen_get_gpr(t0, a->rs1);
452
+ gen_get_gpr(ctx, t0, a->rs1);
453
454
gen_set_rm(ctx, a->rm);
455
gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, t0);
456
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a)
457
458
#ifdef TARGET_RISCV64
459
TCGv t0 = tcg_temp_new();
460
- gen_get_gpr(t0, a->rs1);
461
+ gen_get_gpr(ctx, t0, a->rs1);
462
463
tcg_gen_mov_tl(cpu_fpr[a->rd], t0);
464
tcg_temp_free(t0);
465
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
466
index XXXXXXX..XXXXXXX 100644
467
--- a/target/riscv/insn_trans/trans_rvf.c.inc
468
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
469
@@ -XXX,XX +XXX,XX @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
470
REQUIRE_FPU;
471
REQUIRE_EXT(ctx, RVF);
472
TCGv t0 = tcg_temp_new();
473
- gen_get_gpr(t0, a->rs1);
474
+ gen_get_gpr(ctx, t0, a->rs1);
475
tcg_gen_addi_tl(t0, t0, a->imm);
476
477
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL);
478
@@ -XXX,XX +XXX,XX @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
479
REQUIRE_FPU;
480
REQUIRE_EXT(ctx, RVF);
481
TCGv t0 = tcg_temp_new();
482
- gen_get_gpr(t0, a->rs1);
483
+ gen_get_gpr(ctx, t0, a->rs1);
484
485
tcg_gen_addi_tl(t0, t0, a->imm);
486
487
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
488
TCGv t0 = tcg_temp_new();
489
gen_set_rm(ctx, a->rm);
490
gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[a->rs1]);
491
- gen_set_gpr(a->rd, t0);
492
+ gen_set_gpr(ctx, a->rd, t0);
493
tcg_temp_free(t0);
494
495
return true;
496
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
497
TCGv t0 = tcg_temp_new();
498
gen_set_rm(ctx, a->rm);
499
gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[a->rs1]);
500
- gen_set_gpr(a->rd, t0);
501
+ gen_set_gpr(ctx, a->rd, t0);
502
tcg_temp_free(t0);
503
504
return true;
505
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
506
tcg_gen_extrl_i64_i32(t0, cpu_fpr[a->rs1]);
507
#endif
508
509
- gen_set_gpr(a->rd, t0);
510
+ gen_set_gpr(ctx, a->rd, t0);
511
tcg_temp_free(t0);
512
513
return true;
514
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
515
REQUIRE_EXT(ctx, RVF);
516
TCGv t0 = tcg_temp_new();
517
gen_helper_feq_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
518
- gen_set_gpr(a->rd, t0);
519
+ gen_set_gpr(ctx, a->rd, t0);
520
tcg_temp_free(t0);
521
return true;
522
}
523
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
524
REQUIRE_EXT(ctx, RVF);
525
TCGv t0 = tcg_temp_new();
526
gen_helper_flt_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
527
- gen_set_gpr(a->rd, t0);
528
+ gen_set_gpr(ctx, a->rd, t0);
529
tcg_temp_free(t0);
530
return true;
531
}
532
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
533
REQUIRE_EXT(ctx, RVF);
534
TCGv t0 = tcg_temp_new();
535
gen_helper_fle_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
536
- gen_set_gpr(a->rd, t0);
537
+ gen_set_gpr(ctx, a->rd, t0);
538
tcg_temp_free(t0);
539
return true;
540
}
541
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
542
543
gen_helper_fclass_s(t0, cpu_fpr[a->rs1]);
544
545
- gen_set_gpr(a->rd, t0);
546
+ gen_set_gpr(ctx, a->rd, t0);
547
tcg_temp_free(t0);
548
549
return true;
550
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
551
REQUIRE_EXT(ctx, RVF);
552
553
TCGv t0 = tcg_temp_new();
554
- gen_get_gpr(t0, a->rs1);
555
+ gen_get_gpr(ctx, t0, a->rs1);
556
557
gen_set_rm(ctx, a->rm);
558
gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, t0);
559
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
560
REQUIRE_EXT(ctx, RVF);
561
562
TCGv t0 = tcg_temp_new();
563
- gen_get_gpr(t0, a->rs1);
564
+ gen_get_gpr(ctx, t0, a->rs1);
565
566
gen_set_rm(ctx, a->rm);
567
gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, t0);
568
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
569
REQUIRE_EXT(ctx, RVF);
570
571
TCGv t0 = tcg_temp_new();
572
- gen_get_gpr(t0, a->rs1);
573
+ gen_get_gpr(ctx, t0, a->rs1);
574
575
tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0);
576
gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
577
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
578
TCGv t0 = tcg_temp_new();
579
gen_set_rm(ctx, a->rm);
580
gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[a->rs1]);
581
- gen_set_gpr(a->rd, t0);
582
+ gen_set_gpr(ctx, a->rd, t0);
583
tcg_temp_free(t0);
584
return true;
585
}
586
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
587
TCGv t0 = tcg_temp_new();
588
gen_set_rm(ctx, a->rm);
589
gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[a->rs1]);
590
- gen_set_gpr(a->rd, t0);
591
+ gen_set_gpr(ctx, a->rd, t0);
592
tcg_temp_free(t0);
593
return true;
594
}
595
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
596
REQUIRE_EXT(ctx, RVF);
597
598
TCGv t0 = tcg_temp_new();
599
- gen_get_gpr(t0, a->rs1);
600
+ gen_get_gpr(ctx, t0, a->rs1);
601
602
gen_set_rm(ctx, a->rm);
603
gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, t0);
604
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
605
REQUIRE_EXT(ctx, RVF);
606
607
TCGv t0 = tcg_temp_new();
608
- gen_get_gpr(t0, a->rs1);
609
+ gen_get_gpr(ctx, t0, a->rs1);
610
611
gen_set_rm(ctx, a->rm);
612
gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, t0);
613
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/riscv/insn_trans/trans_rvh.c.inc
616
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
617
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
618
619
check_access(ctx);
620
621
- gen_get_gpr(t0, a->rs1);
622
+ gen_get_gpr(ctx, t0, a->rs1);
623
624
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
625
- gen_set_gpr(a->rd, t1);
626
+ gen_set_gpr(ctx, a->rd, t1);
627
628
tcg_temp_free(t0);
629
tcg_temp_free(t1);
630
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
631
632
check_access(ctx);
633
634
- gen_get_gpr(t0, a->rs1);
635
+ gen_get_gpr(ctx, t0, a->rs1);
636
637
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
638
- gen_set_gpr(a->rd, t1);
639
+ gen_set_gpr(ctx, a->rd, t1);
640
641
tcg_temp_free(t0);
642
tcg_temp_free(t1);
643
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a)
644
645
check_access(ctx);
646
647
- gen_get_gpr(t0, a->rs1);
648
+ gen_get_gpr(ctx, t0, a->rs1);
649
650
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
651
- gen_set_gpr(a->rd, t1);
652
+ gen_set_gpr(ctx, a->rd, t1);
653
654
tcg_temp_free(t0);
655
tcg_temp_free(t1);
656
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a)
657
658
check_access(ctx);
659
660
- gen_get_gpr(t0, a->rs1);
661
+ gen_get_gpr(ctx, t0, a->rs1);
662
663
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_UB);
664
- gen_set_gpr(a->rd, t1);
665
+ gen_set_gpr(ctx, a->rd, t1);
666
667
tcg_temp_free(t0);
668
tcg_temp_free(t1);
669
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a)
670
671
check_access(ctx);
672
673
- gen_get_gpr(t0, a->rs1);
674
+ gen_get_gpr(ctx, t0, a->rs1);
675
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUW);
676
- gen_set_gpr(a->rd, t1);
677
+ gen_set_gpr(ctx, a->rd, t1);
678
679
tcg_temp_free(t0);
680
tcg_temp_free(t1);
681
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a)
682
683
check_access(ctx);
684
685
- gen_get_gpr(t0, a->rs1);
686
- gen_get_gpr(dat, a->rs2);
687
+ gen_get_gpr(ctx, t0, a->rs1);
688
+ gen_get_gpr(ctx, dat, a->rs2);
689
690
tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
691
692
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a)
693
694
check_access(ctx);
695
696
- gen_get_gpr(t0, a->rs1);
697
- gen_get_gpr(dat, a->rs2);
698
+ gen_get_gpr(ctx, t0, a->rs1);
699
+ gen_get_gpr(ctx, dat, a->rs2);
700
701
tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
702
703
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a)
704
705
check_access(ctx);
706
707
- gen_get_gpr(t0, a->rs1);
708
- gen_get_gpr(dat, a->rs2);
709
+ gen_get_gpr(ctx, t0, a->rs1);
710
+ gen_get_gpr(ctx, dat, a->rs2);
711
712
tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
713
714
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a)
715
716
check_access(ctx);
717
718
- gen_get_gpr(t0, a->rs1);
719
+ gen_get_gpr(ctx, t0, a->rs1);
720
721
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUL);
722
- gen_set_gpr(a->rd, t1);
723
+ gen_set_gpr(ctx, a->rd, t1);
724
725
tcg_temp_free(t0);
726
tcg_temp_free(t1);
727
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
728
729
check_access(ctx);
730
731
- gen_get_gpr(t0, a->rs1);
732
+ gen_get_gpr(ctx, t0, a->rs1);
733
734
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
735
- gen_set_gpr(a->rd, t1);
736
+ gen_set_gpr(ctx, a->rd, t1);
737
738
tcg_temp_free(t0);
739
tcg_temp_free(t1);
740
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
741
742
check_access(ctx);
743
744
- gen_get_gpr(t0, a->rs1);
745
- gen_get_gpr(dat, a->rs2);
746
+ gen_get_gpr(ctx, t0, a->rs1);
747
+ gen_get_gpr(ctx, dat, a->rs2);
748
749
tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
750
751
@@ -XXX,XX +XXX,XX @@ static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a)
752
753
check_access(ctx);
754
755
- gen_get_gpr(t0, a->rs1);
756
+ gen_get_gpr(ctx, t0, a->rs1);
757
758
gen_helper_hyp_hlvx_hu(t1, cpu_env, t0);
759
- gen_set_gpr(a->rd, t1);
760
+ gen_set_gpr(ctx, a->rd, t1);
761
762
tcg_temp_free(t0);
763
tcg_temp_free(t1);
764
@@ -XXX,XX +XXX,XX @@ static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a)
765
766
check_access(ctx);
767
768
- gen_get_gpr(t0, a->rs1);
769
+ gen_get_gpr(ctx, t0, a->rs1);
770
771
gen_helper_hyp_hlvx_wu(t1, cpu_env, t0);
772
- gen_set_gpr(a->rd, t1);
773
+ gen_set_gpr(ctx, a->rd, t1);
774
775
tcg_temp_free(t0);
776
tcg_temp_free(t1);
777
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
778
index XXXXXXX..XXXXXXX 100644
779
--- a/target/riscv/insn_trans/trans_rvi.c.inc
780
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
781
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
782
TCGv t0 = tcg_temp_new();
783
784
785
- gen_get_gpr(cpu_pc, a->rs1);
786
+ gen_get_gpr(ctx, cpu_pc, a->rs1);
787
tcg_gen_addi_tl(cpu_pc, cpu_pc, a->imm);
788
tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
789
790
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
791
TCGv source1, source2;
792
source1 = tcg_temp_new();
793
source2 = tcg_temp_new();
794
- gen_get_gpr(source1, a->rs1);
795
- gen_get_gpr(source2, a->rs2);
796
+ gen_get_gpr(ctx, source1, a->rs1);
797
+ gen_get_gpr(ctx, source2, a->rs2);
798
799
tcg_gen_brcond_tl(cond, source1, source2, l);
800
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
801
@@ -XXX,XX +XXX,XX @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
802
{
803
TCGv t0 = tcg_temp_new();
804
TCGv t1 = tcg_temp_new();
805
- gen_get_gpr(t0, a->rs1);
806
+ gen_get_gpr(ctx, t0, a->rs1);
807
tcg_gen_addi_tl(t0, t0, a->imm);
808
809
tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
810
- gen_set_gpr(a->rd, t1);
811
+ gen_set_gpr(ctx, a->rd, t1);
812
tcg_temp_free(t0);
813
tcg_temp_free(t1);
814
return true;
815
@@ -XXX,XX +XXX,XX @@ static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
816
{
817
TCGv t0 = tcg_temp_new();
818
TCGv dat = tcg_temp_new();
819
- gen_get_gpr(t0, a->rs1);
820
+ gen_get_gpr(ctx, t0, a->rs1);
821
tcg_gen_addi_tl(t0, t0, a->imm);
822
- gen_get_gpr(dat, a->rs2);
823
+ gen_get_gpr(ctx, dat, a->rs2);
824
825
tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
826
tcg_temp_free(t0);
827
@@ -XXX,XX +XXX,XX @@ static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
828
{
829
REQUIRE_64BIT(ctx);
830
TCGv t = tcg_temp_new();
831
- gen_get_gpr(t, a->rs1);
832
+ gen_get_gpr(ctx, t, a->rs1);
833
tcg_gen_extract_tl(t, t, a->shamt, 32 - a->shamt);
834
/* sign-extend for W instructions */
835
tcg_gen_ext32s_tl(t, t);
836
- gen_set_gpr(a->rd, t);
837
+ gen_set_gpr(ctx, a->rd, t);
838
tcg_temp_free(t);
839
return true;
840
}
841
@@ -XXX,XX +XXX,XX @@ static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
842
{
843
REQUIRE_64BIT(ctx);
844
TCGv t = tcg_temp_new();
845
- gen_get_gpr(t, a->rs1);
846
+ gen_get_gpr(ctx, t, a->rs1);
847
tcg_gen_sextract_tl(t, t, a->shamt, 32 - a->shamt);
848
- gen_set_gpr(a->rd, t);
849
+ gen_set_gpr(ctx, a->rd, t);
850
tcg_temp_free(t);
851
return true;
852
}
853
@@ -XXX,XX +XXX,XX @@ static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
854
TCGv source1 = tcg_temp_new();
855
TCGv source2 = tcg_temp_new();
856
857
- gen_get_gpr(source1, a->rs1);
858
- gen_get_gpr(source2, a->rs2);
859
+ gen_get_gpr(ctx, source1, a->rs1);
860
+ gen_get_gpr(ctx, source2, a->rs2);
861
862
tcg_gen_andi_tl(source2, source2, 0x1F);
863
tcg_gen_shl_tl(source1, source1, source2);
864
865
tcg_gen_ext32s_tl(source1, source1);
866
- gen_set_gpr(a->rd, source1);
867
+ gen_set_gpr(ctx, a->rd, source1);
868
tcg_temp_free(source1);
869
tcg_temp_free(source2);
870
return true;
871
@@ -XXX,XX +XXX,XX @@ static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
872
TCGv source1 = tcg_temp_new();
873
TCGv source2 = tcg_temp_new();
874
875
- gen_get_gpr(source1, a->rs1);
876
- gen_get_gpr(source2, a->rs2);
877
+ gen_get_gpr(ctx, source1, a->rs1);
878
+ gen_get_gpr(ctx, source2, a->rs2);
879
880
/* clear upper 32 */
881
tcg_gen_ext32u_tl(source1, source1);
882
@@ -XXX,XX +XXX,XX @@ static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
883
tcg_gen_shr_tl(source1, source1, source2);
884
885
tcg_gen_ext32s_tl(source1, source1);
886
- gen_set_gpr(a->rd, source1);
887
+ gen_set_gpr(ctx, a->rd, source1);
888
tcg_temp_free(source1);
889
tcg_temp_free(source2);
890
return true;
891
@@ -XXX,XX +XXX,XX @@ static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
892
TCGv source1 = tcg_temp_new();
893
TCGv source2 = tcg_temp_new();
894
895
- gen_get_gpr(source1, a->rs1);
896
- gen_get_gpr(source2, a->rs2);
897
+ gen_get_gpr(ctx, source1, a->rs1);
898
+ gen_get_gpr(ctx, source2, a->rs2);
899
900
/*
901
* first, trick to get it to act like working on 32 bits (get rid of
902
@@ -XXX,XX +XXX,XX @@ static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
903
tcg_gen_andi_tl(source2, source2, 0x1F);
904
tcg_gen_sar_tl(source1, source1, source2);
905
906
- gen_set_gpr(a->rd, source1);
907
+ gen_set_gpr(ctx, a->rd, source1);
908
tcg_temp_free(source1);
909
tcg_temp_free(source2);
910
911
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
912
csr_store = tcg_temp_new(); \
913
dest = tcg_temp_new(); \
914
rs1_pass = tcg_temp_new(); \
915
- gen_get_gpr(source1, a->rs1); \
916
+ gen_get_gpr(ctx, source1, a->rs1); \
917
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); \
918
tcg_gen_movi_tl(rs1_pass, a->rs1); \
919
tcg_gen_movi_tl(csr_store, a->csr); \
920
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
921
} while (0)
922
923
#define RISCV_OP_CSR_POST do {\
924
- gen_set_gpr(a->rd, dest); \
925
+ gen_set_gpr(ctx, a->rd, dest); \
926
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \
927
exit_tb(ctx); \
928
ctx->base.is_jmp = DISAS_NORETURN; \
929
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
930
index XXXXXXX..XXXXXXX 100644
931
--- a/target/riscv/insn_trans/trans_rvm.c.inc
932
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
933
@@ -XXX,XX +XXX,XX @@ static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
934
REQUIRE_EXT(ctx, RVM);
935
TCGv source1 = tcg_temp_new();
936
TCGv source2 = tcg_temp_new();
937
- gen_get_gpr(source1, a->rs1);
938
- gen_get_gpr(source2, a->rs2);
939
+ gen_get_gpr(ctx, source1, a->rs1);
940
+ gen_get_gpr(ctx, source2, a->rs2);
941
942
tcg_gen_muls2_tl(source2, source1, source1, source2);
943
944
- gen_set_gpr(a->rd, source1);
945
+ gen_set_gpr(ctx, a->rd, source1);
946
tcg_temp_free(source1);
947
tcg_temp_free(source2);
948
return true;
949
@@ -XXX,XX +XXX,XX @@ static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
950
REQUIRE_EXT(ctx, RVM);
951
TCGv source1 = tcg_temp_new();
952
TCGv source2 = tcg_temp_new();
953
- gen_get_gpr(source1, a->rs1);
954
- gen_get_gpr(source2, a->rs2);
955
+ gen_get_gpr(ctx, source1, a->rs1);
956
+ gen_get_gpr(ctx, source2, a->rs2);
957
958
tcg_gen_mulu2_tl(source2, source1, source1, source2);
959
960
- gen_set_gpr(a->rd, source1);
961
+ gen_set_gpr(ctx, a->rd, source1);
962
tcg_temp_free(source1);
963
tcg_temp_free(source2);
964
return true;
965
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
966
index XXXXXXX..XXXXXXX 100644
967
--- a/target/riscv/insn_trans/trans_rvv.c.inc
968
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
969
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
970
s1 = tcg_constant_tl(RV_VLEN_MAX);
971
} else {
972
s1 = tcg_temp_new();
973
- gen_get_gpr(s1, a->rs1);
974
+ gen_get_gpr(ctx, s1, a->rs1);
975
}
976
- gen_get_gpr(s2, a->rs2);
977
+ gen_get_gpr(ctx, s2, a->rs2);
978
gen_helper_vsetvl(dst, cpu_env, s1, s2);
979
- gen_set_gpr(a->rd, dst);
980
+ gen_set_gpr(ctx, a->rd, dst);
981
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
982
lookup_and_goto_ptr(ctx);
983
ctx->base.is_jmp = DISAS_NORETURN;
984
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
985
s1 = tcg_constant_tl(RV_VLEN_MAX);
986
} else {
987
s1 = tcg_temp_new();
988
- gen_get_gpr(s1, a->rs1);
989
+ gen_get_gpr(ctx, s1, a->rs1);
990
}
991
gen_helper_vsetvl(dst, cpu_env, s1, s2);
992
- gen_set_gpr(a->rd, dst);
993
+ gen_set_gpr(ctx, a->rd, dst);
994
gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
995
ctx->base.is_jmp = DISAS_NORETURN;
996
997
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
998
*/
999
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1000
1001
- gen_get_gpr(base, rs1);
1002
+ gen_get_gpr(s, base, rs1);
1003
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1004
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1005
1006
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
1007
stride = tcg_temp_new();
1008
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1009
1010
- gen_get_gpr(base, rs1);
1011
- gen_get_gpr(stride, rs2);
1012
+ gen_get_gpr(s, base, rs1);
1013
+ gen_get_gpr(s, stride, rs2);
1014
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1015
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1016
1017
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
1018
base = tcg_temp_new();
1019
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1020
1021
- gen_get_gpr(base, rs1);
1022
+ gen_get_gpr(s, base, rs1);
1023
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1024
tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
1025
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1026
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
1027
base = tcg_temp_new();
1028
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1029
1030
- gen_get_gpr(base, rs1);
1031
+ gen_get_gpr(s, base, rs1);
1032
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1033
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1034
1035
@@ -XXX,XX +XXX,XX @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
1036
base = tcg_temp_new();
1037
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1038
1039
- gen_get_gpr(base, rs1);
1040
+ gen_get_gpr(s, base, rs1);
1041
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1042
tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
1043
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1044
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
1045
mask = tcg_temp_new_ptr();
1046
src2 = tcg_temp_new_ptr();
1047
src1 = tcg_temp_new();
1048
- gen_get_gpr(src1, rs1);
1049
+ gen_get_gpr(s, src1, rs1);
1050
1051
data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
1052
data = FIELD_DP32(data, VDATA, VM, vm);
1053
@@ -XXX,XX +XXX,XX @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
1054
TCGv_i64 src1 = tcg_temp_new_i64();
1055
TCGv tmp = tcg_temp_new();
1056
1057
- gen_get_gpr(tmp, a->rs1);
1058
+ gen_get_gpr(s, tmp, a->rs1);
1059
tcg_gen_ext_tl_i64(src1, tmp);
1060
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1061
src1, MAXSZ(s), MAXSZ(s));
1062
@@ -XXX,XX +XXX,XX @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1063
TCGv_i32 src1 = tcg_temp_new_i32();
1064
TCGv tmp = tcg_temp_new();
1065
1066
- gen_get_gpr(tmp, a->rs1);
1067
+ gen_get_gpr(s, tmp, a->rs1);
1068
tcg_gen_trunc_tl_i32(src1, tmp);
1069
tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1070
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1071
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1072
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1073
1074
s1 = tcg_temp_new();
1075
- gen_get_gpr(s1, a->rs1);
1076
+ gen_get_gpr(s, s1, a->rs1);
1077
1078
if (s->vl_eq_vlmax) {
1079
tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1080
@@ -XXX,XX +XXX,XX @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
1081
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1082
1083
gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
1084
- gen_set_gpr(a->rd, dst);
1085
+ gen_set_gpr(s, a->rd, dst);
1086
1087
tcg_temp_free_ptr(mask);
1088
tcg_temp_free_ptr(src2);
1089
@@ -XXX,XX +XXX,XX @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
1090
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1091
1092
gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
1093
- gen_set_gpr(a->rd, dst);
1094
+ gen_set_gpr(s, a->rd, dst);
1095
1096
tcg_temp_free_ptr(mask);
1097
tcg_temp_free_ptr(src2);
1098
@@ -XXX,XX +XXX,XX @@ static bool trans_vext_x_v(DisasContext *s, arg_r *a)
1099
vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
1100
}
1101
tcg_gen_trunc_i64_tl(dest, tmp);
1102
- gen_set_gpr(a->rd, dest);
1103
+ gen_set_gpr(s, a->rd, dest);
1104
1105
tcg_temp_free(dest);
1106
tcg_temp_free_i64(tmp);
1107
--
1108
2.31.1
1109
1110
1111
diff view generated by jsdifflib
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Introduce get_gpr, dest_gpr, temp_new -- new helpers that do not force
4
tcg globals into temps, returning a constant 0 for $zero as source and
5
a new temp for $zero as destination.
6
7
Introduce ctx->w for simplifying word operations, such as addw.
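
As a rough illustration only (not taken from this patch; the insn name is
made up, the helpers are the ones added below), a translation function
written against the new interface looks like:

/* Sketch: translate an R-type add with the new helpers. */
static bool trans_add_sketch(DisasContext *ctx, arg_r *a)
{
    TCGv dest = dest_gpr(ctx, a->rd);            /* new temp when rd is $zero */
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);  /* constant 0 when rs1 is $zero */
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_add_tl(dest, src1, src2);
    gen_set_gpr(ctx, a->rd, dest);               /* sign-extends when ctx->w is set */
    return true;
}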
8
9
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210823195529.560295-6-richard.henderson@linaro.org
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/translate.c | 97 +++++++++++++++++++++++++++++++++-------
16
1 file changed, 81 insertions(+), 16 deletions(-)
17
18
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/translate.c
21
+++ b/target/riscv/translate.c
22
@@ -XXX,XX +XXX,XX @@ static TCGv load_val;
23
24
#include "exec/gen-icount.h"
25
26
+/*
27
+ * If an operation is being performed on less than TARGET_LONG_BITS,
28
+ * it may require the inputs to be sign- or zero-extended; which will
29
+ * depend on the exact operation being performed.
30
+ */
31
+typedef enum {
32
+ EXT_NONE,
33
+ EXT_SIGN,
34
+ EXT_ZERO,
35
+} DisasExtend;
36
+
37
typedef struct DisasContext {
38
DisasContextBase base;
39
/* pc_succ_insn points to the instruction following base.pc_next */
40
target_ulong pc_succ_insn;
41
target_ulong priv_ver;
42
- bool virt_enabled;
43
+ target_ulong misa;
44
uint32_t opcode;
45
uint32_t mstatus_fs;
46
- target_ulong misa;
47
uint32_t mem_idx;
48
/* Remember the rounding mode encoded in the previous fp instruction,
49
which we have already installed into env->fp_status. Or -1 for
50
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
51
to any system register, which includes CSR_FRM, so we do not have
52
to reset this known value. */
53
int frm;
54
+ bool w;
55
+ bool virt_enabled;
56
bool ext_ifencei;
57
bool hlsx;
58
/* vector extension */
59
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
60
uint16_t vlen;
61
uint16_t mlen;
62
bool vl_eq_vlmax;
63
+ uint8_t ntemp;
64
CPUState *cs;
65
+ TCGv zero;
66
+ /* Space for 3 operands plus 1 extra for address computation. */
67
+ TCGv temp[4];
68
} DisasContext;
69
70
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
71
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
72
}
73
}
74
75
-/* Wrapper for getting reg values - need to check of reg is zero since
76
- * cpu_gpr[0] is not actually allocated
77
+/*
78
+ * Wrappers for getting reg values.
79
+ *
80
+ * The $zero register does not have cpu_gpr[0] allocated -- we supply the
81
+ * constant zero as a source, and an uninitialized sink as destination.
82
+ *
83
+ * Further, we may provide an extension for word operations.
84
*/
85
-static void gen_get_gpr(DisasContext *ctx, TCGv t, int reg_num)
86
+static TCGv temp_new(DisasContext *ctx)
87
+{
88
+ assert(ctx->ntemp < ARRAY_SIZE(ctx->temp));
89
+ return ctx->temp[ctx->ntemp++] = tcg_temp_new();
90
+}
91
+
92
+static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
93
{
94
+ TCGv t;
95
+
96
if (reg_num == 0) {
97
- tcg_gen_movi_tl(t, 0);
98
- } else {
99
- tcg_gen_mov_tl(t, cpu_gpr[reg_num]);
100
+ return ctx->zero;
101
}
102
+
103
+ switch (ctx->w ? ext : EXT_NONE) {
104
+ case EXT_NONE:
105
+ return cpu_gpr[reg_num];
106
+ case EXT_SIGN:
107
+ t = temp_new(ctx);
108
+ tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]);
109
+ return t;
110
+ case EXT_ZERO:
111
+ t = temp_new(ctx);
112
+ tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]);
113
+ return t;
114
+ }
115
+ g_assert_not_reached();
116
}
117
118
-/* Wrapper for setting reg values - need to check of reg is zero since
119
- * cpu_gpr[0] is not actually allocated. this is more for safety purposes,
120
- * since we usually avoid calling the OP_TYPE_gen function if we see a write to
121
- * $zero
122
- */
123
-static void gen_set_gpr(DisasContext *ctx, int reg_num_dst, TCGv t)
124
+static void gen_get_gpr(DisasContext *ctx, TCGv t, int reg_num)
125
+{
126
+ tcg_gen_mov_tl(t, get_gpr(ctx, reg_num, EXT_NONE));
127
+}
128
+
129
+static TCGv __attribute__((unused)) dest_gpr(DisasContext *ctx, int reg_num)
130
+{
131
+ if (reg_num == 0 || ctx->w) {
132
+ return temp_new(ctx);
133
+ }
134
+ return cpu_gpr[reg_num];
135
+}
136
+
137
+static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
138
{
139
- if (reg_num_dst != 0) {
140
- tcg_gen_mov_tl(cpu_gpr[reg_num_dst], t);
141
+ if (reg_num != 0) {
142
+ if (ctx->w) {
143
+ tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
144
+ } else {
145
+ tcg_gen_mov_tl(cpu_gpr[reg_num], t);
146
+ }
147
}
148
}
149
150
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
151
ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul);
152
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
153
ctx->cs = cs;
154
+ ctx->w = false;
155
+ ctx->ntemp = 0;
156
+ memset(ctx->temp, 0, sizeof(ctx->temp));
157
+
158
+ ctx->zero = tcg_constant_tl(0);
159
}
160
161
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
162
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
163
164
decode_opc(env, ctx, opcode16);
165
ctx->base.pc_next = ctx->pc_succ_insn;
166
+ ctx->w = false;
167
+
168
+ for (int i = ctx->ntemp - 1; i >= 0; --i) {
169
+ tcg_temp_free(ctx->temp[i]);
170
+ ctx->temp[i] = NULL;
171
+ }
172
+ ctx->ntemp = 0;
173
174
if (ctx->base.is_jmp == DISAS_NEXT) {
175
target_ulong page_start;
176
--
177
2.31.1
178
179
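As a rough sketch (not part of the diff above) of how a translation function looks once written against the new accessors; trans_example_add and arg_add are illustrative stand-ins for decoder-generated names, and the snippet assumes the QEMU TCG front end of the patch above:

static bool trans_example_add(DisasContext *ctx, arg_add *a)
{
    TCGv dest = dest_gpr(ctx, a->rd);            /* temp when rd is x0 or ctx->w is set */
    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);  /* constant zero when rs1 is x0 */
    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);

    tcg_gen_add_tl(dest, src1, src2);
    gen_set_gpr(ctx, a->rd, dest);               /* no-op for x0; sign-extends when ctx->w */
    return true;
}

Any temporaries handed out by temp_new() are released per instruction in riscv_tr_translate_insn(), so trans_* functions no longer free them explicitly.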
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Most arithmetic does not require extending the inputs.
4
Exceptions include division, comparison and minmax.
5
6
Begin using ctx->w, which allows elimination of gen_addw,
7
gen_subw, gen_mulw.
8
9
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-id: 20210823195529.560295-7-richard.henderson@linaro.org
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/translate.c | 69 +++++++------------------
16
target/riscv/insn_trans/trans_rvb.c.inc | 30 +++++------
17
target/riscv/insn_trans/trans_rvi.c.inc | 39 ++++++++------
18
target/riscv/insn_trans/trans_rvm.c.inc | 16 +++---
19
4 files changed, 64 insertions(+), 90 deletions(-)
20
21
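For readers less familiar with RV64, a minimal host-side model of the *W semantics that ctx->w captures (plain C, hypothetical, not QEMU code): the operation can be performed at full width, and only the sign-extended low 32 bits of the result are architecturally visible, which is why add/sub/mul need no input extension while division, comparison and min/max do.

#include <stdint.h>

/* ADDW: full-width add, keep only the sign-extended low 32 bits. */
static int64_t addw_model(int64_t rs1, int64_t rs2)
{
    uint64_t sum = (uint64_t)rs1 + (uint64_t)rs2;   /* wrapping 64-bit add */
    return (int64_t)(int32_t)sum;                   /* architectural W result */
}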
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/translate.c
24
+++ b/target/riscv/translate.c
25
@@ -XXX,XX +XXX,XX @@ static void gen_get_gpr(DisasContext *ctx, TCGv t, int reg_num)
26
tcg_gen_mov_tl(t, get_gpr(ctx, reg_num, EXT_NONE));
27
}
28
29
-static TCGv __attribute__((unused)) dest_gpr(DisasContext *ctx, int reg_num)
30
+static TCGv dest_gpr(DisasContext *ctx, int reg_num)
31
{
32
if (reg_num == 0 || ctx->w) {
33
return temp_new(ctx);
34
@@ -XXX,XX +XXX,XX @@ static int ex_rvc_shifti(DisasContext *ctx, int imm)
35
/* Include the auto-generated decoder for 32 bit insn */
36
#include "decode-insn32.c.inc"
37
38
-static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a,
39
+static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
40
void (*func)(TCGv, TCGv, target_long))
41
{
42
- TCGv source1;
43
- source1 = tcg_temp_new();
44
-
45
- gen_get_gpr(ctx, source1, a->rs1);
46
+ TCGv dest = dest_gpr(ctx, a->rd);
47
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
48
49
- (*func)(source1, source1, a->imm);
50
+ func(dest, src1, a->imm);
51
52
- gen_set_gpr(ctx, a->rd, source1);
53
- tcg_temp_free(source1);
54
+ gen_set_gpr(ctx, a->rd, dest);
55
return true;
56
}
57
58
-static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a,
59
+static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
60
void (*func)(TCGv, TCGv, TCGv))
61
{
62
- TCGv source1, source2;
63
- source1 = tcg_temp_new();
64
- source2 = tcg_temp_new();
65
+ TCGv dest = dest_gpr(ctx, a->rd);
66
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
67
+ TCGv src2 = tcg_constant_tl(a->imm);
68
69
- gen_get_gpr(ctx, source1, a->rs1);
70
- tcg_gen_movi_tl(source2, a->imm);
71
+ func(dest, src1, src2);
72
73
- (*func)(source1, source1, source2);
74
-
75
- gen_set_gpr(ctx, a->rd, source1);
76
- tcg_temp_free(source1);
77
- tcg_temp_free(source2);
78
+ gen_set_gpr(ctx, a->rd, dest);
79
return true;
80
}
81
82
-static void gen_addw(TCGv ret, TCGv arg1, TCGv arg2)
83
-{
84
- tcg_gen_add_tl(ret, arg1, arg2);
85
- tcg_gen_ext32s_tl(ret, ret);
86
-}
87
-
88
-static void gen_subw(TCGv ret, TCGv arg1, TCGv arg2)
89
-{
90
- tcg_gen_sub_tl(ret, arg1, arg2);
91
- tcg_gen_ext32s_tl(ret, ret);
92
-}
93
-
94
-static void gen_mulw(TCGv ret, TCGv arg1, TCGv arg2)
95
-{
96
- tcg_gen_mul_tl(ret, arg1, arg2);
97
- tcg_gen_ext32s_tl(ret, ret);
98
-}
99
-
100
static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
101
void(*func)(TCGv, TCGv, TCGv))
102
{
103
@@ -XXX,XX +XXX,XX @@ static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
104
tcg_gen_add_tl(ret, arg1, arg2);
105
}
106
107
-static bool gen_arith(DisasContext *ctx, arg_r *a,
108
- void(*func)(TCGv, TCGv, TCGv))
109
+static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
110
+ void (*func)(TCGv, TCGv, TCGv))
111
{
112
- TCGv source1, source2;
113
- source1 = tcg_temp_new();
114
- source2 = tcg_temp_new();
115
+ TCGv dest = dest_gpr(ctx, a->rd);
116
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
117
+ TCGv src2 = get_gpr(ctx, a->rs2, ext);
118
119
- gen_get_gpr(ctx, source1, a->rs1);
120
- gen_get_gpr(ctx, source2, a->rs2);
121
+ func(dest, src1, src2);
122
123
- (*func)(source1, source1, source2);
124
-
125
- gen_set_gpr(ctx, a->rd, source1);
126
- tcg_temp_free(source1);
127
- tcg_temp_free(source2);
128
+ gen_set_gpr(ctx, a->rd, dest);
129
return true;
130
}
131
132
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
133
index XXXXXXX..XXXXXXX 100644
134
--- a/target/riscv/insn_trans/trans_rvb.c.inc
135
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
136
@@ -XXX,XX +XXX,XX @@ static bool trans_cpop(DisasContext *ctx, arg_cpop *a)
137
static bool trans_andn(DisasContext *ctx, arg_andn *a)
138
{
139
REQUIRE_EXT(ctx, RVB);
140
- return gen_arith(ctx, a, tcg_gen_andc_tl);
141
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_andc_tl);
142
}
143
144
static bool trans_orn(DisasContext *ctx, arg_orn *a)
145
{
146
REQUIRE_EXT(ctx, RVB);
147
- return gen_arith(ctx, a, tcg_gen_orc_tl);
148
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_orc_tl);
149
}
150
151
static bool trans_xnor(DisasContext *ctx, arg_xnor *a)
152
{
153
REQUIRE_EXT(ctx, RVB);
154
- return gen_arith(ctx, a, tcg_gen_eqv_tl);
155
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl);
156
}
157
158
static bool trans_pack(DisasContext *ctx, arg_pack *a)
159
{
160
REQUIRE_EXT(ctx, RVB);
161
- return gen_arith(ctx, a, gen_pack);
162
+ return gen_arith(ctx, a, EXT_NONE, gen_pack);
163
}
164
165
static bool trans_packu(DisasContext *ctx, arg_packu *a)
166
{
167
REQUIRE_EXT(ctx, RVB);
168
- return gen_arith(ctx, a, gen_packu);
169
+ return gen_arith(ctx, a, EXT_NONE, gen_packu);
170
}
171
172
static bool trans_packh(DisasContext *ctx, arg_packh *a)
173
{
174
REQUIRE_EXT(ctx, RVB);
175
- return gen_arith(ctx, a, gen_packh);
176
+ return gen_arith(ctx, a, EXT_NONE, gen_packh);
177
}
178
179
static bool trans_min(DisasContext *ctx, arg_min *a)
180
{
181
REQUIRE_EXT(ctx, RVB);
182
- return gen_arith(ctx, a, tcg_gen_smin_tl);
183
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl);
184
}
185
186
static bool trans_max(DisasContext *ctx, arg_max *a)
187
{
188
REQUIRE_EXT(ctx, RVB);
189
- return gen_arith(ctx, a, tcg_gen_smax_tl);
190
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl);
191
}
192
193
static bool trans_minu(DisasContext *ctx, arg_minu *a)
194
{
195
REQUIRE_EXT(ctx, RVB);
196
- return gen_arith(ctx, a, tcg_gen_umin_tl);
197
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl);
198
}
199
200
static bool trans_maxu(DisasContext *ctx, arg_maxu *a)
201
{
202
REQUIRE_EXT(ctx, RVB);
203
- return gen_arith(ctx, a, tcg_gen_umax_tl);
204
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl);
205
}
206
207
static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a)
208
@@ -XXX,XX +XXX,XX @@ static bool trans_gorci(DisasContext *ctx, arg_gorci *a)
209
static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \
210
{ \
211
REQUIRE_EXT(ctx, RVB); \
212
- return gen_arith(ctx, a, gen_sh##SHAMT##add); \
213
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \
214
}
215
216
GEN_TRANS_SHADD(1)
217
@@ -XXX,XX +XXX,XX @@ static bool trans_packw(DisasContext *ctx, arg_packw *a)
218
{
219
REQUIRE_64BIT(ctx);
220
REQUIRE_EXT(ctx, RVB);
221
- return gen_arith(ctx, a, gen_packw);
222
+ return gen_arith(ctx, a, EXT_NONE, gen_packw);
223
}
224
225
static bool trans_packuw(DisasContext *ctx, arg_packuw *a)
226
{
227
REQUIRE_64BIT(ctx);
228
REQUIRE_EXT(ctx, RVB);
229
- return gen_arith(ctx, a, gen_packuw);
230
+ return gen_arith(ctx, a, EXT_NONE, gen_packuw);
231
}
232
233
static bool trans_bsetw(DisasContext *ctx, arg_bsetw *a)
234
@@ -XXX,XX +XXX,XX @@ static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \
235
{ \
236
REQUIRE_64BIT(ctx); \
237
REQUIRE_EXT(ctx, RVB); \
238
- return gen_arith(ctx, a, gen_sh##SHAMT##add_uw); \
239
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw); \
240
}
241
242
GEN_TRANS_SHADD_UW(1)
243
@@ -XXX,XX +XXX,XX @@ static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a)
244
{
245
REQUIRE_64BIT(ctx);
246
REQUIRE_EXT(ctx, RVB);
247
- return gen_arith(ctx, a, gen_add_uw);
248
+ return gen_arith(ctx, a, EXT_NONE, gen_add_uw);
249
}
250
251
static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
252
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
253
index XXXXXXX..XXXXXXX 100644
254
--- a/target/riscv/insn_trans/trans_rvi.c.inc
255
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
256
@@ -XXX,XX +XXX,XX @@ static bool trans_sd(DisasContext *ctx, arg_sd *a)
257
258
static bool trans_addi(DisasContext *ctx, arg_addi *a)
259
{
260
- return gen_arith_imm_fn(ctx, a, &tcg_gen_addi_tl);
261
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
262
}
263
264
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
265
@@ -XXX,XX +XXX,XX @@ static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
266
tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
267
}
268
269
-
270
static bool trans_slti(DisasContext *ctx, arg_slti *a)
271
{
272
- return gen_arith_imm_tl(ctx, a, &gen_slt);
273
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
274
}
275
276
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
277
{
278
- return gen_arith_imm_tl(ctx, a, &gen_sltu);
279
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
280
}
281
282
static bool trans_xori(DisasContext *ctx, arg_xori *a)
283
{
284
- return gen_arith_imm_fn(ctx, a, &tcg_gen_xori_tl);
285
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_xori_tl);
286
}
287
+
288
static bool trans_ori(DisasContext *ctx, arg_ori *a)
289
{
290
- return gen_arith_imm_fn(ctx, a, &tcg_gen_ori_tl);
291
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_ori_tl);
292
}
293
+
294
static bool trans_andi(DisasContext *ctx, arg_andi *a)
295
{
296
- return gen_arith_imm_fn(ctx, a, &tcg_gen_andi_tl);
297
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_andi_tl);
298
}
299
+
300
static bool trans_slli(DisasContext *ctx, arg_slli *a)
301
{
302
return gen_shifti(ctx, a, tcg_gen_shl_tl);
303
@@ -XXX,XX +XXX,XX @@ static bool trans_srai(DisasContext *ctx, arg_srai *a)
304
305
static bool trans_add(DisasContext *ctx, arg_add *a)
306
{
307
- return gen_arith(ctx, a, &tcg_gen_add_tl);
308
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
309
}
310
311
static bool trans_sub(DisasContext *ctx, arg_sub *a)
312
{
313
- return gen_arith(ctx, a, &tcg_gen_sub_tl);
314
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
315
}
316
317
static bool trans_sll(DisasContext *ctx, arg_sll *a)
318
@@ -XXX,XX +XXX,XX @@ static bool trans_sll(DisasContext *ctx, arg_sll *a)
319
320
static bool trans_slt(DisasContext *ctx, arg_slt *a)
321
{
322
- return gen_arith(ctx, a, &gen_slt);
323
+ return gen_arith(ctx, a, EXT_SIGN, gen_slt);
324
}
325
326
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
327
{
328
- return gen_arith(ctx, a, &gen_sltu);
329
+ return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
330
}
331
332
static bool trans_xor(DisasContext *ctx, arg_xor *a)
333
{
334
- return gen_arith(ctx, a, &tcg_gen_xor_tl);
335
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_xor_tl);
336
}
337
338
static bool trans_srl(DisasContext *ctx, arg_srl *a)
339
@@ -XXX,XX +XXX,XX @@ static bool trans_sra(DisasContext *ctx, arg_sra *a)
340
341
static bool trans_or(DisasContext *ctx, arg_or *a)
342
{
343
- return gen_arith(ctx, a, &tcg_gen_or_tl);
344
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_or_tl);
345
}
346
347
static bool trans_and(DisasContext *ctx, arg_and *a)
348
{
349
- return gen_arith(ctx, a, &tcg_gen_and_tl);
350
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_and_tl);
351
}
352
353
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
354
{
355
REQUIRE_64BIT(ctx);
356
- return gen_arith_imm_tl(ctx, a, &gen_addw);
357
+ ctx->w = true;
358
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
359
}
360
361
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
362
@@ -XXX,XX +XXX,XX @@ static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
363
static bool trans_addw(DisasContext *ctx, arg_addw *a)
364
{
365
REQUIRE_64BIT(ctx);
366
- return gen_arith(ctx, a, &gen_addw);
367
+ ctx->w = true;
368
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
369
}
370
371
static bool trans_subw(DisasContext *ctx, arg_subw *a)
372
{
373
REQUIRE_64BIT(ctx);
374
- return gen_arith(ctx, a, &gen_subw);
375
+ ctx->w = true;
376
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
377
}
378
379
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
380
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
381
index XXXXXXX..XXXXXXX 100644
382
--- a/target/riscv/insn_trans/trans_rvm.c.inc
383
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
384
@@ -XXX,XX +XXX,XX @@
385
static bool trans_mul(DisasContext *ctx, arg_mul *a)
386
{
387
REQUIRE_EXT(ctx, RVM);
388
- return gen_arith(ctx, a, &tcg_gen_mul_tl);
389
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
390
}
391
392
static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
393
@@ -XXX,XX +XXX,XX @@ static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
394
static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
395
{
396
REQUIRE_EXT(ctx, RVM);
397
- return gen_arith(ctx, a, &gen_mulhsu);
398
+ return gen_arith(ctx, a, EXT_NONE, gen_mulhsu);
399
}
400
401
static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
402
@@ -XXX,XX +XXX,XX @@ static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
403
static bool trans_div(DisasContext *ctx, arg_div *a)
404
{
405
REQUIRE_EXT(ctx, RVM);
406
- return gen_arith(ctx, a, &gen_div);
407
+ return gen_arith(ctx, a, EXT_SIGN, gen_div);
408
}
409
410
static bool trans_divu(DisasContext *ctx, arg_divu *a)
411
{
412
REQUIRE_EXT(ctx, RVM);
413
- return gen_arith(ctx, a, &gen_divu);
414
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu);
415
}
416
417
static bool trans_rem(DisasContext *ctx, arg_rem *a)
418
{
419
REQUIRE_EXT(ctx, RVM);
420
- return gen_arith(ctx, a, &gen_rem);
421
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem);
422
}
423
424
static bool trans_remu(DisasContext *ctx, arg_remu *a)
425
{
426
REQUIRE_EXT(ctx, RVM);
427
- return gen_arith(ctx, a, &gen_remu);
428
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu);
429
}
430
431
static bool trans_mulw(DisasContext *ctx, arg_mulw *a)
432
{
433
REQUIRE_64BIT(ctx);
434
REQUIRE_EXT(ctx, RVM);
435
-
436
- return gen_arith(ctx, a, &gen_mulw);
437
+ ctx->w = true;
438
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
439
}
440
441
static bool trans_divw(DisasContext *ctx, arg_divw *a)
442
--
443
2.31.1
444
445
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The current two-stage lookup detection in riscv_cpu_do_interrupt falls
3
Use ctx->w and the enhanced gen_arith function.
4
short of its purpose, as all it checks is whether two-stage address
5
translation via either the hypervisor load/store instructions or the
6
MPRV feature would be allowed.
7
4
8
What we really need instead is whether two-stage address translation was
5
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
9
active when the exception was raised. However, in riscv_cpu_do_interrupt
10
we do not have the information to reliably detect this. Therefore, when
11
we raise a memory fault exception we have to record whether two-stage
12
address translation is active.
13
14
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
15
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
16
Message-id: 20210319141459.1196741-1-georg.kotheimer@kernkonzept.com
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210823195529.560295-8-richard.henderson@linaro.org
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
10
---
19
target/riscv/cpu.h | 4 ++++
11
target/riscv/translate.c | 42 -------------------------
20
target/riscv/cpu.c | 1 +
12
target/riscv/insn_trans/trans_rvm.c.inc | 16 +++++-----
21
target/riscv/cpu_helper.c | 21 ++++++++-------------
13
2 files changed, 8 insertions(+), 50 deletions(-)
22
3 files changed, 13 insertions(+), 13 deletions(-)
23
14
24
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
15
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
25
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
26
--- a/target/riscv/cpu.h
17
--- a/target/riscv/translate.c
27
+++ b/target/riscv/cpu.h
18
+++ b/target/riscv/translate.c
28
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
19
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
29
target_ulong satp_hs;
20
return true;
30
uint64_t mstatus_hs;
21
}
31
22
32
+ /* Signals whether the current exception occurred with two-stage address
23
-static bool gen_arith_div_w(DisasContext *ctx, arg_r *a,
33
+ translation active. */
24
- void(*func)(TCGv, TCGv, TCGv))
34
+ bool two_stage_lookup;
25
-{
35
+
26
- TCGv source1, source2;
36
target_ulong scounteren;
27
- source1 = tcg_temp_new();
37
target_ulong mcounteren;
28
- source2 = tcg_temp_new();
38
29
-
39
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
30
- gen_get_gpr(ctx, source1, a->rs1);
31
- gen_get_gpr(ctx, source2, a->rs2);
32
- tcg_gen_ext32s_tl(source1, source1);
33
- tcg_gen_ext32s_tl(source2, source2);
34
-
35
- (*func)(source1, source1, source2);
36
-
37
- tcg_gen_ext32s_tl(source1, source1);
38
- gen_set_gpr(ctx, a->rd, source1);
39
- tcg_temp_free(source1);
40
- tcg_temp_free(source2);
41
- return true;
42
-}
43
-
44
-static bool gen_arith_div_uw(DisasContext *ctx, arg_r *a,
45
- void(*func)(TCGv, TCGv, TCGv))
46
-{
47
- TCGv source1, source2;
48
- source1 = tcg_temp_new();
49
- source2 = tcg_temp_new();
50
-
51
- gen_get_gpr(ctx, source1, a->rs1);
52
- gen_get_gpr(ctx, source2, a->rs2);
53
- tcg_gen_ext32u_tl(source1, source1);
54
- tcg_gen_ext32u_tl(source2, source2);
55
-
56
- (*func)(source1, source1, source2);
57
-
58
- tcg_gen_ext32s_tl(source1, source1);
59
- gen_set_gpr(ctx, a->rd, source1);
60
- tcg_temp_free(source1);
61
- tcg_temp_free(source2);
62
- return true;
63
-}
64
-
65
static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2)
66
{
67
tcg_gen_deposit_tl(ret, arg1, arg2,
68
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
40
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
41
--- a/target/riscv/cpu.c
70
--- a/target/riscv/insn_trans/trans_rvm.c.inc
42
+++ b/target/riscv/cpu.c
71
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
43
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset(DeviceState *dev)
72
@@ -XXX,XX +XXX,XX @@ static bool trans_divw(DisasContext *ctx, arg_divw *a)
44
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
73
{
45
env->mcause = 0;
74
REQUIRE_64BIT(ctx);
46
env->pc = env->resetvec;
75
REQUIRE_EXT(ctx, RVM);
47
+ env->two_stage_lookup = false;
76
-
48
#endif
77
- return gen_arith_div_w(ctx, a, &gen_div);
49
cs->exception_index = EXCP_NONE;
78
+ ctx->w = true;
50
env->load_res = -1;
79
+ return gen_arith(ctx, a, EXT_SIGN, gen_div);
51
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/target/riscv/cpu_helper.c
54
+++ b/target/riscv/cpu_helper.c
55
@@ -XXX,XX +XXX,XX @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
56
g_assert_not_reached();
57
}
58
env->badaddr = address;
59
+ env->two_stage_lookup = two_stage;
60
}
80
}
61
81
62
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
82
static bool trans_divuw(DisasContext *ctx, arg_divuw *a)
63
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
83
{
64
}
84
REQUIRE_64BIT(ctx);
65
85
REQUIRE_EXT(ctx, RVM);
66
env->badaddr = addr;
86
-
67
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
87
- return gen_arith_div_uw(ctx, a, &gen_divu);
68
+ riscv_cpu_two_stage_lookup(mmu_idx);
88
+ ctx->w = true;
69
riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
89
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu);
70
}
90
}
71
91
72
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
92
static bool trans_remw(DisasContext *ctx, arg_remw *a)
73
g_assert_not_reached();
93
{
74
}
94
REQUIRE_64BIT(ctx);
75
env->badaddr = addr;
95
REQUIRE_EXT(ctx, RVM);
76
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
96
-
77
+ riscv_cpu_two_stage_lookup(mmu_idx);
97
- return gen_arith_div_w(ctx, a, &gen_rem);
78
riscv_raise_exception(env, cs->exception_index, retaddr);
98
+ ctx->w = true;
99
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem);
79
}
100
}
80
#endif /* !CONFIG_USER_ONLY */
101
81
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
102
static bool trans_remuw(DisasContext *ctx, arg_remuw *a)
82
/* handle the trap in S-mode */
103
{
83
if (riscv_has_ext(env, RVH)) {
104
REQUIRE_64BIT(ctx);
84
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
105
REQUIRE_EXT(ctx, RVM);
85
- bool two_stage_lookup = false;
86
87
- if (env->priv == PRV_M ||
88
- (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
89
- (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
90
- get_field(env->hstatus, HSTATUS_HU))) {
91
- two_stage_lookup = true;
92
- }
93
-
106
-
94
- if ((riscv_cpu_virt_enabled(env) || two_stage_lookup) && write_tval) {
107
- return gen_arith_div_uw(ctx, a, &gen_remu);
95
+ if (env->two_stage_lookup && write_tval) {
108
+ ctx->w = true;
96
/*
109
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu);
97
* If we are writing a guest virtual address to stval, set
98
* this to 1. If we are trapping to VS we will set this to 0
99
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
100
riscv_cpu_set_force_hs_excep(env, 0);
101
} else {
102
/* Trap into HS mode */
103
- if (!two_stage_lookup) {
104
- env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
105
- riscv_cpu_virt_enabled(env));
106
- }
107
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
108
htval = env->guest_phys_fault_addr;
109
}
110
}
111
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
112
* RISC-V ISA Specification.
113
*/
114
115
+ env->two_stage_lookup = false;
116
#endif
117
cs->exception_index = EXCP_NONE; /* mark handled to qemu */
118
}
110
}
119
--
111
--
120
2.30.1
112
2.31.1
121
113
122
114
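Condensing the cpu_helper.c hunks above into a sketch (record_two_stage is a hypothetical helper name used only for illustration, assuming the QEMU types from the patch): the fault path stores whether the access used two-stage translation, and riscv_cpu_do_interrupt() later tests env->two_stage_lookup instead of trying to re-derive it from the privilege state.

static void record_two_stage(CPURISCVState *env, target_ulong addr, bool two_stage)
{
    env->badaddr = addr;
    env->two_stage_lookup = two_stage;   /* consumed in riscv_cpu_do_interrupt() */
}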
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Split out gen_mulh and gen_mulhu and use the common helper.
4
5
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210823195529.560295-9-richard.henderson@linaro.org
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/insn_trans/trans_rvm.c.inc | 40 +++++++++++--------------
12
1 file changed, 18 insertions(+), 22 deletions(-)
13
14
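For reference, a hypothetical host-side model (GCC/Clang __int128, XLEN=64, not QEMU code) of what the split-out helpers compute: the high half of the double-width product, with the low half from tcg_gen_muls2_tl()/tcg_gen_mulu2_tl() simply discarded.

#include <stdint.h>

/* MULH: upper 64 bits of the signed 128-bit product. */
static int64_t mulh_model(int64_t rs1, int64_t rs2)
{
    __int128 prod = (__int128)rs1 * (__int128)rs2;
    return (int64_t)(prod >> 64);
}

/* MULHU: unsigned counterpart. */
static uint64_t mulhu_model(uint64_t rs1, uint64_t rs2)
{
    unsigned __int128 prod = (unsigned __int128)rs1 * rs2;
    return (uint64_t)(prod >> 64);
}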
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvm.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool trans_mul(DisasContext *ctx, arg_mul *a)
19
return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
20
}
21
22
-static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
23
+static void gen_mulh(TCGv ret, TCGv s1, TCGv s2)
24
{
25
- REQUIRE_EXT(ctx, RVM);
26
- TCGv source1 = tcg_temp_new();
27
- TCGv source2 = tcg_temp_new();
28
- gen_get_gpr(ctx, source1, a->rs1);
29
- gen_get_gpr(ctx, source2, a->rs2);
30
+ TCGv discard = tcg_temp_new();
31
32
- tcg_gen_muls2_tl(source2, source1, source1, source2);
33
+ tcg_gen_muls2_tl(discard, ret, s1, s2);
34
+ tcg_temp_free(discard);
35
+}
36
37
- gen_set_gpr(ctx, a->rd, source1);
38
- tcg_temp_free(source1);
39
- tcg_temp_free(source2);
40
- return true;
41
+static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
42
+{
43
+ REQUIRE_EXT(ctx, RVM);
44
+ return gen_arith(ctx, a, EXT_NONE, gen_mulh);
45
}
46
47
static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
48
@@ -XXX,XX +XXX,XX @@ static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
49
return gen_arith(ctx, a, EXT_NONE, gen_mulhsu);
50
}
51
52
-static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
53
+static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2)
54
{
55
- REQUIRE_EXT(ctx, RVM);
56
- TCGv source1 = tcg_temp_new();
57
- TCGv source2 = tcg_temp_new();
58
- gen_get_gpr(ctx, source1, a->rs1);
59
- gen_get_gpr(ctx, source2, a->rs2);
60
+ TCGv discard = tcg_temp_new();
61
62
- tcg_gen_mulu2_tl(source2, source1, source1, source2);
63
+ tcg_gen_mulu2_tl(discard, ret, s1, s2);
64
+ tcg_temp_free(discard);
65
+}
66
67
- gen_set_gpr(ctx, a->rd, source1);
68
- tcg_temp_free(source1);
69
- tcg_temp_free(source2);
70
- return true;
71
+static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
72
+{
73
+ REQUIRE_EXT(ctx, RVM);
74
+ return gen_arith(ctx, a, EXT_NONE, gen_mulhu);
75
}
76
77
static bool trans_div(DisasContext *ctx, arg_div *a)
78
--
79
2.31.1
80
81
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Move these helpers near their use by the trans_*
4
functions within insn_trans/trans_rvm.c.inc.
5
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210823195529.560295-10-richard.henderson@linaro.org
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/translate.c | 127 ------------------------
14
target/riscv/insn_trans/trans_rvm.c.inc | 127 ++++++++++++++++++++++++
15
2 files changed, 127 insertions(+), 127 deletions(-)
16
17
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/translate.c
20
+++ b/target/riscv/translate.c
21
@@ -XXX,XX +XXX,XX @@ static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
22
}
23
}
24
25
-static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
26
-{
27
- TCGv rl = tcg_temp_new();
28
- TCGv rh = tcg_temp_new();
29
-
30
- tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
31
- /* fix up for one negative */
32
- tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
33
- tcg_gen_and_tl(rl, rl, arg2);
34
- tcg_gen_sub_tl(ret, rh, rl);
35
-
36
- tcg_temp_free(rl);
37
- tcg_temp_free(rh);
38
-}
39
-
40
-static void gen_div(TCGv ret, TCGv source1, TCGv source2)
41
-{
42
- TCGv temp1, temp2, zero, one, mone, min;
43
-
44
- temp1 = tcg_temp_new();
45
- temp2 = tcg_temp_new();
46
- zero = tcg_constant_tl(0);
47
- one = tcg_constant_tl(1);
48
- mone = tcg_constant_tl(-1);
49
- min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
50
-
51
- /*
52
- * If overflow, set temp2 to 1, else source2.
53
- * This produces the required result of min.
54
- */
55
- tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
56
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
57
- tcg_gen_and_tl(temp1, temp1, temp2);
58
- tcg_gen_movcond_tl(TCG_COND_NE, temp2, temp1, zero, one, source2);
59
-
60
- /*
61
- * If div by zero, set temp1 to -1 and temp2 to 1 to
62
- * produce the required result of -1.
63
- */
64
- tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, mone, source1);
65
- tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2);
66
-
67
- tcg_gen_div_tl(ret, temp1, temp2);
68
-
69
- tcg_temp_free(temp1);
70
- tcg_temp_free(temp2);
71
-}
72
-
73
-static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
74
-{
75
- TCGv temp1, temp2, zero, one, max;
76
-
77
- temp1 = tcg_temp_new();
78
- temp2 = tcg_temp_new();
79
- zero = tcg_constant_tl(0);
80
- one = tcg_constant_tl(1);
81
- max = tcg_constant_tl(~0);
82
-
83
- /*
84
- * If div by zero, set temp1 to max and temp2 to 1 to
85
- * produce the required result of max.
86
- */
87
- tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1);
88
- tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
89
- tcg_gen_divu_tl(ret, temp1, temp2);
90
-
91
- tcg_temp_free(temp1);
92
- tcg_temp_free(temp2);
93
-}
94
-
95
-static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
96
-{
97
- TCGv temp1, temp2, zero, one, mone, min;
98
-
99
- temp1 = tcg_temp_new();
100
- temp2 = tcg_temp_new();
101
- zero = tcg_constant_tl(0);
102
- one = tcg_constant_tl(1);
103
- mone = tcg_constant_tl(-1);
104
- min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
105
-
106
- /*
107
- * If overflow, set temp1 to 0, else source1.
108
- * This avoids a possible host trap, and produces the required result of 0.
109
- */
110
- tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
111
- tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
112
- tcg_gen_and_tl(temp1, temp1, temp2);
113
- tcg_gen_movcond_tl(TCG_COND_NE, temp1, temp1, zero, zero, source1);
114
-
115
- /*
116
- * If div by zero, set temp2 to 1, else source2.
117
- * This avoids a possible host trap, but produces an incorrect result.
118
- */
119
- tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
120
-
121
- tcg_gen_rem_tl(temp1, temp1, temp2);
122
-
123
- /* If div by zero, the required result is the original dividend. */
124
- tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1);
125
-
126
- tcg_temp_free(temp1);
127
- tcg_temp_free(temp2);
128
-}
129
-
130
-static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
131
-{
132
- TCGv temp, zero, one;
133
-
134
- temp = tcg_temp_new();
135
- zero = tcg_constant_tl(0);
136
- one = tcg_constant_tl(1);
137
-
138
- /*
139
- * If div by zero, set temp to 1, else source2.
140
- * This avoids a possible host trap, but produces an incorrect result.
141
- */
142
- tcg_gen_movcond_tl(TCG_COND_EQ, temp, source2, zero, one, source2);
143
-
144
- tcg_gen_remu_tl(temp, source1, temp);
145
-
146
- /* If div by zero, the required result is the original dividend. */
147
- tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp);
148
-
149
- tcg_temp_free(temp);
150
-}
151
-
152
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
153
{
154
target_ulong next_pc;
155
diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc
156
index XXXXXXX..XXXXXXX 100644
157
--- a/target/riscv/insn_trans/trans_rvm.c.inc
158
+++ b/target/riscv/insn_trans/trans_rvm.c.inc
159
@@ -XXX,XX +XXX,XX @@ static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
160
return gen_arith(ctx, a, EXT_NONE, gen_mulh);
161
}
162
163
+static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
164
+{
165
+ TCGv rl = tcg_temp_new();
166
+ TCGv rh = tcg_temp_new();
167
+
168
+ tcg_gen_mulu2_tl(rl, rh, arg1, arg2);
169
+ /* fix up for one negative */
170
+ tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1);
171
+ tcg_gen_and_tl(rl, rl, arg2);
172
+ tcg_gen_sub_tl(ret, rh, rl);
173
+
174
+ tcg_temp_free(rl);
175
+ tcg_temp_free(rh);
176
+}
177
+
178
static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
179
{
180
REQUIRE_EXT(ctx, RVM);
181
@@ -XXX,XX +XXX,XX @@ static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a)
182
return gen_arith(ctx, a, EXT_NONE, gen_mulhu);
183
}
184
185
+static void gen_div(TCGv ret, TCGv source1, TCGv source2)
186
+{
187
+ TCGv temp1, temp2, zero, one, mone, min;
188
+
189
+ temp1 = tcg_temp_new();
190
+ temp2 = tcg_temp_new();
191
+ zero = tcg_constant_tl(0);
192
+ one = tcg_constant_tl(1);
193
+ mone = tcg_constant_tl(-1);
194
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
195
+
196
+ /*
197
+ * If overflow, set temp2 to 1, else source2.
198
+ * This produces the required result of min.
199
+ */
200
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
201
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
202
+ tcg_gen_and_tl(temp1, temp1, temp2);
203
+ tcg_gen_movcond_tl(TCG_COND_NE, temp2, temp1, zero, one, source2);
204
+
205
+ /*
206
+ * If div by zero, set temp1 to -1 and temp2 to 1 to
207
+ * produce the required result of -1.
208
+ */
209
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, mone, source1);
210
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2);
211
+
212
+ tcg_gen_div_tl(ret, temp1, temp2);
213
+
214
+ tcg_temp_free(temp1);
215
+ tcg_temp_free(temp2);
216
+}
217
+
218
static bool trans_div(DisasContext *ctx, arg_div *a)
219
{
220
REQUIRE_EXT(ctx, RVM);
221
return gen_arith(ctx, a, EXT_SIGN, gen_div);
222
}
223
224
+static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
225
+{
226
+ TCGv temp1, temp2, zero, one, max;
227
+
228
+ temp1 = tcg_temp_new();
229
+ temp2 = tcg_temp_new();
230
+ zero = tcg_constant_tl(0);
231
+ one = tcg_constant_tl(1);
232
+ max = tcg_constant_tl(~0);
233
+
234
+ /*
235
+ * If div by zero, set temp1 to max and temp2 to 1 to
236
+ * produce the required result of max.
237
+ */
238
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1);
239
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
240
+ tcg_gen_divu_tl(ret, temp1, temp2);
241
+
242
+ tcg_temp_free(temp1);
243
+ tcg_temp_free(temp2);
244
+}
245
+
246
static bool trans_divu(DisasContext *ctx, arg_divu *a)
247
{
248
REQUIRE_EXT(ctx, RVM);
249
return gen_arith(ctx, a, EXT_ZERO, gen_divu);
250
}
251
252
+static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
253
+{
254
+ TCGv temp1, temp2, zero, one, mone, min;
255
+
256
+ temp1 = tcg_temp_new();
257
+ temp2 = tcg_temp_new();
258
+ zero = tcg_constant_tl(0);
259
+ one = tcg_constant_tl(1);
260
+ mone = tcg_constant_tl(-1);
261
+ min = tcg_constant_tl(1ull << (TARGET_LONG_BITS - 1));
262
+
263
+ /*
264
+ * If overflow, set temp1 to 0, else source1.
265
+ * This avoids a possible host trap, and produces the required result of 0.
266
+ */
267
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp1, source1, min);
268
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, source2, mone);
269
+ tcg_gen_and_tl(temp1, temp1, temp2);
270
+ tcg_gen_movcond_tl(TCG_COND_NE, temp1, temp1, zero, zero, source1);
271
+
272
+ /*
273
+ * If div by zero, set temp2 to 1, else source2.
274
+ * This avoids a possible host trap, but produces an incorrect result.
275
+ */
276
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2);
277
+
278
+ tcg_gen_rem_tl(temp1, temp1, temp2);
279
+
280
+ /* If div by zero, the required result is the original dividend. */
281
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1);
282
+
283
+ tcg_temp_free(temp1);
284
+ tcg_temp_free(temp2);
285
+}
286
+
287
static bool trans_rem(DisasContext *ctx, arg_rem *a)
288
{
289
REQUIRE_EXT(ctx, RVM);
290
return gen_arith(ctx, a, EXT_SIGN, gen_rem);
291
}
292
293
+static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
294
+{
295
+ TCGv temp, zero, one;
296
+
297
+ temp = tcg_temp_new();
298
+ zero = tcg_constant_tl(0);
299
+ one = tcg_constant_tl(1);
300
+
301
+ /*
302
+ * If div by zero, set temp to 1, else source2.
303
+ * This avoids a possible host trap, but produces an incorrect result.
304
+ */
305
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp, source2, zero, one, source2);
306
+
307
+ tcg_gen_remu_tl(temp, source1, temp);
308
+
309
+ /* If div by zero, the required result is the original dividend. */
310
+ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp);
311
+
312
+ tcg_temp_free(temp);
313
+}
314
+
315
static bool trans_remu(DisasContext *ctx, arg_remu *a)
316
{
317
REQUIRE_EXT(ctx, RVM);
318
--
319
2.31.1
320
321
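The comments in gen_div() and gen_rem() above encode the ISA's division edge cases with movcond sequences; a hypothetical reference model in plain C (XLEN=64 assumed, not QEMU code) spells them out:

#include <stdint.h>

/* DIV: divide-by-zero yields -1; the INT64_MIN / -1 overflow yields the dividend. */
static int64_t div_model(int64_t a, int64_t b)
{
    if (b == 0) {
        return -1;
    }
    if (a == INT64_MIN && b == -1) {
        return a;               /* overflow case, result is INT64_MIN */
    }
    return a / b;
}

/* REM: divide-by-zero yields the dividend; the overflow case yields 0. */
static int64_t rem_model(int64_t a, int64_t b)
{
    if (b == 0) {
        return a;
    }
    if (a == INT64_MIN && b == -1) {
        return 0;
    }
    return a % b;
}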
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Move these helpers near their use by the trans_*
4
functions within insn_trans/trans_rvb.c.inc.
5
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210823195529.560295-11-richard.henderson@linaro.org
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/translate.c | 233 -----------------------
14
target/riscv/insn_trans/trans_rvb.c.inc | 234 ++++++++++++++++++++++++
15
2 files changed, 234 insertions(+), 233 deletions(-)
16
17
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/translate.c
20
+++ b/target/riscv/translate.c
21
@@ -XXX,XX +XXX,XX @@ static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
22
return true;
23
}
24
25
-static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2)
26
-{
27
- tcg_gen_deposit_tl(ret, arg1, arg2,
28
- TARGET_LONG_BITS / 2,
29
- TARGET_LONG_BITS / 2);
30
-}
31
-
32
-static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2)
33
-{
34
- TCGv t = tcg_temp_new();
35
- tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2);
36
- tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2);
37
- tcg_temp_free(t);
38
-}
39
-
40
-static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2)
41
-{
42
- TCGv t = tcg_temp_new();
43
- tcg_gen_ext8u_tl(t, arg2);
44
- tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8);
45
- tcg_temp_free(t);
46
-}
47
-
48
-static void gen_sbop_mask(TCGv ret, TCGv shamt)
49
-{
50
- tcg_gen_movi_tl(ret, 1);
51
- tcg_gen_shl_tl(ret, ret, shamt);
52
-}
53
-
54
-static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
55
-{
56
- TCGv t = tcg_temp_new();
57
-
58
- gen_sbop_mask(t, shamt);
59
- tcg_gen_or_tl(ret, arg1, t);
60
-
61
- tcg_temp_free(t);
62
-}
63
-
64
-static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
65
-{
66
- TCGv t = tcg_temp_new();
67
-
68
- gen_sbop_mask(t, shamt);
69
- tcg_gen_andc_tl(ret, arg1, t);
70
-
71
- tcg_temp_free(t);
72
-}
73
-
74
-static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
75
-{
76
- TCGv t = tcg_temp_new();
77
-
78
- gen_sbop_mask(t, shamt);
79
- tcg_gen_xor_tl(ret, arg1, t);
80
-
81
- tcg_temp_free(t);
82
-}
83
-
84
-static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
85
-{
86
- tcg_gen_shr_tl(ret, arg1, shamt);
87
- tcg_gen_andi_tl(ret, ret, 1);
88
-}
89
-
90
-static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
91
-{
92
- tcg_gen_not_tl(ret, arg1);
93
- tcg_gen_shl_tl(ret, ret, arg2);
94
- tcg_gen_not_tl(ret, ret);
95
-}
96
-
97
-static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
98
-{
99
- tcg_gen_not_tl(ret, arg1);
100
- tcg_gen_shr_tl(ret, ret, arg2);
101
- tcg_gen_not_tl(ret, ret);
102
-}
103
-
104
-static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
105
-{
106
- TCGv source1 = tcg_temp_new();
107
- TCGv source2;
108
-
109
- gen_get_gpr(ctx, source1, a->rs1);
110
-
111
- if (a->shamt == (TARGET_LONG_BITS - 8)) {
112
- /* rev8, byte swaps */
113
- tcg_gen_bswap_tl(source1, source1);
114
- } else {
115
- source2 = tcg_temp_new();
116
- tcg_gen_movi_tl(source2, a->shamt);
117
- gen_helper_grev(source1, source1, source2);
118
- tcg_temp_free(source2);
119
- }
120
-
121
- gen_set_gpr(ctx, a->rd, source1);
122
- tcg_temp_free(source1);
123
- return true;
124
-}
125
-
126
-#define GEN_SHADD(SHAMT) \
127
-static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \
128
-{ \
129
- TCGv t = tcg_temp_new(); \
130
- \
131
- tcg_gen_shli_tl(t, arg1, SHAMT); \
132
- tcg_gen_add_tl(ret, t, arg2); \
133
- \
134
- tcg_temp_free(t); \
135
-}
136
-
137
-GEN_SHADD(1)
138
-GEN_SHADD(2)
139
-GEN_SHADD(3)
140
-
141
-static void gen_ctzw(TCGv ret, TCGv arg1)
142
-{
143
- tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32));
144
- tcg_gen_ctzi_tl(ret, ret, 64);
145
-}
146
-
147
-static void gen_clzw(TCGv ret, TCGv arg1)
148
-{
149
- tcg_gen_ext32u_tl(ret, arg1);
150
- tcg_gen_clzi_tl(ret, ret, 64);
151
- tcg_gen_subi_tl(ret, ret, 32);
152
-}
153
-
154
-static void gen_cpopw(TCGv ret, TCGv arg1)
155
-{
156
- tcg_gen_ext32u_tl(arg1, arg1);
157
- tcg_gen_ctpop_tl(ret, arg1);
158
-}
159
-
160
-static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
161
-{
162
- TCGv t = tcg_temp_new();
163
- tcg_gen_ext16s_tl(t, arg2);
164
- tcg_gen_deposit_tl(ret, arg1, t, 16, 48);
165
- tcg_temp_free(t);
166
-}
167
-
168
-static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2)
169
-{
170
- TCGv t = tcg_temp_new();
171
- tcg_gen_shri_tl(t, arg1, 16);
172
- tcg_gen_deposit_tl(ret, arg2, t, 0, 16);
173
- tcg_gen_ext32s_tl(ret, ret);
174
- tcg_temp_free(t);
175
-}
176
-
177
-static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
178
-{
179
- TCGv_i32 t1 = tcg_temp_new_i32();
180
- TCGv_i32 t2 = tcg_temp_new_i32();
181
-
182
- /* truncate to 32-bits */
183
- tcg_gen_trunc_tl_i32(t1, arg1);
184
- tcg_gen_trunc_tl_i32(t2, arg2);
185
-
186
- tcg_gen_rotr_i32(t1, t1, t2);
187
-
188
- /* sign-extend 64-bits */
189
- tcg_gen_ext_i32_tl(ret, t1);
190
-
191
- tcg_temp_free_i32(t1);
192
- tcg_temp_free_i32(t2);
193
-}
194
-
195
-static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
196
-{
197
- TCGv_i32 t1 = tcg_temp_new_i32();
198
- TCGv_i32 t2 = tcg_temp_new_i32();
199
-
200
- /* truncate to 32-bits */
201
- tcg_gen_trunc_tl_i32(t1, arg1);
202
- tcg_gen_trunc_tl_i32(t2, arg2);
203
-
204
- tcg_gen_rotl_i32(t1, t1, t2);
205
-
206
- /* sign-extend 64-bits */
207
- tcg_gen_ext_i32_tl(ret, t1);
208
-
209
- tcg_temp_free_i32(t1);
210
- tcg_temp_free_i32(t2);
211
-}
212
-
213
-static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
214
-{
215
- tcg_gen_ext32u_tl(arg1, arg1);
216
- gen_helper_grev(ret, arg1, arg2);
217
-}
218
-
219
-static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2)
220
-{
221
- tcg_gen_ext32u_tl(arg1, arg1);
222
- gen_helper_gorcw(ret, arg1, arg2);
223
-}
224
-
225
-#define GEN_SHADD_UW(SHAMT) \
226
-static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \
227
-{ \
228
- TCGv t = tcg_temp_new(); \
229
- \
230
- tcg_gen_ext32u_tl(t, arg1); \
231
- \
232
- tcg_gen_shli_tl(t, t, SHAMT); \
233
- tcg_gen_add_tl(ret, t, arg2); \
234
- \
235
- tcg_temp_free(t); \
236
-}
237
-
238
-GEN_SHADD_UW(1)
239
-GEN_SHADD_UW(2)
240
-GEN_SHADD_UW(3)
241
-
242
-static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
243
-{
244
- tcg_gen_ext32u_tl(arg1, arg1);
245
- tcg_gen_add_tl(ret, arg1, arg2);
246
-}
247
-
248
static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
249
void (*func)(TCGv, TCGv, TCGv))
250
{
251
@@ -XXX,XX +XXX,XX @@ static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
252
return true;
253
}
254
255
-static void gen_ctz(TCGv ret, TCGv arg1)
256
-{
257
- tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS);
258
-}
259
-
260
-static void gen_clz(TCGv ret, TCGv arg1)
261
-{
262
- tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS);
263
-}
264
-
265
static bool gen_unary(DisasContext *ctx, arg_r2 *a,
266
void(*func)(TCGv, TCGv))
267
{
268
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
269
index XXXXXXX..XXXXXXX 100644
270
--- a/target/riscv/insn_trans/trans_rvb.c.inc
271
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
272
@@ -XXX,XX +XXX,XX @@
273
* this program. If not, see <http://www.gnu.org/licenses/>.
274
*/
275
276
+
277
+static void gen_clz(TCGv ret, TCGv arg1)
278
+{
279
+ tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS);
280
+}
281
+
282
static bool trans_clz(DisasContext *ctx, arg_clz *a)
283
{
284
REQUIRE_EXT(ctx, RVB);
285
return gen_unary(ctx, a, gen_clz);
286
}
287
288
+static void gen_ctz(TCGv ret, TCGv arg1)
289
+{
290
+ tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS);
291
+}
292
+
293
static bool trans_ctz(DisasContext *ctx, arg_ctz *a)
294
{
295
REQUIRE_EXT(ctx, RVB);
296
@@ -XXX,XX +XXX,XX @@ static bool trans_xnor(DisasContext *ctx, arg_xnor *a)
297
return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl);
298
}
299
300
+static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2)
301
+{
302
+ tcg_gen_deposit_tl(ret, arg1, arg2,
303
+ TARGET_LONG_BITS / 2,
304
+ TARGET_LONG_BITS / 2);
305
+}
306
+
307
static bool trans_pack(DisasContext *ctx, arg_pack *a)
308
{
309
REQUIRE_EXT(ctx, RVB);
310
return gen_arith(ctx, a, EXT_NONE, gen_pack);
311
}
312
313
+static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2)
314
+{
315
+ TCGv t = tcg_temp_new();
316
+ tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2);
317
+ tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2);
318
+ tcg_temp_free(t);
319
+}
320
+
321
static bool trans_packu(DisasContext *ctx, arg_packu *a)
322
{
323
REQUIRE_EXT(ctx, RVB);
324
return gen_arith(ctx, a, EXT_NONE, gen_packu);
325
}
326
327
+static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2)
328
+{
329
+ TCGv t = tcg_temp_new();
330
+ tcg_gen_ext8u_tl(t, arg2);
331
+ tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8);
332
+ tcg_temp_free(t);
333
+}
334
+
335
static bool trans_packh(DisasContext *ctx, arg_packh *a)
336
{
337
REQUIRE_EXT(ctx, RVB);
338
@@ -XXX,XX +XXX,XX @@ static bool trans_sext_h(DisasContext *ctx, arg_sext_h *a)
339
return gen_unary(ctx, a, tcg_gen_ext16s_tl);
340
}
341
342
+static void gen_sbop_mask(TCGv ret, TCGv shamt)
343
+{
344
+ tcg_gen_movi_tl(ret, 1);
345
+ tcg_gen_shl_tl(ret, ret, shamt);
346
+}
347
+
348
+static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
349
+{
350
+ TCGv t = tcg_temp_new();
351
+
352
+ gen_sbop_mask(t, shamt);
353
+ tcg_gen_or_tl(ret, arg1, t);
354
+
355
+ tcg_temp_free(t);
356
+}
357
+
358
static bool trans_bset(DisasContext *ctx, arg_bset *a)
359
{
360
REQUIRE_EXT(ctx, RVB);
361
@@ -XXX,XX +XXX,XX @@ static bool trans_bseti(DisasContext *ctx, arg_bseti *a)
362
return gen_shifti(ctx, a, gen_bset);
363
}
364
365
+static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
366
+{
367
+ TCGv t = tcg_temp_new();
368
+
369
+ gen_sbop_mask(t, shamt);
370
+ tcg_gen_andc_tl(ret, arg1, t);
371
+
372
+ tcg_temp_free(t);
373
+}
374
+
375
static bool trans_bclr(DisasContext *ctx, arg_bclr *a)
376
{
377
REQUIRE_EXT(ctx, RVB);
378
@@ -XXX,XX +XXX,XX @@ static bool trans_bclri(DisasContext *ctx, arg_bclri *a)
379
return gen_shifti(ctx, a, gen_bclr);
380
}
381
382
+static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
383
+{
384
+ TCGv t = tcg_temp_new();
385
+
386
+ gen_sbop_mask(t, shamt);
387
+ tcg_gen_xor_tl(ret, arg1, t);
388
+
389
+ tcg_temp_free(t);
390
+}
391
+
392
static bool trans_binv(DisasContext *ctx, arg_binv *a)
393
{
394
REQUIRE_EXT(ctx, RVB);
395
@@ -XXX,XX +XXX,XX @@ static bool trans_binvi(DisasContext *ctx, arg_binvi *a)
396
return gen_shifti(ctx, a, gen_binv);
397
}
398
399
+static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
400
+{
401
+ tcg_gen_shr_tl(ret, arg1, shamt);
402
+ tcg_gen_andi_tl(ret, ret, 1);
403
+}
404
+
405
static bool trans_bext(DisasContext *ctx, arg_bext *a)
406
{
407
REQUIRE_EXT(ctx, RVB);
408
@@ -XXX,XX +XXX,XX @@ static bool trans_bexti(DisasContext *ctx, arg_bexti *a)
409
return gen_shifti(ctx, a, gen_bext);
410
}
411
412
+static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
413
+{
414
+ tcg_gen_not_tl(ret, arg1);
415
+ tcg_gen_shl_tl(ret, ret, arg2);
416
+ tcg_gen_not_tl(ret, ret);
417
+}
418
+
419
static bool trans_slo(DisasContext *ctx, arg_slo *a)
420
{
421
REQUIRE_EXT(ctx, RVB);
422
@@ -XXX,XX +XXX,XX @@ static bool trans_sloi(DisasContext *ctx, arg_sloi *a)
423
return gen_shifti(ctx, a, gen_slo);
424
}
425
426
+static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
427
+{
428
+ tcg_gen_not_tl(ret, arg1);
429
+ tcg_gen_shr_tl(ret, ret, arg2);
430
+ tcg_gen_not_tl(ret, ret);
431
+}
432
+
433
static bool trans_sro(DisasContext *ctx, arg_sro *a)
434
{
435
REQUIRE_EXT(ctx, RVB);
436
@@ -XXX,XX +XXX,XX @@ static bool trans_grev(DisasContext *ctx, arg_grev *a)
437
return gen_shift(ctx, a, gen_helper_grev);
438
}
439
440
+static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
441
+{
442
+ TCGv source1 = tcg_temp_new();
443
+ TCGv source2;
444
+
445
+ gen_get_gpr(ctx, source1, a->rs1);
446
+
447
+ if (a->shamt == (TARGET_LONG_BITS - 8)) {
448
+ /* rev8, byte swaps */
449
+ tcg_gen_bswap_tl(source1, source1);
450
+ } else {
451
+ source2 = tcg_temp_new();
452
+ tcg_gen_movi_tl(source2, a->shamt);
453
+ gen_helper_grev(source1, source1, source2);
454
+ tcg_temp_free(source2);
455
+ }
456
+
457
+ gen_set_gpr(ctx, a->rd, source1);
458
+ tcg_temp_free(source1);
459
+ return true;
460
+}
461
+
462
static bool trans_grevi(DisasContext *ctx, arg_grevi *a)
463
{
464
REQUIRE_EXT(ctx, RVB);
465
@@ -XXX,XX +XXX,XX @@ static bool trans_gorci(DisasContext *ctx, arg_gorci *a)
466
return gen_shifti(ctx, a, gen_helper_gorc);
467
}
468
469
+#define GEN_SHADD(SHAMT) \
470
+static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \
471
+{ \
472
+ TCGv t = tcg_temp_new(); \
473
+ \
474
+ tcg_gen_shli_tl(t, arg1, SHAMT); \
475
+ tcg_gen_add_tl(ret, t, arg2); \
476
+ \
477
+ tcg_temp_free(t); \
478
+}
479
+
480
+GEN_SHADD(1)
481
+GEN_SHADD(2)
482
+GEN_SHADD(3)
483
+
484
#define GEN_TRANS_SHADD(SHAMT) \
485
static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \
486
{ \
487
@@ -XXX,XX +XXX,XX @@ GEN_TRANS_SHADD(1)
488
GEN_TRANS_SHADD(2)
489
GEN_TRANS_SHADD(3)
490
491
+static void gen_clzw(TCGv ret, TCGv arg1)
492
+{
493
+ tcg_gen_ext32u_tl(ret, arg1);
494
+ tcg_gen_clzi_tl(ret, ret, 64);
495
+ tcg_gen_subi_tl(ret, ret, 32);
496
+}
497
+
498
static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
499
{
500
REQUIRE_64BIT(ctx);
501
@@ -XXX,XX +XXX,XX @@ static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
502
return gen_unary(ctx, a, gen_clzw);
503
}
504
505
+static void gen_ctzw(TCGv ret, TCGv arg1)
506
+{
507
+ tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32));
508
+ tcg_gen_ctzi_tl(ret, ret, 64);
509
+}
510
+
511
static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a)
512
{
513
REQUIRE_64BIT(ctx);
514
@@ -XXX,XX +XXX,XX @@ static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a)
515
return gen_unary(ctx, a, gen_ctzw);
516
}
517
518
+static void gen_cpopw(TCGv ret, TCGv arg1)
519
+{
520
+ tcg_gen_ext32u_tl(arg1, arg1);
521
+ tcg_gen_ctpop_tl(ret, arg1);
522
+}
523
+
524
static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a)
525
{
526
REQUIRE_64BIT(ctx);
527
@@ -XXX,XX +XXX,XX @@ static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a)
528
return gen_unary(ctx, a, gen_cpopw);
529
}
530
531
+static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
532
+{
533
+ TCGv t = tcg_temp_new();
534
+ tcg_gen_ext16s_tl(t, arg2);
535
+ tcg_gen_deposit_tl(ret, arg1, t, 16, 48);
536
+ tcg_temp_free(t);
537
+}
538
+
539
static bool trans_packw(DisasContext *ctx, arg_packw *a)
540
{
541
REQUIRE_64BIT(ctx);
542
@@ -XXX,XX +XXX,XX @@ static bool trans_packw(DisasContext *ctx, arg_packw *a)
543
return gen_arith(ctx, a, EXT_NONE, gen_packw);
544
}
545
546
+static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2)
547
+{
548
+ TCGv t = tcg_temp_new();
549
+ tcg_gen_shri_tl(t, arg1, 16);
550
+ tcg_gen_deposit_tl(ret, arg2, t, 0, 16);
551
+ tcg_gen_ext32s_tl(ret, ret);
552
+ tcg_temp_free(t);
553
+}
554
+
555
static bool trans_packuw(DisasContext *ctx, arg_packuw *a)
556
{
557
REQUIRE_64BIT(ctx);
558
@@ -XXX,XX +XXX,XX @@ static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a)
559
return gen_shiftiw(ctx, a, gen_sro);
560
}
561
562
+static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
563
+{
564
+ TCGv_i32 t1 = tcg_temp_new_i32();
565
+ TCGv_i32 t2 = tcg_temp_new_i32();
566
+
567
+ /* truncate to 32-bits */
568
+ tcg_gen_trunc_tl_i32(t1, arg1);
569
+ tcg_gen_trunc_tl_i32(t2, arg2);
570
+
571
+ tcg_gen_rotr_i32(t1, t1, t2);
572
+
573
+ /* sign-extend 64-bits */
574
+ tcg_gen_ext_i32_tl(ret, t1);
575
+
576
+ tcg_temp_free_i32(t1);
577
+ tcg_temp_free_i32(t2);
578
+}
579
+
580
static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
581
{
582
REQUIRE_64BIT(ctx);
583
@@ -XXX,XX +XXX,XX @@ static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
584
return gen_shiftiw(ctx, a, gen_rorw);
585
}
586
587
+static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
588
+{
589
+ TCGv_i32 t1 = tcg_temp_new_i32();
590
+ TCGv_i32 t2 = tcg_temp_new_i32();
591
+
592
+ /* truncate to 32-bits */
593
+ tcg_gen_trunc_tl_i32(t1, arg1);
594
+ tcg_gen_trunc_tl_i32(t2, arg2);
595
+
596
+ tcg_gen_rotl_i32(t1, t1, t2);
597
+
598
+ /* sign-extend 64-bits */
599
+ tcg_gen_ext_i32_tl(ret, t1);
600
+
601
+ tcg_temp_free_i32(t1);
602
+ tcg_temp_free_i32(t2);
603
+}
604
+
605
static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
606
{
607
REQUIRE_64BIT(ctx);
608
@@ -XXX,XX +XXX,XX @@ static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
609
return gen_shiftw(ctx, a, gen_rolw);
610
}
611
612
+static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
613
+{
614
+ tcg_gen_ext32u_tl(arg1, arg1);
615
+ gen_helper_grev(ret, arg1, arg2);
616
+}
617
+
618
static bool trans_grevw(DisasContext *ctx, arg_grevw *a)
619
{
620
REQUIRE_64BIT(ctx);
621
@@ -XXX,XX +XXX,XX @@ static bool trans_greviw(DisasContext *ctx, arg_greviw *a)
622
return gen_shiftiw(ctx, a, gen_grevw);
623
}
624
625
+static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2)
626
+{
627
+ tcg_gen_ext32u_tl(arg1, arg1);
628
+ gen_helper_gorcw(ret, arg1, arg2);
629
+}
630
+
631
static bool trans_gorcw(DisasContext *ctx, arg_gorcw *a)
632
{
633
REQUIRE_64BIT(ctx);
634
@@ -XXX,XX +XXX,XX @@ static bool trans_gorciw(DisasContext *ctx, arg_gorciw *a)
635
return gen_shiftiw(ctx, a, gen_gorcw);
636
}
637
638
+#define GEN_SHADD_UW(SHAMT) \
639
+static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \
640
+{ \
641
+ TCGv t = tcg_temp_new(); \
642
+ \
643
+ tcg_gen_ext32u_tl(t, arg1); \
644
+ \
645
+ tcg_gen_shli_tl(t, t, SHAMT); \
646
+ tcg_gen_add_tl(ret, t, arg2); \
647
+ \
648
+ tcg_temp_free(t); \
649
+}
650
+
651
+GEN_SHADD_UW(1)
652
+GEN_SHADD_UW(2)
653
+GEN_SHADD_UW(3)
654
+
655
#define GEN_TRANS_SHADD_UW(SHAMT) \
656
static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \
657
arg_sh##SHAMT##add_uw *a) \
658
@@ -XXX,XX +XXX,XX @@ GEN_TRANS_SHADD_UW(1)
659
GEN_TRANS_SHADD_UW(2)
660
GEN_TRANS_SHADD_UW(3)
661
662
+static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2)
663
+{
664
+ tcg_gen_ext32u_tl(arg1, arg1);
665
+ tcg_gen_add_tl(ret, arg1, arg2);
666
+}
667
+
668
static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a)
669
{
670
REQUIRE_64BIT(ctx);
671
--
672
2.31.1
673
674
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
When decode_insn16() fails, we fall back to decode_RV32_64C() for
3
Use ctx->w for ctpopw, which is the only one that can
4
further compressed instruction decoding. However, prior to this change,
4
re-use the generic algorithm for the narrow operation.
5
we did not raise an illegal instruction exception, if decode_RV32_64C()
6
fails to decode the instruction. This means that we skipped illegal
7
compressed instructions instead of raising an illegal instruction
8
exception.
9
5
10
Instead of patching decode_RV32_64C(), we can just remove it,
11
as it is dead code since f330433b363 anyway.
12
13
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
14
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
15
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Message-id: 20210322121609.3097928-1-georg.kotheimer@kernkonzept.com
8
Message-id: 20210823195529.560295-12-richard.henderson@linaro.org
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
10
---
19
target/riscv/translate.c | 179 +--------------------------------------
11
target/riscv/translate.c | 14 ++++++--------
20
1 file changed, 1 insertion(+), 178 deletions(-)
12
target/riscv/insn_trans/trans_rvb.c.inc | 24 +++++++++---------------
13
2 files changed, 15 insertions(+), 23 deletions(-)
21
14
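A hypothetical host-side model (GCC/Clang builtin, not QEMU code) of the narrow CPOPW case that ctx->w plus EXT_ZERO now routes through the generic gen_unary() path: the source is reduced to its low 32 bits before counting.

#include <stdint.h>

/* CPOPW: population count over the low 32 bits only. */
static int cpopw_model(uint64_t rs1)
{
    return __builtin_popcountll(rs1 & 0xffffffffull);
}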
22
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
15
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
23
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/translate.c
17
--- a/target/riscv/translate.c
25
+++ b/target/riscv/translate.c
18
+++ b/target/riscv/translate.c
26
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
19
@@ -XXX,XX +XXX,XX @@ static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
27
CPUState *cs;
20
return true;
28
} DisasContext;
21
}
29
22
30
-#ifdef TARGET_RISCV64
23
-static bool gen_unary(DisasContext *ctx, arg_r2 *a,
31
-/* convert riscv funct3 to qemu memop for load/store */
24
- void(*func)(TCGv, TCGv))
32
-static const int tcg_memop_lookup[8] = {
25
+static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
33
- [0 ... 7] = -1,
26
+ void (*func)(TCGv, TCGv))
34
- [0] = MO_SB,
27
{
35
- [1] = MO_TESW,
28
- TCGv source = tcg_temp_new();
36
- [2] = MO_TESL,
37
- [3] = MO_TEQ,
38
- [4] = MO_UB,
39
- [5] = MO_TEUW,
40
- [6] = MO_TEUL,
41
-};
42
-#endif
43
-
29
-
44
#ifdef TARGET_RISCV64
30
- gen_get_gpr(ctx, source, a->rs1);
45
#define CASE_OP_32_64(X) case X: case glue(X, W)
31
+ TCGv dest = dest_gpr(ctx, a->rd);
46
#else
32
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
47
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
33
48
ctx->base.is_jmp = DISAS_NORETURN;
34
- (*func)(source, source);
35
+ func(dest, src1);
36
37
- gen_set_gpr(ctx, a->rd, source);
38
- tcg_temp_free(source);
39
+ gen_set_gpr(ctx, a->rd, dest);
40
return true;
49
}
41
}
50
42
51
-#ifdef TARGET_RISCV64
43
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
52
-static void gen_load_c(DisasContext *ctx, uint32_t opc, int rd, int rs1,
44
index XXXXXXX..XXXXXXX 100644
53
- target_long imm)
45
--- a/target/riscv/insn_trans/trans_rvb.c.inc
54
-{
46
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
55
- TCGv t0 = tcg_temp_new();
47
@@ -XXX,XX +XXX,XX @@ static void gen_clz(TCGv ret, TCGv arg1)
56
- TCGv t1 = tcg_temp_new();
48
static bool trans_clz(DisasContext *ctx, arg_clz *a)
57
- gen_get_gpr(t0, rs1);
49
{
58
- tcg_gen_addi_tl(t0, t0, imm);
50
REQUIRE_EXT(ctx, RVB);
59
- int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
51
- return gen_unary(ctx, a, gen_clz);
60
-
52
+ return gen_unary(ctx, a, EXT_ZERO, gen_clz);
61
- if (memop < 0) {
53
}
62
- gen_exception_illegal(ctx);
54
63
- return;
55
static void gen_ctz(TCGv ret, TCGv arg1)
64
- }
56
@@ -XXX,XX +XXX,XX @@ static void gen_ctz(TCGv ret, TCGv arg1)
65
-
57
static bool trans_ctz(DisasContext *ctx, arg_ctz *a)
66
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
58
{
67
- gen_set_gpr(rd, t1);
59
REQUIRE_EXT(ctx, RVB);
68
- tcg_temp_free(t0);
60
- return gen_unary(ctx, a, gen_ctz);
69
- tcg_temp_free(t1);
61
+ return gen_unary(ctx, a, EXT_ZERO, gen_ctz);
62
}
63
64
static bool trans_cpop(DisasContext *ctx, arg_cpop *a)
65
{
66
REQUIRE_EXT(ctx, RVB);
67
- return gen_unary(ctx, a, tcg_gen_ctpop_tl);
68
+ return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl);
69
}
70
71
static bool trans_andn(DisasContext *ctx, arg_andn *a)
72
@@ -XXX,XX +XXX,XX @@ static bool trans_maxu(DisasContext *ctx, arg_maxu *a)
73
static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a)
74
{
75
REQUIRE_EXT(ctx, RVB);
76
- return gen_unary(ctx, a, tcg_gen_ext8s_tl);
77
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8s_tl);
78
}
79
80
static bool trans_sext_h(DisasContext *ctx, arg_sext_h *a)
81
{
82
REQUIRE_EXT(ctx, RVB);
83
- return gen_unary(ctx, a, tcg_gen_ext16s_tl);
84
+ return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16s_tl);
85
}
86
87
static void gen_sbop_mask(TCGv ret, TCGv shamt)
88
@@ -XXX,XX +XXX,XX @@ GEN_TRANS_SHADD(3)
89
90
static void gen_clzw(TCGv ret, TCGv arg1)
91
{
92
- tcg_gen_ext32u_tl(ret, arg1);
93
tcg_gen_clzi_tl(ret, ret, 64);
94
tcg_gen_subi_tl(ret, ret, 32);
95
}
96
@@ -XXX,XX +XXX,XX @@ static bool trans_clzw(DisasContext *ctx, arg_clzw *a)
97
{
98
REQUIRE_64BIT(ctx);
99
REQUIRE_EXT(ctx, RVB);
100
- return gen_unary(ctx, a, gen_clzw);
101
+ return gen_unary(ctx, a, EXT_ZERO, gen_clzw);
102
}
103
104
static void gen_ctzw(TCGv ret, TCGv arg1)
105
@@ -XXX,XX +XXX,XX @@ static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a)
106
{
107
REQUIRE_64BIT(ctx);
108
REQUIRE_EXT(ctx, RVB);
109
- return gen_unary(ctx, a, gen_ctzw);
70
-}
110
-}
71
-
111
-
72
-static void gen_store_c(DisasContext *ctx, uint32_t opc, int rs1, int rs2,
112
-static void gen_cpopw(TCGv ret, TCGv arg1)
73
- target_long imm)
74
-{
113
-{
75
- TCGv t0 = tcg_temp_new();
114
- tcg_gen_ext32u_tl(arg1, arg1);
76
- TCGv dat = tcg_temp_new();
115
- tcg_gen_ctpop_tl(ret, arg1);
77
- gen_get_gpr(t0, rs1);
116
+ return gen_unary(ctx, a, EXT_NONE, gen_ctzw);
78
- tcg_gen_addi_tl(t0, t0, imm);
117
}
79
- gen_get_gpr(dat, rs2);
118
80
- int memop = tcg_memop_lookup[(opc >> 12) & 0x7];
119
static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a)
81
-
82
- if (memop < 0) {
83
- gen_exception_illegal(ctx);
84
- return;
85
- }
86
-
87
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
88
- tcg_temp_free(t0);
89
- tcg_temp_free(dat);
90
-}
91
-#endif
92
-
93
#ifndef CONFIG_USER_ONLY
94
/* The states of mstatus_fs are:
95
* 0 = disabled, 1 = initial, 2 = clean, 3 = dirty
96
@@ -XXX,XX +XXX,XX @@ static void mark_fs_dirty(DisasContext *ctx)
97
static inline void mark_fs_dirty(DisasContext *ctx) { }
98
#endif
99
100
-#if !defined(TARGET_RISCV64)
101
-static void gen_fp_load(DisasContext *ctx, uint32_t opc, int rd,
102
- int rs1, target_long imm)
103
-{
104
- TCGv t0;
105
-
106
- if (ctx->mstatus_fs == 0) {
107
- gen_exception_illegal(ctx);
108
- return;
109
- }
110
-
111
- t0 = tcg_temp_new();
112
- gen_get_gpr(t0, rs1);
113
- tcg_gen_addi_tl(t0, t0, imm);
114
-
115
- switch (opc) {
116
- case OPC_RISC_FLW:
117
- if (!has_ext(ctx, RVF)) {
118
- goto do_illegal;
119
- }
120
- tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEUL);
121
- /* RISC-V requires NaN-boxing of narrower width floating point values */
122
- tcg_gen_ori_i64(cpu_fpr[rd], cpu_fpr[rd], 0xffffffff00000000ULL);
123
- break;
124
- case OPC_RISC_FLD:
125
- if (!has_ext(ctx, RVD)) {
126
- goto do_illegal;
127
- }
128
- tcg_gen_qemu_ld_i64(cpu_fpr[rd], t0, ctx->mem_idx, MO_TEQ);
129
- break;
130
- do_illegal:
131
- default:
132
- gen_exception_illegal(ctx);
133
- break;
134
- }
135
- tcg_temp_free(t0);
136
-
137
- mark_fs_dirty(ctx);
138
-}
139
-
140
-static void gen_fp_store(DisasContext *ctx, uint32_t opc, int rs1,
141
- int rs2, target_long imm)
142
-{
143
- TCGv t0;
144
-
145
- if (ctx->mstatus_fs == 0) {
146
- gen_exception_illegal(ctx);
147
- return;
148
- }
149
-
150
- t0 = tcg_temp_new();
151
- gen_get_gpr(t0, rs1);
152
- tcg_gen_addi_tl(t0, t0, imm);
153
-
154
- switch (opc) {
155
- case OPC_RISC_FSW:
156
- if (!has_ext(ctx, RVF)) {
157
- goto do_illegal;
158
- }
159
- tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEUL);
160
- break;
161
- case OPC_RISC_FSD:
162
- if (!has_ext(ctx, RVD)) {
163
- goto do_illegal;
164
- }
165
- tcg_gen_qemu_st_i64(cpu_fpr[rs2], t0, ctx->mem_idx, MO_TEQ);
166
- break;
167
- do_illegal:
168
- default:
169
- gen_exception_illegal(ctx);
170
- break;
171
- }
172
-
173
- tcg_temp_free(t0);
174
-}
175
-#endif
176
-
177
static void gen_set_rm(DisasContext *ctx, int rm)
178
{
120
{
179
TCGv_i32 t0;
121
REQUIRE_64BIT(ctx);
180
@@ -XXX,XX +XXX,XX @@ static void gen_set_rm(DisasContext *ctx, int rm)
122
REQUIRE_EXT(ctx, RVB);
181
tcg_temp_free_i32(t0);
123
- return gen_unary(ctx, a, gen_cpopw);
124
+ ctx->w = true;
125
+ return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl);
182
}
126
}
183
127
184
-static void decode_RV32_64C0(DisasContext *ctx, uint16_t opcode)
128
static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2)
185
-{
186
- uint8_t funct3 = extract16(opcode, 13, 3);
187
- uint8_t rd_rs2 = GET_C_RS2S(opcode);
188
- uint8_t rs1s = GET_C_RS1S(opcode);
189
-
190
- switch (funct3) {
191
- case 3:
192
-#if defined(TARGET_RISCV64)
193
- /* C.LD(RV64/128) -> ld rd', offset[7:3](rs1')*/
194
- gen_load_c(ctx, OPC_RISC_LD, rd_rs2, rs1s,
195
- GET_C_LD_IMM(opcode));
196
-#else
197
- /* C.FLW (RV32) -> flw rd', offset[6:2](rs1')*/
198
- gen_fp_load(ctx, OPC_RISC_FLW, rd_rs2, rs1s,
199
- GET_C_LW_IMM(opcode));
200
-#endif
201
- break;
202
- case 7:
203
-#if defined(TARGET_RISCV64)
204
- /* C.SD (RV64/128) -> sd rs2', offset[7:3](rs1')*/
205
- gen_store_c(ctx, OPC_RISC_SD, rs1s, rd_rs2,
206
- GET_C_LD_IMM(opcode));
207
-#else
208
- /* C.FSW (RV32) -> fsw rs2', offset[6:2](rs1')*/
209
- gen_fp_store(ctx, OPC_RISC_FSW, rs1s, rd_rs2,
210
- GET_C_LW_IMM(opcode));
211
-#endif
212
- break;
213
- }
214
-}
215
-
216
-static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode)
217
-{
218
- uint8_t op = extract16(opcode, 0, 2);
219
-
220
- switch (op) {
221
- case 0:
222
- decode_RV32_64C0(ctx, opcode);
223
- break;
224
- }
225
-}
226
-
227
static int ex_plus_1(DisasContext *ctx, int nf)
228
{
229
return nf + 1;
230
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
231
} else {
232
ctx->pc_succ_insn = ctx->base.pc_next + 2;
233
if (!decode_insn16(ctx, opcode)) {
234
- /* fall back to old decoder */
235
- decode_RV32_64C(ctx, opcode);
236
+ gen_exception_illegal(ctx);
237
}
238
}
239
} else {
240
--
129
--
241
2.30.1
130
2.31.1
242
131
243
132
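The ctpopw change in the patch above relies on a simple equivalence: once the 32-bit source has been zero-extended (EXT_ZERO), a full-width population count returns the same value a dedicated cpopw helper would. A stand-alone sketch of that equivalence, in plain C for illustration only:

    #include <stdint.h>

    /* cpopw counts the set bits of the low 32 bits of rs1.  Zero-extending
     * first makes the generic full-width popcount give the same answer. */
    static unsigned cpopw(uint64_t rs1)
    {
        uint64_t x = (uint32_t)rs1;   /* EXT_ZERO: upper 32 bits cleared */
        unsigned count = 0;

        while (x) {
            x &= x - 1;               /* clear the lowest set bit */
            count++;
        }
        return count;
    }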
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
According to the specification, the "field SPVP of hstatus controls the
3
These operations are greatly simplified by ctx->w, which allows
4
privilege level of the access" for the hypervisor virtual-machine load
4
us to fold gen_shiftw into gen_shift. Split gen_shifti into
5
and store instructions HLV, HLVX and HSV.
5
gen_shift_imm_{fn,tl} like we do for gen_arith_imm_{fn,tl}.
6
6
7
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
7
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20210311103005.1400718-1-georg.kotheimer@kernkonzept.com
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20210823195529.560295-13-richard.henderson@linaro.org
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
---
12
target/riscv/cpu_helper.c | 25 ++++++++++++++-----------
13
target/riscv/translate.c | 110 +++++++++-----------
13
1 file changed, 14 insertions(+), 11 deletions(-)
14
target/riscv/insn_trans/trans_rvb.c.inc | 129 +++++++++++-------------
15
target/riscv/insn_trans/trans_rvi.c.inc | 88 ++++------------
16
3 files changed, 125 insertions(+), 202 deletions(-)
14
17
15
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
18
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_helper.c
20
--- a/target/riscv/translate.c
18
+++ b/target/riscv/cpu_helper.c
21
+++ b/target/riscv/translate.c
19
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
22
@@ -XXX,XX +XXX,XX @@ static inline bool is_32bit(DisasContext *ctx)
20
use_background = true;
23
}
24
#endif
25
26
+/* The word size for this operation. */
27
+static inline int oper_len(DisasContext *ctx)
28
+{
29
+ return ctx->w ? 32 : TARGET_LONG_BITS;
30
+}
31
+
32
+
33
/*
34
* RISC-V requires NaN-boxing of narrower width floating point values.
35
* This applies when a 32-bit value is assigned to a 64-bit FP register.
36
@@ -XXX,XX +XXX,XX @@ static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
37
return true;
38
}
39
40
-static bool gen_shift(DisasContext *ctx, arg_r *a,
41
- void(*func)(TCGv, TCGv, TCGv))
42
-{
43
- TCGv source1 = tcg_temp_new();
44
- TCGv source2 = tcg_temp_new();
45
-
46
- gen_get_gpr(ctx, source1, a->rs1);
47
- gen_get_gpr(ctx, source2, a->rs2);
48
-
49
- tcg_gen_andi_tl(source2, source2, TARGET_LONG_BITS - 1);
50
- (*func)(source1, source1, source2);
51
-
52
- gen_set_gpr(ctx, a->rd, source1);
53
- tcg_temp_free(source1);
54
- tcg_temp_free(source2);
55
- return true;
56
-}
57
-
58
-static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
59
+static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
60
+ void (*func)(TCGv, TCGv, target_long))
61
{
62
- DisasContext *ctx = container_of(dcbase, DisasContext, base);
63
- CPUState *cpu = ctx->cs;
64
- CPURISCVState *env = cpu->env_ptr;
65
+ TCGv dest, src1;
66
+ int max_len = oper_len(ctx);
67
68
- return cpu_ldl_code(env, pc);
69
-}
70
-
71
-static bool gen_shifti(DisasContext *ctx, arg_shift *a,
72
- void(*func)(TCGv, TCGv, TCGv))
73
-{
74
- if (a->shamt >= TARGET_LONG_BITS) {
75
+ if (a->shamt >= max_len) {
76
return false;
21
}
77
}
22
78
23
- if (mode == PRV_M && access_type != MMU_INST_FETCH) {
79
- TCGv source1 = tcg_temp_new();
24
+ /* MPRV does not affect the virtual-machine load/store
80
- TCGv source2 = tcg_temp_new();
25
+ instructions, HLV, HLVX, and HSV. */
81
-
26
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
82
- gen_get_gpr(ctx, source1, a->rs1);
27
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
83
+ dest = dest_gpr(ctx, a->rd);
28
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
84
+ src1 = get_gpr(ctx, a->rs1, ext);
29
if (get_field(env->mstatus, MSTATUS_MPRV)) {
85
30
mode = get_field(env->mstatus, MSTATUS_MPP);
86
- tcg_gen_movi_tl(source2, a->shamt);
31
}
87
- (*func)(source1, source1, source2);
32
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
88
+ func(dest, src1, a->shamt);
33
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
89
34
__func__, address, access_type, mmu_idx);
90
- gen_set_gpr(ctx, a->rd, source1);
35
91
- tcg_temp_free(source1);
36
- if (mode == PRV_M && access_type != MMU_INST_FETCH) {
92
- tcg_temp_free(source2);
37
- if (get_field(env->mstatus, MSTATUS_MPRV)) {
93
+ gen_set_gpr(ctx, a->rd, dest);
38
- mode = get_field(env->mstatus, MSTATUS_MPP);
94
return true;
39
+ /* MPRV does not affect the virtual-machine load/store
95
}
40
+ instructions, HLV, HLVX, and HSV. */
96
41
+ if (riscv_cpu_two_stage_lookup(mmu_idx)) {
97
-static bool gen_shiftw(DisasContext *ctx, arg_r *a,
42
+ mode = get_field(env->hstatus, HSTATUS_SPVP);
98
- void(*func)(TCGv, TCGv, TCGv))
43
+ } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
99
+static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
44
+ get_field(env->mstatus, MSTATUS_MPRV)) {
100
+ void (*func)(TCGv, TCGv, TCGv))
45
+ mode = get_field(env->mstatus, MSTATUS_MPP);
101
{
46
+ if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
102
- TCGv source1 = tcg_temp_new();
47
+ two_stage_lookup = true;
103
- TCGv source2 = tcg_temp_new();
48
}
104
+ TCGv dest, src1, src2;
105
+ int max_len = oper_len(ctx);
106
+
107
+ if (a->shamt >= max_len) {
108
+ return false;
109
+ }
110
111
- gen_get_gpr(ctx, source1, a->rs1);
112
- gen_get_gpr(ctx, source2, a->rs2);
113
+ dest = dest_gpr(ctx, a->rd);
114
+ src1 = get_gpr(ctx, a->rs1, ext);
115
+ src2 = tcg_constant_tl(a->shamt);
116
117
- tcg_gen_andi_tl(source2, source2, 31);
118
- (*func)(source1, source1, source2);
119
- tcg_gen_ext32s_tl(source1, source1);
120
+ func(dest, src1, src2);
121
122
- gen_set_gpr(ctx, a->rd, source1);
123
- tcg_temp_free(source1);
124
- tcg_temp_free(source2);
125
+ gen_set_gpr(ctx, a->rd, dest);
126
return true;
127
}
128
129
-static bool gen_shiftiw(DisasContext *ctx, arg_shift *a,
130
- void(*func)(TCGv, TCGv, TCGv))
131
+static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
132
+ void (*func)(TCGv, TCGv, TCGv))
133
{
134
- TCGv source1 = tcg_temp_new();
135
- TCGv source2 = tcg_temp_new();
136
-
137
- gen_get_gpr(ctx, source1, a->rs1);
138
- tcg_gen_movi_tl(source2, a->shamt);
139
+ TCGv dest = dest_gpr(ctx, a->rd);
140
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
141
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
142
+ TCGv ext2 = tcg_temp_new();
143
144
- (*func)(source1, source1, source2);
145
- tcg_gen_ext32s_tl(source1, source1);
146
+ tcg_gen_andi_tl(ext2, src2, oper_len(ctx) - 1);
147
+ func(dest, src1, ext2);
148
149
- gen_set_gpr(ctx, a->rd, source1);
150
- tcg_temp_free(source1);
151
- tcg_temp_free(source2);
152
+ gen_set_gpr(ctx, a->rd, dest);
153
+ tcg_temp_free(ext2);
154
return true;
155
}
156
157
@@ -XXX,XX +XXX,XX @@ static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
158
return true;
159
}
160
161
+static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
162
+{
163
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
164
+ CPUState *cpu = ctx->cs;
165
+ CPURISCVState *env = cpu->env_ptr;
166
+
167
+ return cpu_ldl_code(env, pc);
168
+}
169
+
170
/* Include insn module translation function */
171
#include "insn_trans/trans_rvi.c.inc"
172
#include "insn_trans/trans_rvm.c.inc"
173
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/riscv/insn_trans/trans_rvb.c.inc
176
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
177
@@ -XXX,XX +XXX,XX @@ static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt)
178
static bool trans_bset(DisasContext *ctx, arg_bset *a)
179
{
180
REQUIRE_EXT(ctx, RVB);
181
- return gen_shift(ctx, a, gen_bset);
182
+ return gen_shift(ctx, a, EXT_NONE, gen_bset);
183
}
184
185
static bool trans_bseti(DisasContext *ctx, arg_bseti *a)
186
{
187
REQUIRE_EXT(ctx, RVB);
188
- return gen_shifti(ctx, a, gen_bset);
189
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset);
190
}
191
192
static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
193
@@ -XXX,XX +XXX,XX @@ static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt)
194
static bool trans_bclr(DisasContext *ctx, arg_bclr *a)
195
{
196
REQUIRE_EXT(ctx, RVB);
197
- return gen_shift(ctx, a, gen_bclr);
198
+ return gen_shift(ctx, a, EXT_NONE, gen_bclr);
199
}
200
201
static bool trans_bclri(DisasContext *ctx, arg_bclri *a)
202
{
203
REQUIRE_EXT(ctx, RVB);
204
- return gen_shifti(ctx, a, gen_bclr);
205
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr);
206
}
207
208
static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
209
@@ -XXX,XX +XXX,XX @@ static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt)
210
static bool trans_binv(DisasContext *ctx, arg_binv *a)
211
{
212
REQUIRE_EXT(ctx, RVB);
213
- return gen_shift(ctx, a, gen_binv);
214
+ return gen_shift(ctx, a, EXT_NONE, gen_binv);
215
}
216
217
static bool trans_binvi(DisasContext *ctx, arg_binvi *a)
218
{
219
REQUIRE_EXT(ctx, RVB);
220
- return gen_shifti(ctx, a, gen_binv);
221
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv);
222
}
223
224
static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
225
@@ -XXX,XX +XXX,XX @@ static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt)
226
static bool trans_bext(DisasContext *ctx, arg_bext *a)
227
{
228
REQUIRE_EXT(ctx, RVB);
229
- return gen_shift(ctx, a, gen_bext);
230
+ return gen_shift(ctx, a, EXT_NONE, gen_bext);
231
}
232
233
static bool trans_bexti(DisasContext *ctx, arg_bexti *a)
234
{
235
REQUIRE_EXT(ctx, RVB);
236
- return gen_shifti(ctx, a, gen_bext);
237
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext);
238
}
239
240
static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
241
@@ -XXX,XX +XXX,XX @@ static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2)
242
static bool trans_slo(DisasContext *ctx, arg_slo *a)
243
{
244
REQUIRE_EXT(ctx, RVB);
245
- return gen_shift(ctx, a, gen_slo);
246
+ return gen_shift(ctx, a, EXT_NONE, gen_slo);
247
}
248
249
static bool trans_sloi(DisasContext *ctx, arg_sloi *a)
250
{
251
REQUIRE_EXT(ctx, RVB);
252
- return gen_shifti(ctx, a, gen_slo);
253
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_slo);
254
}
255
256
static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
257
@@ -XXX,XX +XXX,XX @@ static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2)
258
static bool trans_sro(DisasContext *ctx, arg_sro *a)
259
{
260
REQUIRE_EXT(ctx, RVB);
261
- return gen_shift(ctx, a, gen_sro);
262
+ return gen_shift(ctx, a, EXT_ZERO, gen_sro);
263
}
264
265
static bool trans_sroi(DisasContext *ctx, arg_sroi *a)
266
{
267
REQUIRE_EXT(ctx, RVB);
268
- return gen_shifti(ctx, a, gen_sro);
269
+ return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_sro);
270
}
271
272
static bool trans_ror(DisasContext *ctx, arg_ror *a)
273
{
274
REQUIRE_EXT(ctx, RVB);
275
- return gen_shift(ctx, a, tcg_gen_rotr_tl);
276
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_rotr_tl);
277
}
278
279
static bool trans_rori(DisasContext *ctx, arg_rori *a)
280
{
281
REQUIRE_EXT(ctx, RVB);
282
- return gen_shifti(ctx, a, tcg_gen_rotr_tl);
283
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_rotri_tl);
284
}
285
286
static bool trans_rol(DisasContext *ctx, arg_rol *a)
287
{
288
REQUIRE_EXT(ctx, RVB);
289
- return gen_shift(ctx, a, tcg_gen_rotl_tl);
290
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_rotl_tl);
291
}
292
293
static bool trans_grev(DisasContext *ctx, arg_grev *a)
294
{
295
REQUIRE_EXT(ctx, RVB);
296
- return gen_shift(ctx, a, gen_helper_grev);
297
+ return gen_shift(ctx, a, EXT_NONE, gen_helper_grev);
298
}
299
300
-static bool gen_grevi(DisasContext *ctx, arg_grevi *a)
301
+static void gen_grevi(TCGv dest, TCGv src, target_long shamt)
302
{
303
- TCGv source1 = tcg_temp_new();
304
- TCGv source2;
305
-
306
- gen_get_gpr(ctx, source1, a->rs1);
307
-
308
- if (a->shamt == (TARGET_LONG_BITS - 8)) {
309
+ if (shamt == TARGET_LONG_BITS - 8) {
310
/* rev8, byte swaps */
311
- tcg_gen_bswap_tl(source1, source1);
312
+ tcg_gen_bswap_tl(dest, src);
313
} else {
314
- source2 = tcg_temp_new();
315
- tcg_gen_movi_tl(source2, a->shamt);
316
- gen_helper_grev(source1, source1, source2);
317
- tcg_temp_free(source2);
318
+ gen_helper_grev(dest, src, tcg_constant_tl(shamt));
49
}
319
}
50
320
-
51
- if (riscv_has_ext(env, RVH) && env->priv == PRV_M &&
321
- gen_set_gpr(ctx, a->rd, source1);
52
- access_type != MMU_INST_FETCH &&
322
- tcg_temp_free(source1);
53
- get_field(env->mstatus, MSTATUS_MPRV) &&
323
- return true;
54
- get_field(env->mstatus, MSTATUS_MPV)) {
324
}
55
- two_stage_lookup = true;
325
326
static bool trans_grevi(DisasContext *ctx, arg_grevi *a)
327
{
328
REQUIRE_EXT(ctx, RVB);
329
-
330
- if (a->shamt >= TARGET_LONG_BITS) {
331
- return false;
56
- }
332
- }
57
-
333
-
58
if (riscv_cpu_virt_enabled(env) ||
334
- return gen_grevi(ctx, a);
59
((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
335
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_grevi);
60
access_type != MMU_INST_FETCH)) {
336
}
337
338
static bool trans_gorc(DisasContext *ctx, arg_gorc *a)
339
{
340
REQUIRE_EXT(ctx, RVB);
341
- return gen_shift(ctx, a, gen_helper_gorc);
342
+ return gen_shift(ctx, a, EXT_ZERO, gen_helper_gorc);
343
}
344
345
static bool trans_gorci(DisasContext *ctx, arg_gorci *a)
346
{
347
REQUIRE_EXT(ctx, RVB);
348
- return gen_shifti(ctx, a, gen_helper_gorc);
349
+ return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_gorc);
350
}
351
352
#define GEN_SHADD(SHAMT) \
353
@@ -XXX,XX +XXX,XX @@ static bool trans_bsetw(DisasContext *ctx, arg_bsetw *a)
354
{
355
REQUIRE_64BIT(ctx);
356
REQUIRE_EXT(ctx, RVB);
357
- return gen_shiftw(ctx, a, gen_bset);
358
+ ctx->w = true;
359
+ return gen_shift(ctx, a, EXT_NONE, gen_bset);
360
}
361
362
static bool trans_bsetiw(DisasContext *ctx, arg_bsetiw *a)
363
{
364
REQUIRE_64BIT(ctx);
365
REQUIRE_EXT(ctx, RVB);
366
- return gen_shiftiw(ctx, a, gen_bset);
367
+ ctx->w = true;
368
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset);
369
}
370
371
static bool trans_bclrw(DisasContext *ctx, arg_bclrw *a)
372
{
373
REQUIRE_64BIT(ctx);
374
REQUIRE_EXT(ctx, RVB);
375
- return gen_shiftw(ctx, a, gen_bclr);
376
+ ctx->w = true;
377
+ return gen_shift(ctx, a, EXT_NONE, gen_bclr);
378
}
379
380
static bool trans_bclriw(DisasContext *ctx, arg_bclriw *a)
381
{
382
REQUIRE_64BIT(ctx);
383
REQUIRE_EXT(ctx, RVB);
384
- return gen_shiftiw(ctx, a, gen_bclr);
385
+ ctx->w = true;
386
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr);
387
}
388
389
static bool trans_binvw(DisasContext *ctx, arg_binvw *a)
390
{
391
REQUIRE_64BIT(ctx);
392
REQUIRE_EXT(ctx, RVB);
393
- return gen_shiftw(ctx, a, gen_binv);
394
+ ctx->w = true;
395
+ return gen_shift(ctx, a, EXT_NONE, gen_binv);
396
}
397
398
static bool trans_binviw(DisasContext *ctx, arg_binviw *a)
399
{
400
REQUIRE_64BIT(ctx);
401
REQUIRE_EXT(ctx, RVB);
402
- return gen_shiftiw(ctx, a, gen_binv);
403
+ ctx->w = true;
404
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv);
405
}
406
407
static bool trans_bextw(DisasContext *ctx, arg_bextw *a)
408
{
409
REQUIRE_64BIT(ctx);
410
REQUIRE_EXT(ctx, RVB);
411
- return gen_shiftw(ctx, a, gen_bext);
412
+ ctx->w = true;
413
+ return gen_shift(ctx, a, EXT_NONE, gen_bext);
414
}
415
416
static bool trans_slow(DisasContext *ctx, arg_slow *a)
417
{
418
REQUIRE_64BIT(ctx);
419
REQUIRE_EXT(ctx, RVB);
420
- return gen_shiftw(ctx, a, gen_slo);
421
+ ctx->w = true;
422
+ return gen_shift(ctx, a, EXT_NONE, gen_slo);
423
}
424
425
static bool trans_sloiw(DisasContext *ctx, arg_sloiw *a)
426
{
427
REQUIRE_64BIT(ctx);
428
REQUIRE_EXT(ctx, RVB);
429
- return gen_shiftiw(ctx, a, gen_slo);
430
+ ctx->w = true;
431
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_slo);
432
}
433
434
static bool trans_srow(DisasContext *ctx, arg_srow *a)
435
{
436
REQUIRE_64BIT(ctx);
437
REQUIRE_EXT(ctx, RVB);
438
- return gen_shiftw(ctx, a, gen_sro);
439
+ ctx->w = true;
440
+ return gen_shift(ctx, a, EXT_ZERO, gen_sro);
441
}
442
443
static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a)
444
{
445
REQUIRE_64BIT(ctx);
446
REQUIRE_EXT(ctx, RVB);
447
- return gen_shiftiw(ctx, a, gen_sro);
448
+ ctx->w = true;
449
+ return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_sro);
450
}
451
452
static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2)
453
@@ -XXX,XX +XXX,XX @@ static bool trans_rorw(DisasContext *ctx, arg_rorw *a)
454
{
455
REQUIRE_64BIT(ctx);
456
REQUIRE_EXT(ctx, RVB);
457
- return gen_shiftw(ctx, a, gen_rorw);
458
+ ctx->w = true;
459
+ return gen_shift(ctx, a, EXT_NONE, gen_rorw);
460
}
461
462
static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
463
{
464
REQUIRE_64BIT(ctx);
465
REQUIRE_EXT(ctx, RVB);
466
- return gen_shiftiw(ctx, a, gen_rorw);
467
+ ctx->w = true;
468
+ return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_rorw);
469
}
470
471
static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
472
@@ -XXX,XX +XXX,XX @@ static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
473
{
474
REQUIRE_64BIT(ctx);
475
REQUIRE_EXT(ctx, RVB);
476
- return gen_shiftw(ctx, a, gen_rolw);
477
-}
478
-
479
-static void gen_grevw(TCGv ret, TCGv arg1, TCGv arg2)
480
-{
481
- tcg_gen_ext32u_tl(arg1, arg1);
482
- gen_helper_grev(ret, arg1, arg2);
483
+ ctx->w = true;
484
+ return gen_shift(ctx, a, EXT_NONE, gen_rolw);
485
}
486
487
static bool trans_grevw(DisasContext *ctx, arg_grevw *a)
488
{
489
REQUIRE_64BIT(ctx);
490
REQUIRE_EXT(ctx, RVB);
491
- return gen_shiftw(ctx, a, gen_grevw);
492
+ ctx->w = true;
493
+ return gen_shift(ctx, a, EXT_ZERO, gen_helper_grev);
494
}
495
496
static bool trans_greviw(DisasContext *ctx, arg_greviw *a)
497
{
498
REQUIRE_64BIT(ctx);
499
REQUIRE_EXT(ctx, RVB);
500
- return gen_shiftiw(ctx, a, gen_grevw);
501
-}
502
-
503
-static void gen_gorcw(TCGv ret, TCGv arg1, TCGv arg2)
504
-{
505
- tcg_gen_ext32u_tl(arg1, arg1);
506
- gen_helper_gorcw(ret, arg1, arg2);
507
+ ctx->w = true;
508
+ return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_grev);
509
}
510
511
static bool trans_gorcw(DisasContext *ctx, arg_gorcw *a)
512
{
513
REQUIRE_64BIT(ctx);
514
REQUIRE_EXT(ctx, RVB);
515
- return gen_shiftw(ctx, a, gen_gorcw);
516
+ ctx->w = true;
517
+ return gen_shift(ctx, a, EXT_ZERO, gen_helper_gorc);
518
}
519
520
static bool trans_gorciw(DisasContext *ctx, arg_gorciw *a)
521
{
522
REQUIRE_64BIT(ctx);
523
REQUIRE_EXT(ctx, RVB);
524
- return gen_shiftiw(ctx, a, gen_gorcw);
525
+ ctx->w = true;
526
+ return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_gorc);
527
}
528
529
#define GEN_SHADD_UW(SHAMT) \
530
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
531
index XXXXXXX..XXXXXXX 100644
532
--- a/target/riscv/insn_trans/trans_rvi.c.inc
533
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
534
@@ -XXX,XX +XXX,XX @@ static bool trans_andi(DisasContext *ctx, arg_andi *a)
535
536
static bool trans_slli(DisasContext *ctx, arg_slli *a)
537
{
538
- return gen_shifti(ctx, a, tcg_gen_shl_tl);
539
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
540
}
541
542
static bool trans_srli(DisasContext *ctx, arg_srli *a)
543
{
544
- return gen_shifti(ctx, a, tcg_gen_shr_tl);
545
+ return gen_shift_imm_fn(ctx, a, EXT_ZERO, tcg_gen_shri_tl);
546
}
547
548
static bool trans_srai(DisasContext *ctx, arg_srai *a)
549
{
550
- return gen_shifti(ctx, a, tcg_gen_sar_tl);
551
+ return gen_shift_imm_fn(ctx, a, EXT_SIGN, tcg_gen_sari_tl);
552
}
553
554
static bool trans_add(DisasContext *ctx, arg_add *a)
555
@@ -XXX,XX +XXX,XX @@ static bool trans_sub(DisasContext *ctx, arg_sub *a)
556
557
static bool trans_sll(DisasContext *ctx, arg_sll *a)
558
{
559
- return gen_shift(ctx, a, &tcg_gen_shl_tl);
560
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
561
}
562
563
static bool trans_slt(DisasContext *ctx, arg_slt *a)
564
@@ -XXX,XX +XXX,XX @@ static bool trans_xor(DisasContext *ctx, arg_xor *a)
565
566
static bool trans_srl(DisasContext *ctx, arg_srl *a)
567
{
568
- return gen_shift(ctx, a, &tcg_gen_shr_tl);
569
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
570
}
571
572
static bool trans_sra(DisasContext *ctx, arg_sra *a)
573
{
574
- return gen_shift(ctx, a, &tcg_gen_sar_tl);
575
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
576
}
577
578
static bool trans_or(DisasContext *ctx, arg_or *a)
579
@@ -XXX,XX +XXX,XX @@ static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
580
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
581
{
582
REQUIRE_64BIT(ctx);
583
- return gen_shiftiw(ctx, a, tcg_gen_shl_tl);
584
+ ctx->w = true;
585
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
586
}
587
588
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
589
{
590
REQUIRE_64BIT(ctx);
591
- TCGv t = tcg_temp_new();
592
- gen_get_gpr(ctx, t, a->rs1);
593
- tcg_gen_extract_tl(t, t, a->shamt, 32 - a->shamt);
594
- /* sign-extend for W instructions */
595
- tcg_gen_ext32s_tl(t, t);
596
- gen_set_gpr(ctx, a->rd, t);
597
- tcg_temp_free(t);
598
- return true;
599
+ ctx->w = true;
600
+ return gen_shift_imm_fn(ctx, a, EXT_ZERO, tcg_gen_shri_tl);
601
}
602
603
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
604
{
605
REQUIRE_64BIT(ctx);
606
- TCGv t = tcg_temp_new();
607
- gen_get_gpr(ctx, t, a->rs1);
608
- tcg_gen_sextract_tl(t, t, a->shamt, 32 - a->shamt);
609
- gen_set_gpr(ctx, a->rd, t);
610
- tcg_temp_free(t);
611
- return true;
612
+ ctx->w = true;
613
+ return gen_shift_imm_fn(ctx, a, EXT_SIGN, tcg_gen_sari_tl);
614
}
615
616
static bool trans_addw(DisasContext *ctx, arg_addw *a)
617
@@ -XXX,XX +XXX,XX @@ static bool trans_subw(DisasContext *ctx, arg_subw *a)
618
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
619
{
620
REQUIRE_64BIT(ctx);
621
- TCGv source1 = tcg_temp_new();
622
- TCGv source2 = tcg_temp_new();
623
-
624
- gen_get_gpr(ctx, source1, a->rs1);
625
- gen_get_gpr(ctx, source2, a->rs2);
626
-
627
- tcg_gen_andi_tl(source2, source2, 0x1F);
628
- tcg_gen_shl_tl(source1, source1, source2);
629
-
630
- tcg_gen_ext32s_tl(source1, source1);
631
- gen_set_gpr(ctx, a->rd, source1);
632
- tcg_temp_free(source1);
633
- tcg_temp_free(source2);
634
- return true;
635
+ ctx->w = true;
636
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
637
}
638
639
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
640
{
641
REQUIRE_64BIT(ctx);
642
- TCGv source1 = tcg_temp_new();
643
- TCGv source2 = tcg_temp_new();
644
-
645
- gen_get_gpr(ctx, source1, a->rs1);
646
- gen_get_gpr(ctx, source2, a->rs2);
647
-
648
- /* clear upper 32 */
649
- tcg_gen_ext32u_tl(source1, source1);
650
- tcg_gen_andi_tl(source2, source2, 0x1F);
651
- tcg_gen_shr_tl(source1, source1, source2);
652
-
653
- tcg_gen_ext32s_tl(source1, source1);
654
- gen_set_gpr(ctx, a->rd, source1);
655
- tcg_temp_free(source1);
656
- tcg_temp_free(source2);
657
- return true;
658
+ ctx->w = true;
659
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
660
}
661
662
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
663
{
664
REQUIRE_64BIT(ctx);
665
- TCGv source1 = tcg_temp_new();
666
- TCGv source2 = tcg_temp_new();
667
-
668
- gen_get_gpr(ctx, source1, a->rs1);
669
- gen_get_gpr(ctx, source2, a->rs2);
670
-
671
- /*
672
- * first, trick to get it to act like working on 32 bits (get rid of
673
- * upper 32, sign extend to fill space)
674
- */
675
- tcg_gen_ext32s_tl(source1, source1);
676
- tcg_gen_andi_tl(source2, source2, 0x1F);
677
- tcg_gen_sar_tl(source1, source1, source2);
678
-
679
- gen_set_gpr(ctx, a->rd, source1);
680
- tcg_temp_free(source1);
681
- tcg_temp_free(source2);
682
-
683
- return true;
684
+ ctx->w = true;
685
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
686
}
687
688
static bool trans_fence(DisasContext *ctx, arg_fence *a)
61
--
689
--
62
2.30.1
690
2.31.1
63
691
64
692
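The gen_shift refactoring above hinges on the operation width: oper_len() is 32 when ctx->w is set and TARGET_LONG_BITS otherwise, and both the shift-amount mask and the final sign-extension follow from it. A stand-alone sketch of the resulting sll/sllw behaviour, in plain C for illustration (the function is hypothetical, not part of QEMU, and folds the sign-extension that QEMU performs when writing the result back):

    #include <stdbool.h>
    #include <stdint.h>

    /* Word-sized shifts mask the shift amount to 5 bits and sign-extend the
     * 32-bit result; full-width shifts mask to 6 bits on RV64. */
    static uint64_t sll_generic(uint64_t rs1, uint64_t rs2, bool word_op)
    {
        unsigned width = word_op ? 32 : 64;   /* oper_len() analogue */
        unsigned shamt = rs2 & (width - 1);   /* like the andi in gen_shift */
        uint64_t result = rs1 << shamt;

        if (word_op) {
            result = (uint64_t)(int64_t)(int32_t)(uint32_t)result;  /* sign-extend the W result */
        }
        return result;
    }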
1
From: Jim Shu <cwshu@andestech.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Currently, PMP permission checking of the TLB page is bypassed on a TLB hit.
3
These operations can be done in one instruction on some hosts.
4
Fix it by propagating PMP permission to TLB page permission.
5
4
6
PMP permission checking also uses an MMU-style API to change TLB permission
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
and size.
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
8
9
Signed-off-by: Jim Shu <cwshu@andestech.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-id: 1613916082-19528-2-git-send-email-cwshu@andestech.com
8
Message-id: 20210823195529.560295-14-richard.henderson@linaro.org
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
10
---
14
target/riscv/pmp.h | 4 +-
11
target/riscv/insn_trans/trans_rvi.c.inc | 14 ++++++++++++--
15
target/riscv/cpu_helper.c | 84 +++++++++++++++++++++++++++++----------
12
1 file changed, 12 insertions(+), 2 deletions(-)
16
target/riscv/pmp.c | 80 +++++++++++++++++++++++++++----------
17
3 files changed, 125 insertions(+), 43 deletions(-)
18
13
19
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
14
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/pmp.h
16
--- a/target/riscv/insn_trans/trans_rvi.c.inc
22
+++ b/target/riscv/pmp.h
17
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
23
@@ -XXX,XX +XXX,XX @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
18
@@ -XXX,XX +XXX,XX @@ static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
24
target_ulong val);
19
return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
25
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
26
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
27
- target_ulong size, pmp_priv_t priv, target_ulong mode);
28
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
29
+ target_ulong mode);
30
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
31
target_ulong *tlb_size);
32
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
33
void pmp_update_rule_nums(CPURISCVState *env);
34
uint32_t pmp_get_num_rules(CPURISCVState *env);
35
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv);
36
37
#endif
38
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/cpu_helper.c
41
+++ b/target/riscv/cpu_helper.c
42
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
43
env->load_res = -1;
44
}
20
}
45
21
46
+/*
22
+static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
47
+ * get_physical_address_pmp - check PMP permission for this physical address
48
+ *
49
+ * Match the PMP region and check permission for this physical address and it's
50
+ * TLB page. Returns 0 if the permission checking was successful
51
+ *
52
+ * @env: CPURISCVState
53
+ * @prot: The returned protection attributes
54
+ * @tlb_size: TLB page size containing addr. It could be modified after PMP
55
+ * permission checking. NULL if not set TLB page for addr.
56
+ * @addr: The physical address to be checked permission
57
+ * @access_type: The type of MMU access
58
+ * @mode: Indicates current privilege level.
59
+ */
60
+static int get_physical_address_pmp(CPURISCVState *env, int *prot,
61
+ target_ulong *tlb_size, hwaddr addr,
62
+ int size, MMUAccessType access_type,
63
+ int mode)
64
+{
23
+{
65
+ pmp_priv_t pmp_priv;
24
+ tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
66
+ target_ulong tlb_size_pmp = 0;
67
+
68
+ if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
69
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
70
+ return TRANSLATE_SUCCESS;
71
+ }
72
+
73
+ if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
74
+ mode)) {
75
+ *prot = 0;
76
+ return TRANSLATE_PMP_FAIL;
77
+ }
78
+
79
+ *prot = pmp_priv_to_page_prot(pmp_priv);
80
+ if (tlb_size != NULL) {
81
+ if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
82
+ *tlb_size = tlb_size_pmp;
83
+ }
84
+ }
85
+
86
+ return TRANSLATE_SUCCESS;
87
+}
25
+}
88
+
26
+
89
/* get_physical_address - get the physical address for this virtual address
27
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
90
*
28
{
91
* Do a page table walk to obtain the physical address corresponding to a
29
REQUIRE_64BIT(ctx);
92
@@ -XXX,XX +XXX,XX @@ restart:
30
ctx->w = true;
93
pte_addr = base + idx * ptesize;
31
- return gen_shift_imm_fn(ctx, a, EXT_ZERO, tcg_gen_shri_tl);
94
}
32
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
95
96
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
97
- !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
98
- 1 << MMU_DATA_LOAD, PRV_S)) {
99
+ int pmp_prot;
100
+ int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
101
+ sizeof(target_ulong),
102
+ MMU_DATA_LOAD, PRV_S);
103
+ if (pmp_ret != TRANSLATE_SUCCESS) {
104
return TRANSLATE_PMP_FAIL;
105
}
106
107
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
108
#ifndef CONFIG_USER_ONLY
109
vaddr im_address;
110
hwaddr pa = 0;
111
- int prot, prot2;
112
+ int prot, prot2, prot_pmp;
113
bool pmp_violation = false;
114
bool first_stage_error = true;
115
bool two_stage_lookup = false;
116
int ret = TRANSLATE_FAIL;
117
int mode = mmu_idx;
118
- target_ulong tlb_size = 0;
119
+ /* default TLB page size */
120
+ target_ulong tlb_size = TARGET_PAGE_SIZE;
121
122
env->guest_phys_fault_addr = 0;
123
124
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
125
126
prot &= prot2;
127
128
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
129
- (ret == TRANSLATE_SUCCESS) &&
130
- !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
131
- ret = TRANSLATE_PMP_FAIL;
132
+ if (ret == TRANSLATE_SUCCESS) {
133
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
134
+ size, access_type, mode);
135
+ prot &= prot_pmp;
136
}
137
138
if (ret != TRANSLATE_SUCCESS) {
139
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
140
"%s address=%" VADDR_PRIx " ret %d physical "
141
TARGET_FMT_plx " prot %d\n",
142
__func__, address, ret, pa, prot);
143
- }
144
145
- if (riscv_feature(env, RISCV_FEATURE_PMP) &&
146
- (ret == TRANSLATE_SUCCESS) &&
147
- !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
148
- ret = TRANSLATE_PMP_FAIL;
149
+ if (ret == TRANSLATE_SUCCESS) {
150
+ ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
151
+ size, access_type, mode);
152
+ prot &= prot_pmp;
153
+ }
154
}
155
+
156
if (ret == TRANSLATE_PMP_FAIL) {
157
pmp_violation = true;
158
}
159
160
if (ret == TRANSLATE_SUCCESS) {
161
- if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
162
- tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
163
- prot, mmu_idx, tlb_size);
164
- } else {
165
- tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
166
- prot, mmu_idx, TARGET_PAGE_SIZE);
167
- }
168
+ tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
169
+ prot, mmu_idx, tlb_size);
170
return true;
171
} else if (probe) {
172
return false;
173
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/riscv/pmp.c
176
+++ b/target/riscv/pmp.c
177
@@ -XXX,XX +XXX,XX @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
178
return result;
179
}
180
181
+/*
182
+ * Check if the address has required RWX privs when no PMP entry is matched.
183
+ */
184
+static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
185
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
186
+ target_ulong mode)
187
+{
188
+ bool ret;
189
+
190
+ if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
191
+ /*
192
+ * Privileged spec v1.10 states if HW doesn't implement any PMP entry
193
+ * or no PMP entry matches an M-Mode access, the access succeeds.
194
+ */
195
+ ret = true;
196
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
197
+ } else {
198
+ /*
199
+ * Other modes are not allowed to succeed if they don't * match a rule,
200
+ * but there are rules. We've checked for no rule earlier in this
201
+ * function.
202
+ */
203
+ ret = false;
204
+ *allowed_privs = 0;
205
+ }
206
+
207
+ return ret;
208
+}
33
+}
209
+
34
+
210
35
+static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
211
/*
36
+{
212
* Public Interface
37
+ tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
213
@@ -XXX,XX +XXX,XX @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
38
}
214
* Check if the address has required RWX privs to complete desired operation
39
215
*/
40
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
216
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
217
- target_ulong size, pmp_priv_t privs, target_ulong mode)
218
+ target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
219
+ target_ulong mode)
220
{
41
{
221
int i = 0;
42
REQUIRE_64BIT(ctx);
222
int ret = -1;
43
ctx->w = true;
223
int pmp_size = 0;
44
- return gen_shift_imm_fn(ctx, a, EXT_SIGN, tcg_gen_sari_tl);
224
target_ulong s = 0;
45
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
225
target_ulong e = 0;
226
- pmp_priv_t allowed_privs = 0;
227
228
/* Short cut if no rules */
229
if (0 == pmp_get_num_rules(env)) {
230
- return (env->priv == PRV_M) ? true : false;
231
+ return pmp_hart_has_privs_default(env, addr, size, privs,
232
+ allowed_privs, mode);
233
}
234
235
if (size == 0) {
236
@@ -XXX,XX +XXX,XX @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
237
* check
238
*/
239
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
240
- allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
241
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
242
if ((mode != PRV_M) || pmp_is_locked(env, i)) {
243
- allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
244
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
245
}
246
247
- if ((privs & allowed_privs) == privs) {
248
- ret = 1;
249
- break;
250
- } else {
251
- ret = 0;
252
- break;
253
- }
254
+ ret = ((privs & *allowed_privs) == privs);
255
+ break;
256
}
257
}
258
259
/* No rule matched */
260
if (ret == -1) {
261
- if (mode == PRV_M) {
262
- ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
263
- * M-Mode access, the access succeeds */
264
- } else {
265
- ret = 0; /* Other modes are not allowed to succeed if they don't
266
- * match a rule, but there are rules. We've checked for
267
- * no rule earlier in this function. */
268
- }
269
+ return pmp_hart_has_privs_default(env, addr, size, privs,
270
+ allowed_privs, mode);
271
}
272
273
return ret == 1 ? true : false;
274
}
46
}
275
47
276
-
48
static bool trans_addw(DisasContext *ctx, arg_addw *a)
277
/*
278
* Handle a write to a pmpcfg CSP
279
*/
280
@@ -XXX,XX +XXX,XX @@ bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
281
282
return false;
283
}
284
+
285
+/*
286
+ * Convert PMP privilege to TLB page privilege.
287
+ */
288
+int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
289
+{
290
+ int prot = 0;
291
+
292
+ if (pmp_priv & PMP_READ) {
293
+ prot |= PAGE_READ;
294
+ }
295
+ if (pmp_priv & PMP_WRITE) {
296
+ prot |= PAGE_WRITE;
297
+ }
298
+ if (pmp_priv & PMP_EXEC) {
299
+ prot |= PAGE_EXEC;
300
+ }
301
+
302
+ return prot;
303
+}
304
--
49
--
305
2.30.1
50
2.31.1
306
51
307
52
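For reference, the W-form immediate shifts that the new gen_srliw/gen_sraiw helpers above express as single extract/sextract operations have the following architectural behaviour. The sketch is plain C for illustration only, and assumes the usual arithmetic behaviour of >> on negative signed values:

    #include <stdint.h>

    /* srliw: logical right shift of the low 32 bits of rs1, then sign-extend. */
    static uint64_t srliw(uint64_t rs1, unsigned shamt)
    {
        uint32_t w = (uint32_t)rs1 >> (shamt & 31);
        return (uint64_t)(int64_t)(int32_t)w;
    }

    /* sraiw: arithmetic right shift of the low 32 bits of rs1, then sign-extend. */
    static uint64_t sraiw(uint64_t rs1, unsigned shamt)
    {
        int32_t w = (int32_t)(uint32_t)rs1 >> (shamt & 31);
        return (uint64_t)(int64_t)w;
    }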
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The current condition for the use of background registers only
3
Narrow the scope of t0 in trans_jalr.
4
considers the hypervisor load and store instructions,
5
but not accesses from M mode via MSTATUS_MPRV+MPV.
6
4
7
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
5
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20210311103036.1401073-1-georg.kotheimer@kernkonzept.com
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20210823195529.560295-15-richard.henderson@linaro.org
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
10
---
12
target/riscv/cpu_helper.c | 2 +-
11
target/riscv/insn_trans/trans_rvi.c.inc | 25 ++++++++++---------------
13
1 file changed, 1 insertion(+), 1 deletion(-)
12
1 file changed, 10 insertions(+), 15 deletions(-)
14
13
15
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
14
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
16
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_helper.c
16
--- a/target/riscv/insn_trans/trans_rvi.c.inc
18
+++ b/target/riscv/cpu_helper.c
17
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
19
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
18
@@ -XXX,XX +XXX,XX @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
20
* was called. Background registers will be used if the guest has
19
21
* forced a two stage translation to be on (in HS or M mode).
20
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
22
*/
21
{
23
- if (!riscv_cpu_virt_enabled(env) && riscv_cpu_two_stage_lookup(mmu_idx)) {
22
- /* no chaining with JALR */
24
+ if (!riscv_cpu_virt_enabled(env) && two_stage) {
23
TCGLabel *misaligned = NULL;
25
use_background = true;
24
- TCGv t0 = tcg_temp_new();
25
-
26
27
- gen_get_gpr(ctx, cpu_pc, a->rs1);
28
- tcg_gen_addi_tl(cpu_pc, cpu_pc, a->imm);
29
+ tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
30
tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
31
32
if (!has_ext(ctx, RVC)) {
33
+ TCGv t0 = tcg_temp_new();
34
+
35
misaligned = gen_new_label();
36
tcg_gen_andi_tl(t0, cpu_pc, 0x2);
37
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
38
+ tcg_temp_free(t0);
26
}
39
}
27
40
41
if (a->rd != 0) {
42
tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
43
}
44
+
45
+ /* No chaining with JALR. */
46
lookup_and_goto_ptr(ctx);
47
48
if (misaligned) {
49
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
50
}
51
ctx->base.is_jmp = DISAS_NORETURN;
52
53
- tcg_temp_free(t0);
54
return true;
55
}
56
57
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
58
{
59
TCGLabel *l = gen_new_label();
60
- TCGv source1, source2;
61
- source1 = tcg_temp_new();
62
- source2 = tcg_temp_new();
63
- gen_get_gpr(ctx, source1, a->rs1);
64
- gen_get_gpr(ctx, source2, a->rs2);
65
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
66
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
67
68
- tcg_gen_brcond_tl(cond, source1, source2, l);
69
+ tcg_gen_brcond_tl(cond, src1, src2, l);
70
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
71
+
72
gen_set_label(l); /* branch taken */
73
74
if (!has_ext(ctx, RVC) && ((ctx->base.pc_next + a->imm) & 0x3)) {
75
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
76
}
77
ctx->base.is_jmp = DISAS_NORETURN;
78
79
- tcg_temp_free(source1);
80
- tcg_temp_free(source2);
81
-
82
return true;
83
}
84
28
--
85
--
29
2.30.1
86
2.31.1
30
87
31
88
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: 20210823195529.560295-16-richard.henderson@linaro.org
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/insn_trans/trans_rvi.c.inc | 38 +++++++++++++------------
10
1 file changed, 20 insertions(+), 18 deletions(-)
11
12
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/insn_trans/trans_rvi.c.inc
15
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
16
@@ -XXX,XX +XXX,XX @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
17
18
static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
19
{
20
- TCGv t0 = tcg_temp_new();
21
- TCGv t1 = tcg_temp_new();
22
- gen_get_gpr(ctx, t0, a->rs1);
23
- tcg_gen_addi_tl(t0, t0, a->imm);
24
-
25
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, memop);
26
- gen_set_gpr(ctx, a->rd, t1);
27
- tcg_temp_free(t0);
28
- tcg_temp_free(t1);
29
+ TCGv dest = dest_gpr(ctx, a->rd);
30
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
31
+
32
+ if (a->imm) {
33
+ TCGv temp = temp_new(ctx);
34
+ tcg_gen_addi_tl(temp, addr, a->imm);
35
+ addr = temp;
36
+ }
37
+
38
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
39
+ gen_set_gpr(ctx, a->rd, dest);
40
return true;
41
}
42
43
@@ -XXX,XX +XXX,XX @@ static bool trans_lhu(DisasContext *ctx, arg_lhu *a)
44
45
static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
46
{
47
- TCGv t0 = tcg_temp_new();
48
- TCGv dat = tcg_temp_new();
49
- gen_get_gpr(ctx, t0, a->rs1);
50
- tcg_gen_addi_tl(t0, t0, a->imm);
51
- gen_get_gpr(ctx, dat, a->rs2);
52
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
53
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
54
55
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx, memop);
56
- tcg_temp_free(t0);
57
- tcg_temp_free(dat);
58
+ if (a->imm) {
59
+ TCGv temp = temp_new(ctx);
60
+ tcg_gen_addi_tl(temp, addr, a->imm);
61
+ addr = temp;
62
+ }
63
+
64
+ tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
65
return true;
66
}
67
68
-
69
static bool trans_sb(DisasContext *ctx, arg_sb *a)
70
{
71
return gen_store(ctx, a, MO_SB);
72
--
73
2.31.1
74
75
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
3
We distinguish write-only by passing ret_value as NULL.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Message-id: 20210311094902.1377593-1-georg.kotheimer@kernkonzept.com
8
Message-id: 20210823195529.560295-17-richard.henderson@linaro.org
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
10
---
8
target/riscv/csr.c | 7 ++++---
11
target/riscv/csr.c | 23 +++++++++++++++--------
9
1 file changed, 4 insertions(+), 3 deletions(-)
12
1 file changed, 15 insertions(+), 8 deletions(-)
10
13
11
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
14
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
12
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
13
--- a/target/riscv/csr.c
16
--- a/target/riscv/csr.c
14
+++ b/target/riscv/csr.c
17
+++ b/target/riscv/csr.c
15
@@ -XXX,XX +XXX,XX @@ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
18
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
16
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
19
/* Shift the S bits to their VS bit location in mip */
17
SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD;
20
int ret = rmw_mip(env, 0, ret_value, new_value << 1,
18
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
21
(write_mask << 1) & vsip_writable_mask & env->hideleg);
19
-static const target_ulong hip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
22
- *ret_value &= VS_MODE_INTERRUPTS;
20
+static const target_ulong hip_writable_mask = MIP_VSSIP;
23
- /* Shift the VS bits to their S bit location in vsip */
21
+static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
24
- *ret_value >>= 1;
22
static const target_ulong vsip_writable_mask = MIP_VSSIP;
25
+
23
26
+ if (ret_value) {
24
static const char valid_vm_1_10_32[16] = {
27
+ *ret_value &= VS_MODE_INTERRUPTS;
25
@@ -XXX,XX +XXX,XX @@ static int rmw_hvip(CPURISCVState *env, int csrno, target_ulong *ret_value,
28
+ /* Shift the VS bits to their S bit location in vsip */
26
target_ulong new_value, target_ulong write_mask)
29
+ *ret_value >>= 1;
27
{
30
+ }
28
int ret = rmw_mip(env, 0, ret_value, new_value,
29
- write_mask & hip_writable_mask);
30
+ write_mask & hvip_writable_mask);
31
32
- *ret_value &= hip_writable_mask;
33
+ *ret_value &= hvip_writable_mask;
34
35
return ret;
31
return ret;
36
}
32
}
33
34
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_sip(CPURISCVState *env, int csrno,
35
write_mask & env->mideleg & sip_writable_mask);
36
}
37
38
- *ret_value &= env->mideleg;
39
+ if (ret_value) {
40
+ *ret_value &= env->mideleg;
41
+ }
42
return ret;
43
}
44
45
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
46
int ret = rmw_mip(env, 0, ret_value, new_value,
47
write_mask & hvip_writable_mask);
48
49
- *ret_value &= hvip_writable_mask;
50
-
51
+ if (ret_value) {
52
+ *ret_value &= hvip_writable_mask;
53
+ }
54
return ret;
55
}
56
57
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_hip(CPURISCVState *env, int csrno,
58
int ret = rmw_mip(env, 0, ret_value, new_value,
59
write_mask & hip_writable_mask);
60
61
- *ret_value &= hip_writable_mask;
62
-
63
+ if (ret_value) {
64
+ *ret_value &= hip_writable_mask;
65
+ }
66
return ret;
67
}
68
37
--
69
--
38
2.30.1
70
2.31.1
39
71
40
72
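The write-only convention described above, where a NULL ret_value means the caller does not want the old value back, can be illustrated with a stand-alone read-modify-write sketch; this is plain C with a made-up accessor and state variable, not the real rmw_mip()/rmw_sip() signatures:

    #include <stddef.h>
    #include <stdint.h>

    static uint64_t csr_state;   /* stand-in for the CSR being accessed */

    /* Bits selected by write_mask are replaced by new_value; the previous
     * value is reported only when ret_value is non-NULL. */
    static int rmw_sketch(uint64_t *ret_value, uint64_t new_value,
                          uint64_t write_mask)
    {
        uint64_t old = csr_state;

        csr_state = (old & ~write_mask) | (new_value & write_mask);
        if (ret_value) {
            *ret_value = old;
        }
        return 0;
    }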
1
From: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The previous implementation was broken in many ways:
3
We failed to write into *val for these read functions;
4
- Used mideleg instead of hideleg to mask accesses
4
replace them with read_zero. Only warn about an unsupported
5
- Used MIP_VSSIP instead of VS_MODE_INTERRUPTS to mask writes to vsie
5
non-zero value when writing a non-zero value.
6
- Did not shift between S bits and VS bits (VSEIP <-> SEIP, ...)
7
6
8
Signed-off-by: Georg Kotheimer <georg.kotheimer@kernkonzept.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20210311094738.1376795-1-georg.kotheimer@kernkonzept.com
10
Message-id: 20210823195529.560295-18-richard.henderson@linaro.org
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
12
---
13
target/riscv/csr.c | 68 +++++++++++++++++++++++-----------------------
13
target/riscv/csr.c | 26 ++++++++------------------
14
1 file changed, 34 insertions(+), 34 deletions(-)
14
1 file changed, 8 insertions(+), 18 deletions(-)
15
15
16
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
16
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/csr.c
18
--- a/target/riscv/csr.c
19
+++ b/target/riscv/csr.c
19
+++ b/target/riscv/csr.c
20
@@ -XXX,XX +XXX,XX @@ static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val)
20
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
21
return write_mstatus(env, CSR_MSTATUS, newval);
21
return RISCV_EXCP_NONE;
22
}
22
}
23
23
24
+static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val)
24
-static RISCVException read_hgeie(CPURISCVState *env, int csrno,
25
+{
25
- target_ulong *val)
26
+ /* Shift the VS bits to their S bit location in vsie */
27
+ *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1;
28
+ return 0;
29
+}
30
+
31
static int read_sie(CPURISCVState *env, int csrno, target_ulong *val)
32
{
33
if (riscv_cpu_virt_enabled(env)) {
34
- /* Tell the guest the VS bits, shifted to the S bit locations */
35
- *val = (env->mie & env->mideleg & VS_MODE_INTERRUPTS) >> 1;
36
+ read_vsie(env, CSR_VSIE, val);
37
} else {
38
*val = env->mie & env->mideleg;
39
}
40
return 0;
41
}
42
43
-static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
44
+static int write_vsie(CPURISCVState *env, int csrno, target_ulong val)
45
{
46
- target_ulong newval;
47
+ /* Shift the S bits to their VS bit location in mie */
48
+ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) |
49
+ ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS);
50
+ return write_mie(env, CSR_MIE, newval);
51
+}
52
53
+static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
54
+{
55
if (riscv_cpu_virt_enabled(env)) {
56
- /* Shift the guests S bits to VS */
57
- newval = (env->mie & ~VS_MODE_INTERRUPTS) |
58
- ((val << 1) & VS_MODE_INTERRUPTS);
59
+ write_vsie(env, CSR_VSIE, val);
60
} else {
61
- newval = (env->mie & ~S_MODE_INTERRUPTS) | (val & S_MODE_INTERRUPTS);
62
+ target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) |
63
+ (val & S_MODE_INTERRUPTS);
64
+ write_mie(env, CSR_MIE, newval);
65
}
66
67
- return write_mie(env, CSR_MIE, newval);
68
+ return 0;
69
}
70
71
static int read_stvec(CPURISCVState *env, int csrno, target_ulong *val)
72
@@ -XXX,XX +XXX,XX @@ static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val)
73
return 0;
74
}
75
76
+static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value,
77
+ target_ulong new_value, target_ulong write_mask)
78
+{
79
+ /* Shift the S bits to their VS bit location in mip */
80
+ int ret = rmw_mip(env, 0, ret_value, new_value << 1,
81
+ (write_mask << 1) & vsip_writable_mask & env->hideleg);
82
+ *ret_value &= VS_MODE_INTERRUPTS;
83
+ /* Shift the VS bits to their S bit location in vsip */
84
+ *ret_value >>= 1;
85
+ return ret;
86
+}
87
+
88
static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value,
89
target_ulong new_value, target_ulong write_mask)
90
{
91
int ret;
92
93
if (riscv_cpu_virt_enabled(env)) {
94
- /* Shift the new values to line up with the VS bits */
95
- ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value << 1,
96
- (write_mask & sip_writable_mask) << 1 & env->mideleg);
97
- ret &= vsip_writable_mask;
98
- ret >>= 1;
99
+ ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask);
100
} else {
101
ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
102
write_mask & env->mideleg & sip_writable_mask);
103
@@ -XXX,XX +XXX,XX @@ static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val)
104
return 0;
105
}
106
107
-static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value,
108
- target_ulong new_value, target_ulong write_mask)
109
-{
26
-{
110
- int ret = rmw_mip(env, 0, ret_value, new_value,
27
- qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
111
- write_mask & env->mideleg & vsip_writable_mask);
28
- return RISCV_EXCP_NONE;
112
- return ret;
113
-}
29
-}
114
-
30
-
115
-static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val)
31
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
32
target_ulong val)
33
{
34
- qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
35
+ if (val) {
36
+ qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
37
+ }
38
return RISCV_EXCP_NONE;
39
}
40
41
@@ -XXX,XX +XXX,XX @@ static RISCVException write_htinst(CPURISCVState *env, int csrno,
42
return RISCV_EXCP_NONE;
43
}
44
45
-static RISCVException read_hgeip(CPURISCVState *env, int csrno,
46
- target_ulong *val)
116
-{
47
-{
117
- *val = env->mie & env->mideleg & VS_MODE_INTERRUPTS;
48
- qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
118
- return 0;
49
- return RISCV_EXCP_NONE;
119
-}
50
-}
120
-
51
-
121
-static int write_vsie(CPURISCVState *env, int csrno, target_ulong val)
52
static RISCVException write_hgeip(CPURISCVState *env, int csrno,
122
-{
53
target_ulong val)
123
- target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg & MIP_VSSIP);
124
- return write_mie(env, CSR_MIE, newval);
125
-}
126
-
127
static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
128
{
54
{
129
*val = env->vstvec;
55
- qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
56
+ if (val) {
57
+ qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN.");
58
+ }
59
return RISCV_EXCP_NONE;
60
}
61
62
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
63
[CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip },
64
[CSR_HIE] = { "hie", hmode, read_hie, write_hie },
65
[CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren },
66
- [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie },
67
+ [CSR_HGEIE] = { "hgeie", hmode, read_zero, write_hgeie },
68
[CSR_HTVAL] = { "htval", hmode, read_htval, write_htval },
69
[CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst },
70
- [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, write_hgeip },
71
+ [CSR_HGEIP] = { "hgeip", hmode, read_zero, write_hgeip },
72
[CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp },
73
[CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta },
74
[CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah },
130
--
75
--
131
2.30.1
76
2.31.1
132
77
133
78
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Introduce csrr and csrw helpers, for read-only and write-only insns.
4
5
Note that we do not properly implement this in riscv_csrrw, in that
6
we cannot distinguish true read-only (rs1 == 0) from any other zero
7
write_mask from another source register -- this should still raise an
8
exception for read-only registers.
9
10
Only issue gen_io_start for CF_USE_ICOUNT.
11
Use ctx->zero for csrrc.
12
Use get_gpr and dest_gpr.
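As a standalone sketch of the rd/rs1 special cases above (hypothetical
helper-selection code for illustration, not the actual decode tables):

#include <stdio.h>

typedef enum {
    HELPER_CSRR,    /* read-only access, no write side effects */
    HELPER_CSRW,    /* write-only access, no read side effects */
    HELPER_CSRRW,   /* full read/modify/write */
} CSRHelper;

/* csrrw/csrrwi: rd == x0 means the CSR must not be read at all. */
static CSRHelper pick_csrrw(int rd)
{
    return rd == 0 ? HELPER_CSRW : HELPER_CSRRW;
}

/* csrrs/csrrc (and the immediate forms): rs1 == x0 means the CSR must not
 * be written; any other rs1, even one holding zero, still counts as a
 * write and must trigger write side effects. */
static CSRHelper pick_csrrs_csrrc(int rs1)
{
    return rs1 == 0 ? HELPER_CSRR : HELPER_CSRRW;
}

int main(void)
{
    printf("csrrw  rd=x0  -> %s\n", pick_csrrw(0) == HELPER_CSRW ? "csrw" : "csrrw");
    printf("csrrs  rs1=x0 -> %s\n", pick_csrrs_csrrc(0) == HELPER_CSRR ? "csrr" : "csrrw");
    return 0;
}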
13
14
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
17
Message-id: 20210823195529.560295-19-richard.henderson@linaro.org
18
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
19
---
20
target/riscv/helper.h | 6 +-
21
target/riscv/op_helper.c | 18 +--
22
target/riscv/insn_trans/trans_rvi.c.inc | 174 +++++++++++++++++-------
23
3 files changed, 132 insertions(+), 66 deletions(-)
24
25
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/helper.h
28
+++ b/target/riscv/helper.h
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(gorc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
30
DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
31
32
/* Special functions */
33
-DEF_HELPER_3(csrrw, tl, env, tl, tl)
34
-DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
35
-DEF_HELPER_4(csrrc, tl, env, tl, tl, tl)
36
+DEF_HELPER_2(csrr, tl, env, int)
37
+DEF_HELPER_3(csrw, void, env, int, tl)
38
+DEF_HELPER_4(csrrw, tl, env, int, tl, tl)
39
#ifndef CONFIG_USER_ONLY
40
DEF_HELPER_2(sret, tl, env, tl)
41
DEF_HELPER_2(mret, tl, env, tl)
42
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/riscv/op_helper.c
45
+++ b/target/riscv/op_helper.c
46
@@ -XXX,XX +XXX,XX @@ void helper_raise_exception(CPURISCVState *env, uint32_t exception)
47
riscv_raise_exception(env, exception, 0);
48
}
49
50
-target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
51
- target_ulong csr)
52
+target_ulong helper_csrr(CPURISCVState *env, int csr)
53
{
54
target_ulong val = 0;
55
- RISCVException ret = riscv_csrrw(env, csr, &val, src, -1);
56
+ RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);
57
58
if (ret != RISCV_EXCP_NONE) {
59
riscv_raise_exception(env, ret, GETPC());
60
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
61
return val;
62
}
63
64
-target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
65
- target_ulong csr, target_ulong rs1_pass)
66
+void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
67
{
68
- target_ulong val = 0;
69
- RISCVException ret = riscv_csrrw(env, csr, &val, -1, rs1_pass ? src : 0);
70
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, -1);
71
72
if (ret != RISCV_EXCP_NONE) {
73
riscv_raise_exception(env, ret, GETPC());
74
}
75
- return val;
76
}
77
78
-target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
79
- target_ulong csr, target_ulong rs1_pass)
80
+target_ulong helper_csrrw(CPURISCVState *env, int csr,
81
+ target_ulong src, target_ulong write_mask)
82
{
83
target_ulong val = 0;
84
- RISCVException ret = riscv_csrrw(env, csr, &val, 0, rs1_pass ? src : 0);
85
+ RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);
86
87
if (ret != RISCV_EXCP_NONE) {
88
riscv_raise_exception(env, ret, GETPC());
89
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/riscv/insn_trans/trans_rvi.c.inc
92
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
93
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
94
return true;
95
}
96
97
-#define RISCV_OP_CSR_PRE do {\
98
- source1 = tcg_temp_new(); \
99
- csr_store = tcg_temp_new(); \
100
- dest = tcg_temp_new(); \
101
- rs1_pass = tcg_temp_new(); \
102
- gen_get_gpr(ctx, source1, a->rs1); \
103
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); \
104
- tcg_gen_movi_tl(rs1_pass, a->rs1); \
105
- tcg_gen_movi_tl(csr_store, a->csr); \
106
- gen_io_start();\
107
-} while (0)
108
-
109
-#define RISCV_OP_CSR_POST do {\
110
- gen_set_gpr(ctx, a->rd, dest); \
111
- tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); \
112
- exit_tb(ctx); \
113
- ctx->base.is_jmp = DISAS_NORETURN; \
114
- tcg_temp_free(source1); \
115
- tcg_temp_free(csr_store); \
116
- tcg_temp_free(dest); \
117
- tcg_temp_free(rs1_pass); \
118
-} while (0)
119
+static bool do_csr_post(DisasContext *ctx)
120
+{
121
+ /* We may have changed important cpu state -- exit to main loop. */
122
+ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
123
+ exit_tb(ctx);
124
+ ctx->base.is_jmp = DISAS_NORETURN;
125
+ return true;
126
+}
127
+
128
+static bool do_csrr(DisasContext *ctx, int rd, int rc)
129
+{
130
+ TCGv dest = dest_gpr(ctx, rd);
131
+ TCGv_i32 csr = tcg_constant_i32(rc);
132
+
133
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
134
+ gen_io_start();
135
+ }
136
+ gen_helper_csrr(dest, cpu_env, csr);
137
+ gen_set_gpr(ctx, rd, dest);
138
+ return do_csr_post(ctx);
139
+}
140
+
141
+static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
142
+{
143
+ TCGv_i32 csr = tcg_constant_i32(rc);
144
145
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
146
+ gen_io_start();
147
+ }
148
+ gen_helper_csrw(cpu_env, csr, src);
149
+ return do_csr_post(ctx);
150
+}
151
+
152
+static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
153
+{
154
+ TCGv dest = dest_gpr(ctx, rd);
155
+ TCGv_i32 csr = tcg_constant_i32(rc);
156
+
157
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
158
+ gen_io_start();
159
+ }
160
+ gen_helper_csrrw(dest, cpu_env, csr, src, mask);
161
+ gen_set_gpr(ctx, rd, dest);
162
+ return do_csr_post(ctx);
163
+}
164
165
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
166
{
167
- TCGv source1, csr_store, dest, rs1_pass;
168
- RISCV_OP_CSR_PRE;
169
- gen_helper_csrrw(dest, cpu_env, source1, csr_store);
170
- RISCV_OP_CSR_POST;
171
- return true;
172
+ TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
173
+
174
+ /*
175
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
176
+ * side effects that might occur on a csr read.
177
+ */
178
+ if (a->rd == 0) {
179
+ return do_csrw(ctx, a->csr, src);
180
+ }
181
+
182
+ TCGv mask = tcg_constant_tl(-1);
183
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
184
}
185
186
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
187
{
188
- TCGv source1, csr_store, dest, rs1_pass;
189
- RISCV_OP_CSR_PRE;
190
- gen_helper_csrrs(dest, cpu_env, source1, csr_store, rs1_pass);
191
- RISCV_OP_CSR_POST;
192
- return true;
193
+ /*
194
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
195
+ * cause any of the side effects that might occur on a csr write.
196
+ * Note that if rs1 specifies a register other than x0, holding
197
+ * a zero value, the instruction will still attempt to write the
198
+ * unmodified value back to the csr and will cause side effects.
199
+ */
200
+ if (a->rs1 == 0) {
201
+ return do_csrr(ctx, a->rd, a->csr);
202
+ }
203
+
204
+ TCGv ones = tcg_constant_tl(-1);
205
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
206
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
207
}
208
209
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
210
{
211
- TCGv source1, csr_store, dest, rs1_pass;
212
- RISCV_OP_CSR_PRE;
213
- gen_helper_csrrc(dest, cpu_env, source1, csr_store, rs1_pass);
214
- RISCV_OP_CSR_POST;
215
- return true;
216
+ /*
217
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
218
+ * cause any of the side effects that might occur on a csr write.
219
+ * Note that if rs1 specifies a register other than x0, holding
220
+ * a zero value, the instruction will still attempt to write the
221
+ * unmodified value back to the csr and will cause side effects.
222
+ */
223
+ if (a->rs1 == 0) {
224
+ return do_csrr(ctx, a->rd, a->csr);
225
+ }
226
+
227
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
228
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
229
}
230
231
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
232
{
233
- TCGv source1, csr_store, dest, rs1_pass;
234
- RISCV_OP_CSR_PRE;
235
- gen_helper_csrrw(dest, cpu_env, rs1_pass, csr_store);
236
- RISCV_OP_CSR_POST;
237
- return true;
238
+ TCGv src = tcg_constant_tl(a->rs1);
239
+
240
+ /*
241
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
242
+ * side effects that might occur on a csr read.
243
+ */
244
+ if (a->rd == 0) {
245
+ return do_csrw(ctx, a->csr, src);
246
+ }
247
+
248
+ TCGv mask = tcg_constant_tl(-1);
249
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
250
}
251
252
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
253
{
254
- TCGv source1, csr_store, dest, rs1_pass;
255
- RISCV_OP_CSR_PRE;
256
- gen_helper_csrrs(dest, cpu_env, rs1_pass, csr_store, rs1_pass);
257
- RISCV_OP_CSR_POST;
258
- return true;
259
+ /*
260
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
261
+ * cause any of the side effects that might occur on a csr write.
262
+ * Note that if rs1 specifies a register other than x0, holding
263
+ * a zero value, the instruction will still attempt to write the
264
+ * unmodified value back to the csr and will cause side effects.
265
+ */
266
+ if (a->rs1 == 0) {
267
+ return do_csrr(ctx, a->rd, a->csr);
268
+ }
269
+
270
+ TCGv ones = tcg_constant_tl(-1);
271
+ TCGv mask = tcg_constant_tl(a->rs1);
272
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
273
}
274
275
static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
276
{
277
- TCGv source1, csr_store, dest, rs1_pass;
278
- RISCV_OP_CSR_PRE;
279
- gen_helper_csrrc(dest, cpu_env, rs1_pass, csr_store, rs1_pass);
280
- RISCV_OP_CSR_POST;
281
- return true;
282
+ /*
283
+ * If rs1 == 0, the insn shall not write to the csr at all, nor
284
+ * cause any of the side effects that might occur on a csr write.
285
+ * Note that if rs1 specifies a register other than x0, holding
286
+ * a zero value, the instruction will still attempt to write the
287
+ * unmodified value back to the csr and will cause side effects.
288
+ */
289
+ if (a->rs1 == 0) {
290
+ return do_csrr(ctx, a->rd, a->csr);
291
+ }
292
+
293
+ TCGv mask = tcg_constant_tl(a->rs1);
294
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
295
}
296
--
297
2.31.1
298
299
1
From: Jim Shu <cwshu@andestech.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
If the PMP permission of any address has been changed by updating a PMP entry,
3
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
4
flush all TLB pages to prevent stale permissions from being used.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
6
Signed-off-by: Jim Shu <cwshu@andestech.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 1613916082-19528-4-git-send-email-cwshu@andestech.com
6
Message-id: 20210823195529.560295-20-richard.henderson@linaro.org
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
8
---
11
target/riscv/pmp.c | 4 ++++
9
target/riscv/insn_trans/trans_rva.c.inc | 47 ++++++++++---------------
12
1 file changed, 4 insertions(+)
10
1 file changed, 19 insertions(+), 28 deletions(-)
13
11
14
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
12
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
15
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/pmp.c
14
--- a/target/riscv/insn_trans/trans_rva.c.inc
17
+++ b/target/riscv/pmp.c
15
+++ b/target/riscv/insn_trans/trans_rva.c.inc
18
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@
19
#include "qapi/error.h"
17
* this program. If not, see <http://www.gnu.org/licenses/>.
20
#include "cpu.h"
18
*/
21
#include "trace.h"
19
22
+#include "exec/exec-all.h"
20
-static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
23
21
+static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
24
static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
22
{
25
uint8_t val);
23
- TCGv src1 = tcg_temp_new();
26
@@ -XXX,XX +XXX,XX @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
24
- /* Put addr in load_res, data in load_val. */
27
cfg_val = (val >> 8 * i) & 0xff;
25
- gen_get_gpr(ctx, src1, a->rs1);
28
pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
26
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
27
+
28
if (a->rl) {
29
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
30
}
31
@@ -XXX,XX +XXX,XX @@ static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
32
if (a->aq) {
33
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
29
}
34
}
30
+
35
+
31
+ /* If PMP permission of any addr has been changed, flush TLB pages. */
36
+ /* Put addr in load_res, data in load_val. */
32
+ tlb_flush(env_cpu(env));
37
tcg_gen_mov_tl(load_res, src1);
38
gen_set_gpr(ctx, a->rd, load_val);
39
40
- tcg_temp_free(src1);
41
return true;
33
}
42
}
34
43
44
-static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
45
+static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
46
{
47
- TCGv src1 = tcg_temp_new();
48
- TCGv src2 = tcg_temp_new();
49
- TCGv dat = tcg_temp_new();
50
+ TCGv dest, src1, src2;
51
TCGLabel *l1 = gen_new_label();
52
TCGLabel *l2 = gen_new_label();
53
54
- gen_get_gpr(ctx, src1, a->rs1);
55
+ src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
56
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
57
58
- gen_get_gpr(ctx, src2, a->rs2);
59
/*
60
* Note that the TCG atomic primitives are SC,
61
* so we can ignore AQ/RL along this path.
62
*/
63
- tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
64
+ dest = dest_gpr(ctx, a->rd);
65
+ src2 = get_gpr(ctx, a->rs2, EXT_NONE);
66
+ tcg_gen_atomic_cmpxchg_tl(dest, load_res, load_val, src2,
67
ctx->mem_idx, mop);
68
- tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
69
- gen_set_gpr(ctx, a->rd, dat);
70
+ tcg_gen_setcond_tl(TCG_COND_NE, dest, dest, load_val);
71
+ gen_set_gpr(ctx, a->rd, dest);
72
tcg_gen_br(l2);
73
74
gen_set_label(l1);
75
@@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
76
* provide the memory barrier implied by AQ/RL.
77
*/
78
tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
79
- tcg_gen_movi_tl(dat, 1);
80
- gen_set_gpr(ctx, a->rd, dat);
81
+ gen_set_gpr(ctx, a->rd, tcg_constant_tl(1));
82
83
gen_set_label(l2);
84
/*
85
@@ -XXX,XX +XXX,XX @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
86
*/
87
tcg_gen_movi_tl(load_res, -1);
88
89
- tcg_temp_free(dat);
90
- tcg_temp_free(src1);
91
- tcg_temp_free(src2);
92
return true;
93
}
94
95
@@ -XXX,XX +XXX,XX @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
96
void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp),
97
MemOp mop)
98
{
99
- TCGv src1 = tcg_temp_new();
100
- TCGv src2 = tcg_temp_new();
101
-
102
- gen_get_gpr(ctx, src1, a->rs1);
103
- gen_get_gpr(ctx, src2, a->rs2);
104
+ TCGv dest = dest_gpr(ctx, a->rd);
105
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
106
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
107
108
- (*func)(src2, src1, src2, ctx->mem_idx, mop);
109
+ func(dest, src1, src2, ctx->mem_idx, mop);
110
111
- gen_set_gpr(ctx, a->rd, src2);
112
- tcg_temp_free(src1);
113
- tcg_temp_free(src2);
114
+ gen_set_gpr(ctx, a->rd, dest);
115
return true;
116
}
35
117
36
--
118
--
37
2.30.1
119
2.31.1
38
120
39
121
diff view generated by jsdifflib
1
From: Bin Meng <bin.meng@windriver.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Since HSS commit c20a89f8dcac, the Icicle Kit reference design has
3
Always use tcg_gen_deposit_z_tl; the special case for
4
been updated to use a register mapped at 0x4f000000 instead of a
4
shamt >= 32 is handled there.
5
GPIO to control whether eMMC or SD card is to be used. With this
6
support, the same HSS image can be used for both eMMC and SD card
7
boot flow, while previously two different board configurations were
8
used. This is undocumented but one can take a look at the HSS code
9
HSS_MMCInit() in services/mmc/mmc_api.c.
10
5
11
With this commit, an HSS image built from the 2020.12 release boots again.
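A standalone check of the tcg_gen_deposit_z_tl equivalence claimed above
for slli.uw, assuming a 64-bit target and modelling the deposit-and-zero
operation with plain integer arithmetic (a sketch, not the TCG op itself):

#include <assert.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Model of deposit-and-zero: take the low 'len' bits of src and place
 * them at bit 'pos', zeroing everything else. */
static uint64_t deposit_z64(uint64_t src, unsigned pos, unsigned len)
{
    uint64_t mask = (len >= 64) ? ~0ULL : ((1ULL << len) - 1);
    return (src & mask) << pos;
}

/* Reference semantics of slli.uw: zero-extend the low 32 bits, then shift. */
static uint64_t slli_uw_ref(uint64_t rs1, unsigned shamt)
{
    return (uint64_t)(uint32_t)rs1 << shamt;
}

int main(void)
{
    uint64_t rs1 = 0xdeadbeefcafef00dULL;

    /* One deposit of width MIN(32, 64 - shamt) covers shamt < 32 and >= 32. */
    for (unsigned shamt = 0; shamt < 64; shamt++) {
        assert(deposit_z64(rs1, shamt, MIN(32, 64 - shamt)) ==
               slli_uw_ref(rs1, shamt));
    }
    return 0;
}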
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
13
Signed-off-by: Bin Meng <bin.meng@windriver.com>
14
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
15
Message-id: 20210322075248.136255-1-bmeng.cn@gmail.com
8
Message-id: 20210823195529.560295-21-richard.henderson@linaro.org
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
10
---
18
include/hw/riscv/microchip_pfsoc.h | 1 +
11
target/riscv/insn_trans/trans_rvb.c.inc | 19 ++++++-------------
19
hw/riscv/microchip_pfsoc.c | 6 ++++++
12
1 file changed, 6 insertions(+), 13 deletions(-)
20
2 files changed, 7 insertions(+)
21
13
22
diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h
14
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
23
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
24
--- a/include/hw/riscv/microchip_pfsoc.h
16
--- a/target/riscv/insn_trans/trans_rvb.c.inc
25
+++ b/include/hw/riscv/microchip_pfsoc.h
17
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
26
@@ -XXX,XX +XXX,XX @@ enum {
18
@@ -XXX,XX +XXX,XX @@ static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a)
27
MICROCHIP_PFSOC_ENVM_DATA,
19
return gen_arith(ctx, a, EXT_NONE, gen_add_uw);
28
MICROCHIP_PFSOC_QSPI_XIP,
20
}
29
MICROCHIP_PFSOC_IOSCB,
21
30
+ MICROCHIP_PFSOC_EMMC_SD_MUX,
22
+static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt)
31
MICROCHIP_PFSOC_DRAM_LO,
23
+{
32
MICROCHIP_PFSOC_DRAM_LO_ALIAS,
24
+ tcg_gen_deposit_z_tl(dest, src, shamt, MIN(32, TARGET_LONG_BITS - shamt));
33
MICROCHIP_PFSOC_DRAM_HI,
25
+}
34
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/hw/riscv/microchip_pfsoc.c
37
+++ b/hw/riscv/microchip_pfsoc.c
38
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry microchip_pfsoc_memmap[] = {
39
[MICROCHIP_PFSOC_ENVM_DATA] = { 0x20220000, 0x20000 },
40
[MICROCHIP_PFSOC_QSPI_XIP] = { 0x21000000, 0x1000000 },
41
[MICROCHIP_PFSOC_IOSCB] = { 0x30000000, 0x10000000 },
42
+ [MICROCHIP_PFSOC_EMMC_SD_MUX] = { 0x4f000000, 0x4 },
43
[MICROCHIP_PFSOC_DRAM_LO] = { 0x80000000, 0x40000000 },
44
[MICROCHIP_PFSOC_DRAM_LO_ALIAS] = { 0xc0000000, 0x40000000 },
45
[MICROCHIP_PFSOC_DRAM_HI] = { 0x1000000000, 0x0 },
46
@@ -XXX,XX +XXX,XX @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp)
47
sysbus_mmio_map(SYS_BUS_DEVICE(&s->ioscb), 0,
48
memmap[MICROCHIP_PFSOC_IOSCB].base);
49
50
+ /* eMMC/SD mux */
51
+ create_unimplemented_device("microchip.pfsoc.emmc_sd_mux",
52
+ memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].base,
53
+ memmap[MICROCHIP_PFSOC_EMMC_SD_MUX].size);
54
+
26
+
55
/* QSPI Flash */
27
static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a)
56
memory_region_init_rom(qspi_xip_mem, OBJECT(dev),
28
{
57
"microchip.pfsoc.qspi_xip",
29
REQUIRE_64BIT(ctx);
30
REQUIRE_EXT(ctx, RVB);
31
-
32
- TCGv source1 = tcg_temp_new();
33
- gen_get_gpr(ctx, source1, a->rs1);
34
-
35
- if (a->shamt < 32) {
36
- tcg_gen_deposit_z_tl(source1, source1, a->shamt, 32);
37
- } else {
38
- tcg_gen_shli_tl(source1, source1, a->shamt);
39
- }
40
-
41
- gen_set_gpr(ctx, a->rd, source1);
42
- tcg_temp_free(source1);
43
- return true;
44
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw);
45
}
58
--
46
--
59
2.30.1
47
2.31.1
60
48
61
49
1
From: Bin Meng <bin.meng@windriver.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Per SST25VF016B datasheet [1], SST flash requires a dummy byte after
3
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
4
the address bytes. Note only SPI mode is supported by SST flashes.
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
[1] http://ww1.microchip.com/downloads/en/devicedoc/s71271_04.pdf
6
Message-id: 20210823195529.560295-22-richard.henderson@linaro.org
7
8
Signed-off-by: Bin Meng <bin.meng@windriver.com>
9
Acked-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20210306060152.7250-1-bmeng.cn@gmail.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
8
---
13
hw/block/m25p80.c | 3 +++
9
target/riscv/insn_trans/trans_rvf.c.inc | 146 ++++++++++++------------
14
1 file changed, 3 insertions(+)
10
1 file changed, 70 insertions(+), 76 deletions(-)
15
11
16
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
12
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
17
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/block/m25p80.c
14
--- a/target/riscv/insn_trans/trans_rvf.c.inc
19
+++ b/hw/block/m25p80.c
15
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
20
@@ -XXX,XX +XXX,XX @@ static void decode_fast_read_cmd(Flash *s)
16
@@ -XXX,XX +XXX,XX @@
21
s->needed_bytes = get_addr_length(s);
17
22
switch (get_man(s)) {
18
static bool trans_flw(DisasContext *ctx, arg_flw *a)
23
/* Dummy cycles - modeled with bytes writes instead of bits */
19
{
24
+ case MAN_SST:
20
+ TCGv_i64 dest;
25
+ s->needed_bytes += 1;
21
+ TCGv addr;
26
+ break;
22
+
27
case MAN_WINBOND:
23
REQUIRE_FPU;
28
s->needed_bytes += 8;
24
REQUIRE_EXT(ctx, RVF);
29
break;
25
- TCGv t0 = tcg_temp_new();
26
- gen_get_gpr(ctx, t0, a->rs1);
27
- tcg_gen_addi_tl(t0, t0, a->imm);
28
29
- tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL);
30
- gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
31
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
32
+ if (a->imm) {
33
+ TCGv temp = temp_new(ctx);
34
+ tcg_gen_addi_tl(temp, addr, a->imm);
35
+ addr = temp;
36
+ }
37
+
38
+ dest = cpu_fpr[a->rd];
39
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
40
+ gen_nanbox_s(dest, dest);
41
42
- tcg_temp_free(t0);
43
mark_fs_dirty(ctx);
44
return true;
45
}
46
47
static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
48
{
49
+ TCGv addr;
50
+
51
REQUIRE_FPU;
52
REQUIRE_EXT(ctx, RVF);
53
- TCGv t0 = tcg_temp_new();
54
- gen_get_gpr(ctx, t0, a->rs1);
55
56
- tcg_gen_addi_tl(t0, t0, a->imm);
57
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
58
+ if (a->imm) {
59
+ TCGv temp = tcg_temp_new();
60
+ tcg_gen_addi_tl(temp, addr, a->imm);
61
+ addr = temp;
62
+ }
63
64
- tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUL);
65
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
66
67
- tcg_temp_free(t0);
68
return true;
69
}
70
71
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
72
REQUIRE_FPU;
73
REQUIRE_EXT(ctx, RVF);
74
75
- TCGv t0 = tcg_temp_new();
76
- gen_set_rm(ctx, a->rm);
77
- gen_helper_fcvt_w_s(t0, cpu_env, cpu_fpr[a->rs1]);
78
- gen_set_gpr(ctx, a->rd, t0);
79
- tcg_temp_free(t0);
80
+ TCGv dest = dest_gpr(ctx, a->rd);
81
82
+ gen_set_rm(ctx, a->rm);
83
+ gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
84
+ gen_set_gpr(ctx, a->rd, dest);
85
return true;
86
}
87
88
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
89
REQUIRE_FPU;
90
REQUIRE_EXT(ctx, RVF);
91
92
- TCGv t0 = tcg_temp_new();
93
- gen_set_rm(ctx, a->rm);
94
- gen_helper_fcvt_wu_s(t0, cpu_env, cpu_fpr[a->rs1]);
95
- gen_set_gpr(ctx, a->rd, t0);
96
- tcg_temp_free(t0);
97
+ TCGv dest = dest_gpr(ctx, a->rd);
98
99
+ gen_set_rm(ctx, a->rm);
100
+ gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
101
+ gen_set_gpr(ctx, a->rd, dest);
102
return true;
103
}
104
105
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
106
REQUIRE_FPU;
107
REQUIRE_EXT(ctx, RVF);
108
109
- TCGv t0 = tcg_temp_new();
110
+ TCGv dest = dest_gpr(ctx, a->rd);
111
112
#if defined(TARGET_RISCV64)
113
- tcg_gen_ext32s_tl(t0, cpu_fpr[a->rs1]);
114
+ tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
115
#else
116
- tcg_gen_extrl_i64_i32(t0, cpu_fpr[a->rs1]);
117
+ tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
118
#endif
119
120
- gen_set_gpr(ctx, a->rd, t0);
121
- tcg_temp_free(t0);
122
-
123
+ gen_set_gpr(ctx, a->rd, dest);
124
return true;
125
}
126
127
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
128
{
129
REQUIRE_FPU;
130
REQUIRE_EXT(ctx, RVF);
131
- TCGv t0 = tcg_temp_new();
132
- gen_helper_feq_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
133
- gen_set_gpr(ctx, a->rd, t0);
134
- tcg_temp_free(t0);
135
+
136
+ TCGv dest = dest_gpr(ctx, a->rd);
137
+
138
+ gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
139
+ gen_set_gpr(ctx, a->rd, dest);
140
return true;
141
}
142
143
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
144
{
145
REQUIRE_FPU;
146
REQUIRE_EXT(ctx, RVF);
147
- TCGv t0 = tcg_temp_new();
148
- gen_helper_flt_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
149
- gen_set_gpr(ctx, a->rd, t0);
150
- tcg_temp_free(t0);
151
+
152
+ TCGv dest = dest_gpr(ctx, a->rd);
153
+
154
+ gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
155
+ gen_set_gpr(ctx, a->rd, dest);
156
return true;
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
160
{
161
REQUIRE_FPU;
162
REQUIRE_EXT(ctx, RVF);
163
- TCGv t0 = tcg_temp_new();
164
- gen_helper_fle_s(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
165
- gen_set_gpr(ctx, a->rd, t0);
166
- tcg_temp_free(t0);
167
+
168
+ TCGv dest = dest_gpr(ctx, a->rd);
169
+
170
+ gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
171
+ gen_set_gpr(ctx, a->rd, dest);
172
return true;
173
}
174
175
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
176
REQUIRE_FPU;
177
REQUIRE_EXT(ctx, RVF);
178
179
- TCGv t0 = tcg_temp_new();
180
-
181
- gen_helper_fclass_s(t0, cpu_fpr[a->rs1]);
182
-
183
- gen_set_gpr(ctx, a->rd, t0);
184
- tcg_temp_free(t0);
185
+ TCGv dest = dest_gpr(ctx, a->rd);
186
187
+ gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
188
+ gen_set_gpr(ctx, a->rd, dest);
189
return true;
190
}
191
192
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
193
REQUIRE_FPU;
194
REQUIRE_EXT(ctx, RVF);
195
196
- TCGv t0 = tcg_temp_new();
197
- gen_get_gpr(ctx, t0, a->rs1);
198
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
199
200
gen_set_rm(ctx, a->rm);
201
- gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, t0);
202
+ gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);
203
204
mark_fs_dirty(ctx);
205
- tcg_temp_free(t0);
206
-
207
return true;
208
}
209
210
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
211
REQUIRE_FPU;
212
REQUIRE_EXT(ctx, RVF);
213
214
- TCGv t0 = tcg_temp_new();
215
- gen_get_gpr(ctx, t0, a->rs1);
216
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
217
218
gen_set_rm(ctx, a->rm);
219
- gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, t0);
220
+ gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);
221
222
mark_fs_dirty(ctx);
223
- tcg_temp_free(t0);
224
-
225
return true;
226
}
227
228
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
229
REQUIRE_FPU;
230
REQUIRE_EXT(ctx, RVF);
231
232
- TCGv t0 = tcg_temp_new();
233
- gen_get_gpr(ctx, t0, a->rs1);
234
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
235
236
- tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0);
237
+ tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
238
gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
239
240
mark_fs_dirty(ctx);
241
- tcg_temp_free(t0);
242
-
243
return true;
244
}
245
246
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
247
REQUIRE_FPU;
248
REQUIRE_EXT(ctx, RVF);
249
250
- TCGv t0 = tcg_temp_new();
251
+ TCGv dest = dest_gpr(ctx, a->rd);
252
+
253
gen_set_rm(ctx, a->rm);
254
- gen_helper_fcvt_l_s(t0, cpu_env, cpu_fpr[a->rs1]);
255
- gen_set_gpr(ctx, a->rd, t0);
256
- tcg_temp_free(t0);
257
+ gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
258
+ gen_set_gpr(ctx, a->rd, dest);
259
return true;
260
}
261
262
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
263
REQUIRE_FPU;
264
REQUIRE_EXT(ctx, RVF);
265
266
- TCGv t0 = tcg_temp_new();
267
+ TCGv dest = dest_gpr(ctx, a->rd);
268
+
269
gen_set_rm(ctx, a->rm);
270
- gen_helper_fcvt_lu_s(t0, cpu_env, cpu_fpr[a->rs1]);
271
- gen_set_gpr(ctx, a->rd, t0);
272
- tcg_temp_free(t0);
273
+ gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
274
+ gen_set_gpr(ctx, a->rd, dest);
275
return true;
276
}
277
278
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
279
REQUIRE_FPU;
280
REQUIRE_EXT(ctx, RVF);
281
282
- TCGv t0 = tcg_temp_new();
283
- gen_get_gpr(ctx, t0, a->rs1);
284
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
285
286
gen_set_rm(ctx, a->rm);
287
- gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, t0);
288
+ gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);
289
290
mark_fs_dirty(ctx);
291
- tcg_temp_free(t0);
292
return true;
293
}
294
295
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
296
REQUIRE_FPU;
297
REQUIRE_EXT(ctx, RVF);
298
299
- TCGv t0 = tcg_temp_new();
300
- gen_get_gpr(ctx, t0, a->rs1);
301
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
302
303
gen_set_rm(ctx, a->rm);
304
- gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, t0);
305
+ gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);
306
307
mark_fs_dirty(ctx);
308
- tcg_temp_free(t0);
309
return true;
310
}
30
--
311
--
31
2.30.1
312
2.31.1
32
313
33
314
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20210823195529.560295-23-richard.henderson@linaro.org
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/insn_trans/trans_rvd.c.inc | 125 ++++++++++++------------
10
1 file changed, 60 insertions(+), 65 deletions(-)
11
12
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/insn_trans/trans_rvd.c.inc
15
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
16
@@ -XXX,XX +XXX,XX @@
17
18
static bool trans_fld(DisasContext *ctx, arg_fld *a)
19
{
20
+ TCGv addr;
21
+
22
REQUIRE_FPU;
23
REQUIRE_EXT(ctx, RVD);
24
- TCGv t0 = tcg_temp_new();
25
- gen_get_gpr(ctx, t0, a->rs1);
26
- tcg_gen_addi_tl(t0, t0, a->imm);
27
28
- tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ);
29
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
30
+ if (a->imm) {
31
+ TCGv temp = temp_new(ctx);
32
+ tcg_gen_addi_tl(temp, addr, a->imm);
33
+ addr = temp;
34
+ }
35
+
36
+ tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEQ);
37
38
mark_fs_dirty(ctx);
39
- tcg_temp_free(t0);
40
return true;
41
}
42
43
static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
44
{
45
+ TCGv addr;
46
+
47
REQUIRE_FPU;
48
REQUIRE_EXT(ctx, RVD);
49
- TCGv t0 = tcg_temp_new();
50
- gen_get_gpr(ctx, t0, a->rs1);
51
- tcg_gen_addi_tl(t0, t0, a->imm);
52
53
- tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ);
54
+ addr = get_gpr(ctx, a->rs1, EXT_NONE);
55
+ if (a->imm) {
56
+ TCGv temp = temp_new(ctx);
57
+ tcg_gen_addi_tl(temp, addr, a->imm);
58
+ addr = temp;
59
+ }
60
+
61
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEQ);
62
63
- tcg_temp_free(t0);
64
return true;
65
}
66
67
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
68
REQUIRE_FPU;
69
REQUIRE_EXT(ctx, RVD);
70
71
- TCGv t0 = tcg_temp_new();
72
- gen_helper_feq_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
73
- gen_set_gpr(ctx, a->rd, t0);
74
- tcg_temp_free(t0);
75
+ TCGv dest = dest_gpr(ctx, a->rd);
76
77
+ gen_helper_feq_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
78
+ gen_set_gpr(ctx, a->rd, dest);
79
return true;
80
}
81
82
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
83
REQUIRE_FPU;
84
REQUIRE_EXT(ctx, RVD);
85
86
- TCGv t0 = tcg_temp_new();
87
- gen_helper_flt_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
88
- gen_set_gpr(ctx, a->rd, t0);
89
- tcg_temp_free(t0);
90
+ TCGv dest = dest_gpr(ctx, a->rd);
91
92
+ gen_helper_flt_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
93
+ gen_set_gpr(ctx, a->rd, dest);
94
return true;
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
98
REQUIRE_FPU;
99
REQUIRE_EXT(ctx, RVD);
100
101
- TCGv t0 = tcg_temp_new();
102
- gen_helper_fle_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
103
- gen_set_gpr(ctx, a->rd, t0);
104
- tcg_temp_free(t0);
105
+ TCGv dest = dest_gpr(ctx, a->rd);
106
107
+ gen_helper_fle_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
108
+ gen_set_gpr(ctx, a->rd, dest);
109
return true;
110
}
111
112
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
113
REQUIRE_FPU;
114
REQUIRE_EXT(ctx, RVD);
115
116
- TCGv t0 = tcg_temp_new();
117
- gen_helper_fclass_d(t0, cpu_fpr[a->rs1]);
118
- gen_set_gpr(ctx, a->rd, t0);
119
- tcg_temp_free(t0);
120
+ TCGv dest = dest_gpr(ctx, a->rd);
121
+
122
+ gen_helper_fclass_d(dest, cpu_fpr[a->rs1]);
123
+ gen_set_gpr(ctx, a->rd, dest);
124
return true;
125
}
126
127
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
128
REQUIRE_FPU;
129
REQUIRE_EXT(ctx, RVD);
130
131
- TCGv t0 = tcg_temp_new();
132
- gen_set_rm(ctx, a->rm);
133
- gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[a->rs1]);
134
- gen_set_gpr(ctx, a->rd, t0);
135
- tcg_temp_free(t0);
136
+ TCGv dest = dest_gpr(ctx, a->rd);
137
138
+ gen_set_rm(ctx, a->rm);
139
+ gen_helper_fcvt_w_d(dest, cpu_env, cpu_fpr[a->rs1]);
140
+ gen_set_gpr(ctx, a->rd, dest);
141
return true;
142
}
143
144
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
145
REQUIRE_FPU;
146
REQUIRE_EXT(ctx, RVD);
147
148
- TCGv t0 = tcg_temp_new();
149
- gen_set_rm(ctx, a->rm);
150
- gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[a->rs1]);
151
- gen_set_gpr(ctx, a->rd, t0);
152
- tcg_temp_free(t0);
153
+ TCGv dest = dest_gpr(ctx, a->rd);
154
155
+ gen_set_rm(ctx, a->rm);
156
+ gen_helper_fcvt_wu_d(dest, cpu_env, cpu_fpr[a->rs1]);
157
+ gen_set_gpr(ctx, a->rd, dest);
158
return true;
159
}
160
161
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
162
REQUIRE_FPU;
163
REQUIRE_EXT(ctx, RVD);
164
165
- TCGv t0 = tcg_temp_new();
166
- gen_get_gpr(ctx, t0, a->rs1);
167
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
168
169
gen_set_rm(ctx, a->rm);
170
- gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, t0);
171
- tcg_temp_free(t0);
172
+ gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, src);
173
174
mark_fs_dirty(ctx);
175
return true;
176
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
177
REQUIRE_FPU;
178
REQUIRE_EXT(ctx, RVD);
179
180
- TCGv t0 = tcg_temp_new();
181
- gen_get_gpr(ctx, t0, a->rs1);
182
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
183
184
gen_set_rm(ctx, a->rm);
185
- gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, t0);
186
- tcg_temp_free(t0);
187
+ gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, src);
188
189
mark_fs_dirty(ctx);
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
192
REQUIRE_FPU;
193
REQUIRE_EXT(ctx, RVD);
194
195
- TCGv t0 = tcg_temp_new();
196
+ TCGv dest = dest_gpr(ctx, a->rd);
197
+
198
gen_set_rm(ctx, a->rm);
199
- gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[a->rs1]);
200
- gen_set_gpr(ctx, a->rd, t0);
201
- tcg_temp_free(t0);
202
+ gen_helper_fcvt_l_d(dest, cpu_env, cpu_fpr[a->rs1]);
203
+ gen_set_gpr(ctx, a->rd, dest);
204
return true;
205
}
206
207
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
208
REQUIRE_FPU;
209
REQUIRE_EXT(ctx, RVD);
210
211
- TCGv t0 = tcg_temp_new();
212
+ TCGv dest = dest_gpr(ctx, a->rd);
213
+
214
gen_set_rm(ctx, a->rm);
215
- gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[a->rs1]);
216
- gen_set_gpr(ctx, a->rd, t0);
217
- tcg_temp_free(t0);
218
+ gen_helper_fcvt_lu_d(dest, cpu_env, cpu_fpr[a->rs1]);
219
+ gen_set_gpr(ctx, a->rd, dest);
220
return true;
221
}
222
223
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
224
REQUIRE_FPU;
225
REQUIRE_EXT(ctx, RVD);
226
227
- TCGv t0 = tcg_temp_new();
228
- gen_get_gpr(ctx, t0, a->rs1);
229
+ TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
230
231
gen_set_rm(ctx, a->rm);
232
- gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, t0);
233
- tcg_temp_free(t0);
234
+ gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, src);
235
+
236
mark_fs_dirty(ctx);
237
return true;
238
}
239
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
240
REQUIRE_FPU;
241
REQUIRE_EXT(ctx, RVD);
242
243
- TCGv t0 = tcg_temp_new();
244
- gen_get_gpr(ctx, t0, a->rs1);
245
+ TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
246
247
gen_set_rm(ctx, a->rm);
248
- gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, t0);
249
- tcg_temp_free(t0);
250
+ gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, src);
251
+
252
mark_fs_dirty(ctx);
253
return true;
254
}
255
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a)
256
REQUIRE_EXT(ctx, RVD);
257
258
#ifdef TARGET_RISCV64
259
- TCGv t0 = tcg_temp_new();
260
- gen_get_gpr(ctx, t0, a->rs1);
261
-
262
- tcg_gen_mov_tl(cpu_fpr[a->rd], t0);
263
- tcg_temp_free(t0);
264
+ tcg_gen_mov_tl(cpu_fpr[a->rd], get_gpr(ctx, a->rs1, EXT_NONE));
265
mark_fs_dirty(ctx);
266
return true;
267
#else
268
--
269
2.31.1
270
271
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Exit early if check_access fails.
4
Split out do_hlv, do_hsv, do_hlvx subroutines.
5
Use dest_gpr, get_gpr in the new subroutines.
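The resulting shape, as a standalone sketch (not the QEMU code; the MemOp
names below are stand-ins for the real MO_* values used in the patch):

#include <stdbool.h>
#include <stdio.h>

typedef enum { OP_SB, OP_UB, OP_TESW, OP_TEUW, OP_TESL, OP_TEUL, OP_TEQ } MemOpSketch;

/* The access check now reports failure so callers can exit early. */
static bool check_access(bool hlsx_enabled)
{
    if (!hlsx_enabled) {
        printf("raise virtual-instruction/illegal-instruction exception\n");
        return false;               /* caller emits no load or store */
    }
    return true;
}

/* One shared helper, parameterised only by the memory operation. */
static bool do_hlv(bool hlsx_enabled, MemOpSketch mop)
{
    if (check_access(hlsx_enabled)) {
        printf("emit hypervisor load, mop=%d\n", (int)mop);
    }
    return true;                    /* insn was decoded either way */
}

/* Each per-width translation hook collapses to a one-line call. */
static bool trans_hlv_b(bool hlsx)  { return do_hlv(hlsx, OP_SB); }
static bool trans_hlv_hu(bool hlsx) { return do_hlv(hlsx, OP_TEUW); }
static bool trans_hlv_d(bool hlsx)  { return do_hlv(hlsx, OP_TEQ); }

int main(void)
{
    trans_hlv_b(true);
    trans_hlv_hu(false);            /* failed check: only the exception is raised */
    trans_hlv_d(true);
    return 0;
}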
6
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20210823195529.560295-24-richard.henderson@linaro.org
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/insn32.decode | 1 +
14
target/riscv/insn_trans/trans_rvh.c.inc | 266 +++++-------------------
15
2 files changed, 57 insertions(+), 210 deletions(-)
16
17
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/insn32.decode
20
+++ b/target/riscv/insn32.decode
21
@@ -XXX,XX +XXX,XX @@
22
&j imm rd
23
&r rd rs1 rs2
24
&r2 rd rs1
25
+&r2_s rs1 rs2
26
&s imm rs1 rs2
27
&u imm rd
28
&shift shamt rs1 rd
29
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/insn_trans/trans_rvh.c.inc
32
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
33
@@ -XXX,XX +XXX,XX @@
34
*/
35
36
#ifndef CONFIG_USER_ONLY
37
-static void check_access(DisasContext *ctx) {
38
+static bool check_access(DisasContext *ctx)
39
+{
40
if (!ctx->hlsx) {
41
if (ctx->virt_enabled) {
42
generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
43
} else {
44
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
45
}
46
+ return false;
47
}
48
+ return true;
49
}
50
#endif
51
52
-static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
53
+static bool do_hlv(DisasContext *ctx, arg_r2 *a, MemOp mop)
54
{
55
- REQUIRE_EXT(ctx, RVH);
56
-#ifndef CONFIG_USER_ONLY
57
- TCGv t0 = tcg_temp_new();
58
- TCGv t1 = tcg_temp_new();
59
-
60
- check_access(ctx);
61
-
62
- gen_get_gpr(ctx, t0, a->rs1);
63
-
64
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
65
- gen_set_gpr(ctx, a->rd, t1);
66
-
67
- tcg_temp_free(t0);
68
- tcg_temp_free(t1);
69
- return true;
70
-#else
71
+#ifdef CONFIG_USER_ONLY
72
return false;
73
+#else
74
+ if (check_access(ctx)) {
75
+ TCGv dest = dest_gpr(ctx, a->rd);
76
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
77
+ int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
78
+ tcg_gen_qemu_ld_tl(dest, addr, mem_idx, mop);
79
+ gen_set_gpr(ctx, a->rd, dest);
80
+ }
81
+ return true;
82
#endif
83
}
84
85
-static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
86
+static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
87
{
88
REQUIRE_EXT(ctx, RVH);
89
-#ifndef CONFIG_USER_ONLY
90
- TCGv t0 = tcg_temp_new();
91
- TCGv t1 = tcg_temp_new();
92
-
93
- check_access(ctx);
94
-
95
- gen_get_gpr(ctx, t0, a->rs1);
96
-
97
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
98
- gen_set_gpr(ctx, a->rd, t1);
99
+ return do_hlv(ctx, a, MO_SB);
100
+}
101
102
- tcg_temp_free(t0);
103
- tcg_temp_free(t1);
104
- return true;
105
-#else
106
- return false;
107
-#endif
108
+static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
109
+{
110
+ REQUIRE_EXT(ctx, RVH);
111
+ return do_hlv(ctx, a, MO_TESW);
112
}
113
114
static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a)
115
{
116
REQUIRE_EXT(ctx, RVH);
117
-#ifndef CONFIG_USER_ONLY
118
- TCGv t0 = tcg_temp_new();
119
- TCGv t1 = tcg_temp_new();
120
-
121
- check_access(ctx);
122
-
123
- gen_get_gpr(ctx, t0, a->rs1);
124
-
125
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
126
- gen_set_gpr(ctx, a->rd, t1);
127
-
128
- tcg_temp_free(t0);
129
- tcg_temp_free(t1);
130
- return true;
131
-#else
132
- return false;
133
-#endif
134
+ return do_hlv(ctx, a, MO_TESL);
135
}
136
137
static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a)
138
{
139
REQUIRE_EXT(ctx, RVH);
140
-#ifndef CONFIG_USER_ONLY
141
- TCGv t0 = tcg_temp_new();
142
- TCGv t1 = tcg_temp_new();
143
-
144
- check_access(ctx);
145
-
146
- gen_get_gpr(ctx, t0, a->rs1);
147
-
148
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_UB);
149
- gen_set_gpr(ctx, a->rd, t1);
150
-
151
- tcg_temp_free(t0);
152
- tcg_temp_free(t1);
153
- return true;
154
-#else
155
- return false;
156
-#endif
157
+ return do_hlv(ctx, a, MO_UB);
158
}
159
160
static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a)
161
{
162
REQUIRE_EXT(ctx, RVH);
163
-#ifndef CONFIG_USER_ONLY
164
- TCGv t0 = tcg_temp_new();
165
- TCGv t1 = tcg_temp_new();
166
-
167
- check_access(ctx);
168
-
169
- gen_get_gpr(ctx, t0, a->rs1);
170
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUW);
171
- gen_set_gpr(ctx, a->rd, t1);
172
+ return do_hlv(ctx, a, MO_TEUW);
173
+}
174
175
- tcg_temp_free(t0);
176
- tcg_temp_free(t1);
177
- return true;
178
-#else
179
+static bool do_hsv(DisasContext *ctx, arg_r2_s *a, MemOp mop)
180
+{
181
+#ifdef CONFIG_USER_ONLY
182
return false;
183
+#else
184
+ if (check_access(ctx)) {
185
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
186
+ TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
187
+ int mem_idx = ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
188
+ tcg_gen_qemu_st_tl(data, addr, mem_idx, mop);
189
+ }
190
+ return true;
191
#endif
192
}
193
194
static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a)
195
{
196
REQUIRE_EXT(ctx, RVH);
197
-#ifndef CONFIG_USER_ONLY
198
- TCGv t0 = tcg_temp_new();
199
- TCGv dat = tcg_temp_new();
200
-
201
- check_access(ctx);
202
-
203
- gen_get_gpr(ctx, t0, a->rs1);
204
- gen_get_gpr(ctx, dat, a->rs2);
205
-
206
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
207
-
208
- tcg_temp_free(t0);
209
- tcg_temp_free(dat);
210
- return true;
211
-#else
212
- return false;
213
-#endif
214
+ return do_hsv(ctx, a, MO_SB);
215
}
216
217
static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a)
218
{
219
REQUIRE_EXT(ctx, RVH);
220
-#ifndef CONFIG_USER_ONLY
221
- TCGv t0 = tcg_temp_new();
222
- TCGv dat = tcg_temp_new();
223
-
224
- check_access(ctx);
225
-
226
- gen_get_gpr(ctx, t0, a->rs1);
227
- gen_get_gpr(ctx, dat, a->rs2);
228
-
229
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
230
-
231
- tcg_temp_free(t0);
232
- tcg_temp_free(dat);
233
- return true;
234
-#else
235
- return false;
236
-#endif
237
+ return do_hsv(ctx, a, MO_TESW);
238
}
239
240
static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a)
241
{
242
REQUIRE_EXT(ctx, RVH);
243
-#ifndef CONFIG_USER_ONLY
244
- TCGv t0 = tcg_temp_new();
245
- TCGv dat = tcg_temp_new();
246
-
247
- check_access(ctx);
248
-
249
- gen_get_gpr(ctx, t0, a->rs1);
250
- gen_get_gpr(ctx, dat, a->rs2);
251
-
252
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
253
-
254
- tcg_temp_free(t0);
255
- tcg_temp_free(dat);
256
- return true;
257
-#else
258
- return false;
259
-#endif
260
+ return do_hsv(ctx, a, MO_TESL);
261
}
262
263
static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a)
264
{
265
REQUIRE_64BIT(ctx);
266
REQUIRE_EXT(ctx, RVH);
267
-
268
-#ifndef CONFIG_USER_ONLY
269
- TCGv t0 = tcg_temp_new();
270
- TCGv t1 = tcg_temp_new();
271
-
272
- check_access(ctx);
273
-
274
- gen_get_gpr(ctx, t0, a->rs1);
275
-
276
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUL);
277
- gen_set_gpr(ctx, a->rd, t1);
278
-
279
- tcg_temp_free(t0);
280
- tcg_temp_free(t1);
281
- return true;
282
-#else
283
- return false;
284
-#endif
285
+ return do_hlv(ctx, a, MO_TEUL);
286
}
287
288
static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
289
{
290
REQUIRE_64BIT(ctx);
291
REQUIRE_EXT(ctx, RVH);
292
-
293
-#ifndef CONFIG_USER_ONLY
294
- TCGv t0 = tcg_temp_new();
295
- TCGv t1 = tcg_temp_new();
296
-
297
- check_access(ctx);
298
-
299
- gen_get_gpr(ctx, t0, a->rs1);
300
-
301
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
302
- gen_set_gpr(ctx, a->rd, t1);
303
-
304
- tcg_temp_free(t0);
305
- tcg_temp_free(t1);
306
- return true;
307
-#else
308
- return false;
309
-#endif
310
+ return do_hlv(ctx, a, MO_TEQ);
311
}
312
313
static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
314
{
315
REQUIRE_64BIT(ctx);
316
REQUIRE_EXT(ctx, RVH);
317
+ return do_hsv(ctx, a, MO_TEQ);
318
+}
319
320
#ifndef CONFIG_USER_ONLY
321
- TCGv t0 = tcg_temp_new();
322
- TCGv dat = tcg_temp_new();
323
-
324
- check_access(ctx);
325
-
326
- gen_get_gpr(ctx, t0, a->rs1);
327
- gen_get_gpr(ctx, dat, a->rs2);
328
-
329
- tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
330
-
331
- tcg_temp_free(t0);
332
- tcg_temp_free(dat);
333
+static bool do_hlvx(DisasContext *ctx, arg_r2 *a,
334
+ void (*func)(TCGv, TCGv_env, TCGv))
335
+{
336
+ if (check_access(ctx)) {
337
+ TCGv dest = dest_gpr(ctx, a->rd);
338
+ TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
339
+ func(dest, cpu_env, addr);
340
+ gen_set_gpr(ctx, a->rd, dest);
341
+ }
342
return true;
343
-#else
344
- return false;
345
-#endif
346
}
347
+#endif
348
349
static bool trans_hlvx_hu(DisasContext *ctx, arg_hlvx_hu *a)
350
{
351
REQUIRE_EXT(ctx, RVH);
352
#ifndef CONFIG_USER_ONLY
353
- TCGv t0 = tcg_temp_new();
354
- TCGv t1 = tcg_temp_new();
355
-
356
- check_access(ctx);
357
-
358
- gen_get_gpr(ctx, t0, a->rs1);
359
-
360
- gen_helper_hyp_hlvx_hu(t1, cpu_env, t0);
361
- gen_set_gpr(ctx, a->rd, t1);
362
-
363
- tcg_temp_free(t0);
364
- tcg_temp_free(t1);
365
- return true;
366
+ return do_hlvx(ctx, a, gen_helper_hyp_hlvx_hu);
367
#else
368
return false;
369
#endif
370
@@ -XXX,XX +XXX,XX @@ static bool trans_hlvx_wu(DisasContext *ctx, arg_hlvx_wu *a)
371
{
372
REQUIRE_EXT(ctx, RVH);
373
#ifndef CONFIG_USER_ONLY
374
- TCGv t0 = tcg_temp_new();
375
- TCGv t1 = tcg_temp_new();
376
-
377
- check_access(ctx);
378
-
379
- gen_get_gpr(ctx, t0, a->rs1);
380
-
381
- gen_helper_hyp_hlvx_wu(t1, cpu_env, t0);
382
- gen_set_gpr(ctx, a->rd, t1);
383
-
384
- tcg_temp_free(t0);
385
- tcg_temp_free(t1);
386
- return true;
387
+ return do_hlvx(ctx, a, gen_helper_hyp_hlvx_wu);
388
#else
389
return false;
390
#endif
391
--
392
2.31.1
393
394
New patch
1
1
From: Richard Henderson <richard.henderson@linaro.org>
2
3
Remove gen_get_gpr, as the function becomes unused.
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-id: 20210823195529.560295-25-richard.henderson@linaro.org
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/translate.c | 13 ++---
11
target/riscv/insn_trans/trans_rvv.c.inc | 74 +++++++------------------
12
2 files changed, 26 insertions(+), 61 deletions(-)
13
14
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/translate.c
17
+++ b/target/riscv/translate.c
18
@@ -XXX,XX +XXX,XX @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext)
19
g_assert_not_reached();
20
}
21
22
-static void gen_get_gpr(DisasContext *ctx, TCGv t, int reg_num)
23
-{
24
- tcg_gen_mov_tl(t, get_gpr(ctx, reg_num, EXT_NONE));
25
-}
26
-
27
static TCGv dest_gpr(DisasContext *ctx, int reg_num)
28
{
29
if (reg_num == 0 || ctx->w) {
30
@@ -XXX,XX +XXX,XX @@ void riscv_translate_init(void)
31
{
32
int i;
33
34
- /* cpu_gpr[0] is a placeholder for the zero register. Do not use it. */
35
- /* Use the gen_set_gpr and gen_get_gpr helper functions when accessing */
36
- /* registers, unless you specifically block reads/writes to reg 0 */
37
+ /*
38
+ * cpu_gpr[0] is a placeholder for the zero register. Do not use it.
39
+ * Use the gen_set_gpr and get_gpr helper functions when accessing regs,
40
+ * unless you specifically block reads/writes to reg 0.
41
+ */
42
cpu_gpr[0] = NULL;
43
44
for (i = 1; i < 32; i++) {
45
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
return false;
}

- s2 = tcg_temp_new();
- dst = tcg_temp_new();
+ s2 = get_gpr(ctx, a->rs2, EXT_ZERO);
+ dst = dest_gpr(ctx, a->rd);

/* Using x0 as the rs1 register specifier, encodes an infinite AVL */
if (a->rs1 == 0) {
/* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
s1 = tcg_constant_tl(RV_VLEN_MAX);
} else {
- s1 = tcg_temp_new();
- gen_get_gpr(ctx, s1, a->rs1);
+ s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
}
- gen_get_gpr(ctx, s2, a->rs2);
gen_helper_vsetvl(dst, cpu_env, s1, s2);
gen_set_gpr(ctx, a->rd, dst);
+
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
lookup_and_goto_ptr(ctx);
ctx->base.is_jmp = DISAS_NORETURN;
-
- tcg_temp_free(s1);
- tcg_temp_free(s2);
- tcg_temp_free(dst);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
}

s2 = tcg_constant_tl(a->zimm);
- dst = tcg_temp_new();
+ dst = dest_gpr(ctx, a->rd);

/* Using x0 as the rs1 register specifier, encodes an infinite AVL */
if (a->rs1 == 0) {
/* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
s1 = tcg_constant_tl(RV_VLEN_MAX);
} else {
- s1 = tcg_temp_new();
- gen_get_gpr(ctx, s1, a->rs1);
+ s1 = get_gpr(ctx, a->rs1, EXT_ZERO);
}
gen_helper_vsetvl(dst, cpu_env, s1, s2);
gen_set_gpr(ctx, a->rd, dst);
+
gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
ctx->base.is_jmp = DISAS_NORETURN;
-
- tcg_temp_free(s1);
- tcg_temp_free(dst);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,

dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
- base = tcg_temp_new();
+ base = get_gpr(s, rs1, EXT_NONE);

/*
* As simd_desc supports at most 256 bytes, and in this implementation,
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
*/
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

- gen_get_gpr(s, base, rs1);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,

tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
- tcg_temp_free(base);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,

dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
- base = tcg_temp_new();
- stride = tcg_temp_new();
+ base = get_gpr(s, rs1, EXT_NONE);
+ stride = get_gpr(s, rs2, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

- gen_get_gpr(s, base, rs1);
- gen_get_gpr(s, stride, rs2);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,

tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
- tcg_temp_free(base);
- tcg_temp_free(stride);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
index = tcg_temp_new_ptr();
- base = tcg_temp_new();
+ base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

- gen_get_gpr(s, base, rs1);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
tcg_temp_free_ptr(index);
- tcg_temp_free(base);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,

dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
- base = tcg_temp_new();
+ base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

- gen_get_gpr(s, base, rs1);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,

tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
- tcg_temp_free(base);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
index = tcg_temp_new_ptr();
- base = tcg_temp_new();
+ base = get_gpr(s, rs1, EXT_NONE);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

- gen_get_gpr(s, base, rs1);
tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
@@ -XXX,XX +XXX,XX @@ static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
tcg_temp_free_ptr(index);
- tcg_temp_free(base);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
dest = tcg_temp_new_ptr();
mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
- src1 = tcg_temp_new();
- gen_get_gpr(s, src1, rs1);
+ src1 = get_gpr(s, rs1, EXT_NONE);

data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
data = FIELD_DP32(data, VDATA, VM, vm);
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
tcg_temp_free_ptr(dest);
tcg_temp_free_ptr(mask);
tcg_temp_free_ptr(src2);
- tcg_temp_free(src1);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,

if (a->vm && s->vl_eq_vlmax) {
TCGv_i64 src1 = tcg_temp_new_i64();
- TCGv tmp = tcg_temp_new();

- gen_get_gpr(s, tmp, a->rs1);
- tcg_gen_ext_tl_i64(src1, tmp);
+ tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
src1, MAXSZ(s), MAXSZ(s));

tcg_temp_free_i64(src1);
- tcg_temp_free(tmp);
return true;
}
return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
@@ -XXX,XX +XXX,XX @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,

if (a->vm && s->vl_eq_vlmax) {
TCGv_i32 src1 = tcg_temp_new_i32();
- TCGv tmp = tcg_temp_new();

- gen_get_gpr(s, tmp, a->rs1);
- tcg_gen_trunc_tl_i32(src1, tmp);
+ tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
src1, MAXSZ(s), MAXSZ(s));

tcg_temp_free_i32(src1);
- tcg_temp_free(tmp);
return true;
}
return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
TCGLabel *over = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

- s1 = tcg_temp_new();
- gen_get_gpr(s, s1, a->rs1);
+ s1 = get_gpr(s, a->rs1, EXT_SIGN);

if (s->vl_eq_vlmax) {
tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
tcg_temp_free_i64(s1_i64);
}

- tcg_temp_free(s1);
gen_set_label(over);
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)

mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
- dst = tcg_temp_new();
+ dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
@@ -XXX,XX +XXX,XX @@ static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)

tcg_temp_free_ptr(mask);
tcg_temp_free_ptr(src2);
- tcg_temp_free(dst);
return true;
}
return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)

mask = tcg_temp_new_ptr();
src2 = tcg_temp_new_ptr();
- dst = tcg_temp_new();
+ dst = dest_gpr(s, a->rd);
desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
@@ -XXX,XX +XXX,XX @@ static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)

tcg_temp_free_ptr(mask);
tcg_temp_free_ptr(src2);
- tcg_temp_free(dst);
return true;
}
return false;
@@ -XXX,XX +XXX,XX @@ static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
static bool trans_vext_x_v(DisasContext *s, arg_r *a)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- TCGv dest = tcg_temp_new();
+ TCGv dest = dest_gpr(s, a->rd);

if (a->rs1 == 0) {
/* Special case vmv.x.s rd, vs2. */
@@ -XXX,XX +XXX,XX @@ static bool trans_vext_x_v(DisasContext *s, arg_r *a)
int vlmax = s->vlen >> (3 + s->sew);
vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
}
+
tcg_gen_trunc_i64_tl(dest, tmp);
gen_set_gpr(s, a->rd, dest);

- tcg_temp_free(dest);
tcg_temp_free_i64(tmp);
return true;
}
--
2.31.1