1
From: Alistair Francis <alistair.francis@wdc.com>
1
From: Alistair Francis <alistair.francis@wdc.com>
2
2
3
The following changes since commit 64ada298b98a51eb2512607f6e6180cb330c47b1:
3
The following changes since commit 284c52eec2d0a1b9c47f06c3eee46762c5fc0915:
4
4
5
Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220302' into staging (2022-03-02 12:38:46 +0000)
5
Merge tag 'win-socket-pull-request' of https://gitlab.com/marcandre.lureau/qemu into staging (2023-03-13 13:44:17 +0000)
6
6
7
are available in the Git repository at:
7
are available in the Git repository at:
8
8
9
git@github.com:alistair23/qemu.git tags/pull-riscv-to-apply-20220303
9
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20230314
10
10
11
for you to fetch changes up to 6b1accefd4876ea5475d55454c7d5b52c02cb73c:
11
for you to fetch changes up to 0d581506de803204c5a321100afa270573382932:
12
12
13
target/riscv: expose zfinx, zdinx, zhinx{min} properties (2022-03-03 13:14:50 +1000)
13
Fix incorrect register name in disassembler for fmv,fabs,fneg instructions (2023-03-14 16:36:43 +1000)
14
14
15
----------------------------------------------------------------
15
----------------------------------------------------------------
16
Fifth RISC-V PR for QEMU 7.0
16
Seventh RISC-V PR for 8.0
17
17
18
* Fixup checks for ext_zb[abcs]
18
* Fix slli_uw decoding
19
* Add AIA support for virt machine
19
* Fix incorrect register name in disassembler for fmv,fabs,fneg instructions
20
* Increase maximum number of CPUs in virt machine
21
* Fixup OpenTitan SPI address
22
* Add support for zfinx, zdinx and zhinx{min} extensions
23
20
24
----------------------------------------------------------------
21
----------------------------------------------------------------
25
Anup Patel (5):
22
Ivan Klokov (1):
26
hw/riscv: virt: Add optional AIA APLIC support to virt machine
23
disas/riscv: Fix slli_uw decoding
27
hw/intc: Add RISC-V AIA IMSIC device emulation
28
hw/riscv: virt: Add optional AIA IMSIC support to virt machine
29
docs/system: riscv: Document AIA options for virt machine
30
hw/riscv: virt: Increase maximum number of allowed CPUs
31
24
32
Philipp Tomsich (1):
25
Mikhail Tyutin (1):
33
target/riscv: fix inverted checks for ext_zb[abcs]
26
Fix incorrect register name in disassembler for fmv,fabs,fneg instructions
34
27
35
Weiwei Li (6):
28
disas/riscv.c | 27 ++++++++++++++-------------
36
target/riscv: add cfg properties for zfinx, zdinx and zhinx{min}
29
1 file changed, 14 insertions(+), 13 deletions(-)
37
target/riscv: hardwire mstatus.FS to zero when enable zfinx
38
target/riscv: add support for zfinx
39
target/riscv: add support for zdinx
40
target/riscv: add support for zhinx/zhinxmin
41
target/riscv: expose zfinx, zdinx, zhinx{min} properties
42
43
Wilfred Mallawa (1):
44
hw: riscv: opentitan: fixup SPI addresses
45
46
docs/system/riscv/virt.rst | 16 +
47
include/hw/intc/riscv_imsic.h | 68 +++
48
include/hw/riscv/opentitan.h | 4 +-
49
include/hw/riscv/virt.h | 41 +-
50
target/riscv/cpu.h | 4 +
51
target/riscv/helper.h | 4 +-
52
target/riscv/internals.h | 32 +-
53
hw/intc/riscv_imsic.c | 448 +++++++++++++++++++
54
hw/riscv/opentitan.c | 12 +-
55
hw/riscv/virt.c | 698 +++++++++++++++++++++++++-----
56
target/riscv/cpu.c | 17 +
57
target/riscv/cpu_helper.c | 6 +-
58
target/riscv/csr.c | 25 +-
59
target/riscv/fpu_helper.c | 178 ++++----
60
target/riscv/translate.c | 149 ++++++-
61
target/riscv/insn_trans/trans_rvb.c.inc | 8 +-
62
target/riscv/insn_trans/trans_rvd.c.inc | 285 ++++++++----
63
target/riscv/insn_trans/trans_rvf.c.inc | 314 ++++++++++----
64
target/riscv/insn_trans/trans_rvzfh.c.inc | 332 ++++++++++----
65
hw/intc/Kconfig | 3 +
66
hw/intc/meson.build | 1 +
67
hw/riscv/Kconfig | 2 +
68
22 files changed, 2146 insertions(+), 501 deletions(-)
69
create mode 100644 include/hw/intc/riscv_imsic.h
70
create mode 100644 hw/intc/riscv_imsic.c
diff view generated by jsdifflib
1
From: Philipp Tomsich <philipp.tomsich@vrull.eu>
1
From: Ivan Klokov <ivan.klokov@syntacore.com>
2
2
3
While changing to the use of cfg_ptr, the conditions for REQUIRE_ZB[ABCS]
3
The decoding of the slli_uw currently contains decoding
4
inadvertently became inverted and slipped through the initial testing (which
4
error: shamt part of opcode has six bits, not five.
5
used RV64GC_XVentanaCondOps as a target).
6
This fixes the regression.
7
5
8
Tested against SPEC2017 w/ GCC 12 (prerelease) for RV64GC_zba_zbb_zbc_zbs.
6
Fixes 3de1fb71("target/riscv: update disas.c for xnor/orn/andn and slli.uw")
9
7
10
Fixes: f2a32bec8f0da99 ("target/riscv: access cfg structure through DisasContext")
8
Signed-off-by: Ivan Klokov <ivan.klokov@syntacore.com>
11
Signed-off-by: Philipp Tomsich <philipp.tomsich@vrull.eu>
9
Reviewed-by: Philipp Tomsich <philipp.tomsich@vrull.eu>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-Id: <20220203153946.2676353-1-philipp.tomsich@vrull.eu>
11
Message-Id: <20230227090228.17117-1-ivan.klokov@syntacore.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
13
---
16
target/riscv/insn_trans/trans_rvb.c.inc | 8 ++++----
14
disas/riscv.c | 8 ++++----
17
1 file changed, 4 insertions(+), 4 deletions(-)
15
1 file changed, 4 insertions(+), 4 deletions(-)
18
16
19
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
17
diff --git a/disas/riscv.c b/disas/riscv.c
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/insn_trans/trans_rvb.c.inc
19
--- a/disas/riscv.c
22
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
20
+++ b/disas/riscv.c
23
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data opcode_data[] = {
24
*/
22
{ "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
25
23
{ "ctzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
26
#define REQUIRE_ZBA(ctx) do { \
24
{ "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
27
- if (ctx->cfg_ptr->ext_zba) { \
25
- { "slli.uw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
28
+ if (!ctx->cfg_ptr->ext_zba) { \
26
+ { "slli.uw", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 },
29
return false; \
27
{ "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
30
} \
28
{ "rolw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
31
} while (0)
29
{ "rorw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
32
30
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
33
#define REQUIRE_ZBB(ctx) do { \
31
switch (((inst >> 12) & 0b111)) {
34
- if (ctx->cfg_ptr->ext_zbb) { \
32
case 0: op = rv_op_addiw; break;
35
+ if (!ctx->cfg_ptr->ext_zbb) { \
33
case 1:
36
return false; \
34
- switch (((inst >> 25) & 0b1111111)) {
37
} \
35
+ switch (((inst >> 26) & 0b111111)) {
38
} while (0)
36
case 0: op = rv_op_slliw; break;
39
37
- case 4: op = rv_op_slli_uw; break;
40
#define REQUIRE_ZBC(ctx) do { \
38
- case 48:
41
- if (ctx->cfg_ptr->ext_zbc) { \
39
+ case 2: op = rv_op_slli_uw; break;
42
+ if (!ctx->cfg_ptr->ext_zbc) { \
40
+ case 24:
43
return false; \
41
switch ((inst >> 20) & 0b11111) {
44
} \
42
case 0b00000: op = rv_op_clzw; break;
45
} while (0)
43
case 0b00001: op = rv_op_ctzw; break;
46
47
#define REQUIRE_ZBS(ctx) do { \
48
- if (ctx->cfg_ptr->ext_zbs) { \
49
+ if (!ctx->cfg_ptr->ext_zbs) { \
50
return false; \
51
} \
52
} while (0)
53
--
44
--
54
2.35.1
45
2.39.2
diff view generated by jsdifflib
Deleted patch
1
From: Anup Patel <anup.patel@wdc.com>
2
1
3
We extend virt machine to emulate AIA APLIC devices only when
4
"aia=aplic" parameter is passed along with machine name in QEMU
5
command-line. When "aia=none" or not specified then we fallback
6
to original PLIC device emulation.
7
8
Signed-off-by: Anup Patel <anup.patel@wdc.com>
9
Signed-off-by: Anup Patel <anup@brainfault.org>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-Id: <20220220085526.808674-2-anup@brainfault.org>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
include/hw/riscv/virt.h | 26 +++-
15
hw/riscv/virt.c | 291 ++++++++++++++++++++++++++++++++--------
16
hw/riscv/Kconfig | 1 +
17
3 files changed, 259 insertions(+), 59 deletions(-)
18
19
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/riscv/virt.h
22
+++ b/include/hw/riscv/virt.h
23
@@ -XXX,XX +XXX,XX @@ typedef struct RISCVVirtState RISCVVirtState;
24
DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
25
TYPE_RISCV_VIRT_MACHINE)
26
27
+typedef enum RISCVVirtAIAType {
28
+ VIRT_AIA_TYPE_NONE = 0,
29
+ VIRT_AIA_TYPE_APLIC,
30
+} RISCVVirtAIAType;
31
+
32
struct RISCVVirtState {
33
/*< private >*/
34
MachineState parent;
35
36
/*< public >*/
37
RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
38
- DeviceState *plic[VIRT_SOCKETS_MAX];
39
+ DeviceState *irqchip[VIRT_SOCKETS_MAX];
40
PFlashCFI01 *flash[2];
41
FWCfgState *fw_cfg;
42
43
int fdt_size;
44
bool have_aclint;
45
+ RISCVVirtAIAType aia_type;
46
};
47
48
enum {
49
@@ -XXX,XX +XXX,XX @@ enum {
50
VIRT_CLINT,
51
VIRT_ACLINT_SSWI,
52
VIRT_PLIC,
53
+ VIRT_APLIC_M,
54
+ VIRT_APLIC_S,
55
VIRT_UART0,
56
VIRT_VIRTIO,
57
VIRT_FW_CFG,
58
@@ -XXX,XX +XXX,XX @@ enum {
59
VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
60
};
61
62
-#define VIRT_PLIC_NUM_SOURCES 127
63
-#define VIRT_PLIC_NUM_PRIORITIES 7
64
+#define VIRT_IRQCHIP_NUM_SOURCES 127
65
+#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
66
+
67
#define VIRT_PLIC_PRIORITY_BASE 0x04
68
#define VIRT_PLIC_PENDING_BASE 0x1000
69
#define VIRT_PLIC_ENABLE_BASE 0x2000
70
@@ -XXX,XX +XXX,XX @@ enum {
71
72
#define FDT_PCI_ADDR_CELLS 3
73
#define FDT_PCI_INT_CELLS 1
74
-#define FDT_PLIC_ADDR_CELLS 0
75
#define FDT_PLIC_INT_CELLS 1
76
-#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \
77
- FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS)
78
+#define FDT_APLIC_INT_CELLS 2
79
+#define FDT_MAX_INT_CELLS 2
80
+#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
81
+ 1 + FDT_MAX_INT_CELLS)
82
+#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
83
+ 1 + FDT_PLIC_INT_CELLS)
84
+#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
85
+ 1 + FDT_APLIC_INT_CELLS)
86
87
#endif
88
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/hw/riscv/virt.c
91
+++ b/hw/riscv/virt.c
92
@@ -XXX,XX +XXX,XX @@
93
#include "hw/riscv/boot.h"
94
#include "hw/riscv/numa.h"
95
#include "hw/intc/riscv_aclint.h"
96
+#include "hw/intc/riscv_aplic.h"
97
#include "hw/intc/sifive_plic.h"
98
#include "hw/misc/sifive_test.h"
99
#include "chardev/char.h"
100
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry virt_memmap[] = {
101
[VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 },
102
[VIRT_PCIE_PIO] = { 0x3000000, 0x10000 },
103
[VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
104
+ [VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) },
105
+ [VIRT_APLIC_S] = { 0xd000000, APLIC_SIZE(VIRT_CPUS_MAX) },
106
[VIRT_UART0] = { 0x10000000, 0x100 },
107
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
108
[VIRT_FW_CFG] = { 0x10100000, 0x18 },
109
@@ -XXX,XX +XXX,XX @@ static void virt_flash_map(RISCVVirtState *s,
110
sysmem);
111
}
112
113
-static void create_pcie_irq_map(void *fdt, char *nodename,
114
- uint32_t plic_phandle)
115
+static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
116
+ uint32_t irqchip_phandle)
117
{
118
int pin, dev;
119
- uint32_t
120
- full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {};
121
+ uint32_t irq_map_stride = 0;
122
+ uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS *
123
+ FDT_MAX_INT_MAP_WIDTH] = {};
124
uint32_t *irq_map = full_irq_map;
125
126
/* This code creates a standard swizzle of interrupts such that
127
@@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename,
128
int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
129
int i = 0;
130
131
+ /* Fill PCI address cells */
132
irq_map[i] = cpu_to_be32(devfn << 8);
133
-
134
i += FDT_PCI_ADDR_CELLS;
135
- irq_map[i] = cpu_to_be32(pin + 1);
136
137
+ /* Fill PCI Interrupt cells */
138
+ irq_map[i] = cpu_to_be32(pin + 1);
139
i += FDT_PCI_INT_CELLS;
140
- irq_map[i++] = cpu_to_be32(plic_phandle);
141
142
- i += FDT_PLIC_ADDR_CELLS;
143
- irq_map[i] = cpu_to_be32(irq_nr);
144
+ /* Fill interrupt controller phandle and cells */
145
+ irq_map[i++] = cpu_to_be32(irqchip_phandle);
146
+ irq_map[i++] = cpu_to_be32(irq_nr);
147
+ if (s->aia_type != VIRT_AIA_TYPE_NONE) {
148
+ irq_map[i++] = cpu_to_be32(0x4);
149
+ }
150
151
- irq_map += FDT_INT_MAP_WIDTH;
152
+ if (!irq_map_stride) {
153
+ irq_map_stride = i;
154
+ }
155
+ irq_map += irq_map_stride;
156
}
157
}
158
159
- qemu_fdt_setprop(fdt, nodename, "interrupt-map",
160
- full_irq_map, sizeof(full_irq_map));
161
+ qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
162
+ GPEX_NUM_IRQS * GPEX_NUM_IRQS *
163
+ irq_map_stride * sizeof(uint32_t));
164
165
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
166
0x1800, 0, 0, 0x7);
167
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
168
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
169
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
170
qemu_fdt_add_subnode(mc->fdt, plic_name);
171
- qemu_fdt_setprop_cell(mc->fdt, plic_name,
172
- "#address-cells", FDT_PLIC_ADDR_CELLS);
173
qemu_fdt_setprop_cell(mc->fdt, plic_name,
174
"#interrupt-cells", FDT_PLIC_INT_CELLS);
175
qemu_fdt_setprop_string_array(mc->fdt, plic_name, "compatible",
176
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
177
g_free(plic_cells);
178
}
179
180
+static void create_fdt_socket_aia(RISCVVirtState *s,
181
+ const MemMapEntry *memmap, int socket,
182
+ uint32_t *phandle, uint32_t *intc_phandles,
183
+ uint32_t *aplic_phandles)
184
+{
185
+ int cpu;
186
+ char *aplic_name;
187
+ uint32_t *aplic_cells;
188
+ unsigned long aplic_addr;
189
+ MachineState *mc = MACHINE(s);
190
+ uint32_t aplic_m_phandle, aplic_s_phandle;
191
+
192
+ aplic_m_phandle = (*phandle)++;
193
+ aplic_s_phandle = (*phandle)++;
194
+ aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
195
+
196
+ /* M-level APLIC node */
197
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
198
+ aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
199
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
200
+ }
201
+ aplic_addr = memmap[VIRT_APLIC_M].base +
202
+ (memmap[VIRT_APLIC_M].size * socket);
203
+ aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
204
+ qemu_fdt_add_subnode(mc->fdt, aplic_name);
205
+ qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
206
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name,
207
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
208
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
209
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
210
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
211
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
212
+ 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
213
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
214
+ VIRT_IRQCHIP_NUM_SOURCES);
215
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,children",
216
+ aplic_s_phandle);
217
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "riscv,delegate",
218
+ aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
219
+ riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
220
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_m_phandle);
221
+ g_free(aplic_name);
222
+
223
+ /* S-level APLIC node */
224
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
225
+ aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
226
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
227
+ }
228
+ aplic_addr = memmap[VIRT_APLIC_S].base +
229
+ (memmap[VIRT_APLIC_S].size * socket);
230
+ aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
231
+ qemu_fdt_add_subnode(mc->fdt, aplic_name);
232
+ qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
233
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name,
234
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
235
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
236
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
237
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
238
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
239
+ 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
240
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
241
+ VIRT_IRQCHIP_NUM_SOURCES);
242
+ riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
243
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_s_phandle);
244
+ g_free(aplic_name);
245
+
246
+ g_free(aplic_cells);
247
+ aplic_phandles[socket] = aplic_s_phandle;
248
+}
249
+
250
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
251
bool is_32_bit, uint32_t *phandle,
252
uint32_t *irq_mmio_phandle,
253
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
254
}
255
}
256
257
- create_fdt_socket_plic(s, memmap, socket, phandle,
258
- intc_phandles, xplic_phandles);
259
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
260
+ create_fdt_socket_plic(s, memmap, socket, phandle,
261
+ intc_phandles, xplic_phandles);
262
+ } else {
263
+ create_fdt_socket_aia(s, memmap, socket, phandle,
264
+ intc_phandles, xplic_phandles);
265
+ }
266
267
g_free(intc_phandles);
268
g_free(clust_name);
269
@@ -XXX,XX +XXX,XX @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
270
0x0, memmap[VIRT_VIRTIO].size);
271
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
272
irq_virtio_phandle);
273
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", VIRTIO_IRQ + i);
274
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
275
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts",
276
+ VIRTIO_IRQ + i);
277
+ } else {
278
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts",
279
+ VIRTIO_IRQ + i, 0x4);
280
+ }
281
g_free(name);
282
}
283
}
284
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
285
2, virt_high_pcie_memmap.base,
286
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
287
288
- create_pcie_irq_map(mc->fdt, name, irq_pcie_phandle);
289
+ create_pcie_irq_map(s, mc->fdt, name, irq_pcie_phandle);
290
g_free(name);
291
}
292
293
@@ -XXX,XX +XXX,XX @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
294
0x0, memmap[VIRT_UART0].size);
295
qemu_fdt_setprop_cell(mc->fdt, name, "clock-frequency", 3686400);
296
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", irq_mmio_phandle);
297
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
298
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
299
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
300
+ } else {
301
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", UART0_IRQ, 0x4);
302
+ }
303
304
qemu_fdt_add_subnode(mc->fdt, "/chosen");
305
qemu_fdt_setprop_string(mc->fdt, "/chosen", "stdout-path", name);
306
@@ -XXX,XX +XXX,XX @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
307
0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
308
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
309
irq_mmio_phandle);
310
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
311
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
312
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
313
+ } else {
314
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", RTC_IRQ, 0x4);
315
+ }
316
g_free(name);
317
}
318
319
@@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
320
hwaddr high_mmio_base,
321
hwaddr high_mmio_size,
322
hwaddr pio_base,
323
- DeviceState *plic)
324
+ DeviceState *irqchip)
325
{
326
DeviceState *dev;
327
MemoryRegion *ecam_alias, *ecam_reg;
328
@@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
329
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
330
331
for (i = 0; i < GPEX_NUM_IRQS; i++) {
332
- irq = qdev_get_gpio_in(plic, PCIE_IRQ + i);
333
+ irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i);
334
335
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
336
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
337
@@ -XXX,XX +XXX,XX @@ static FWCfgState *create_fw_cfg(const MachineState *mc)
338
return fw_cfg;
339
}
340
341
+static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
342
+ int base_hartid, int hart_count)
343
+{
344
+ DeviceState *ret;
345
+ char *plic_hart_config;
346
+
347
+ /* Per-socket PLIC hart topology configuration string */
348
+ plic_hart_config = riscv_plic_hart_config_string(hart_count);
349
+
350
+ /* Per-socket PLIC */
351
+ ret = sifive_plic_create(
352
+ memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
353
+ plic_hart_config, hart_count, base_hartid,
354
+ VIRT_IRQCHIP_NUM_SOURCES,
355
+ ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
356
+ VIRT_PLIC_PRIORITY_BASE,
357
+ VIRT_PLIC_PENDING_BASE,
358
+ VIRT_PLIC_ENABLE_BASE,
359
+ VIRT_PLIC_ENABLE_STRIDE,
360
+ VIRT_PLIC_CONTEXT_BASE,
361
+ VIRT_PLIC_CONTEXT_STRIDE,
362
+ memmap[VIRT_PLIC].size);
363
+
364
+ g_free(plic_hart_config);
365
+
366
+ return ret;
367
+}
368
+
369
+static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type,
370
+ const MemMapEntry *memmap, int socket,
371
+ int base_hartid, int hart_count)
372
+{
373
+ DeviceState *aplic_m;
374
+
375
+ /* Per-socket M-level APLIC */
376
+ aplic_m = riscv_aplic_create(
377
+ memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
378
+ memmap[VIRT_APLIC_M].size,
379
+ base_hartid, hart_count,
380
+ VIRT_IRQCHIP_NUM_SOURCES,
381
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
382
+ false, true, NULL);
383
+
384
+ if (aplic_m) {
385
+ /* Per-socket S-level APLIC */
386
+ riscv_aplic_create(
387
+ memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
388
+ memmap[VIRT_APLIC_S].size,
389
+ base_hartid, hart_count,
390
+ VIRT_IRQCHIP_NUM_SOURCES,
391
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
392
+ false, false, aplic_m);
393
+ }
394
+
395
+ return aplic_m;
396
+}
397
+
398
static void virt_machine_init(MachineState *machine)
399
{
400
const MemMapEntry *memmap = virt_memmap;
401
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
402
MemoryRegion *system_memory = get_system_memory();
403
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
404
- char *plic_hart_config, *soc_name;
405
+ char *soc_name;
406
target_ulong start_addr = memmap[VIRT_DRAM].base;
407
target_ulong firmware_end_addr, kernel_start_addr;
408
uint32_t fdt_load_addr;
409
uint64_t kernel_entry;
410
- DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
411
+ DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
412
int i, base_hartid, hart_count;
413
414
/* Check socket count limit */
415
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
416
}
417
418
/* Initialize sockets */
419
- mmio_plic = virtio_plic = pcie_plic = NULL;
420
+ mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
421
for (i = 0; i < riscv_socket_count(machine); i++) {
422
if (!riscv_socket_check_hartids(machine, i)) {
423
error_report("discontinuous hartids in socket%d", i);
424
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
425
}
426
}
427
428
- /* Per-socket PLIC hart topology configuration string */
429
- plic_hart_config = riscv_plic_hart_config_string(hart_count);
430
-
431
- /* Per-socket PLIC */
432
- s->plic[i] = sifive_plic_create(
433
- memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size,
434
- plic_hart_config, hart_count, base_hartid,
435
- VIRT_PLIC_NUM_SOURCES,
436
- VIRT_PLIC_NUM_PRIORITIES,
437
- VIRT_PLIC_PRIORITY_BASE,
438
- VIRT_PLIC_PENDING_BASE,
439
- VIRT_PLIC_ENABLE_BASE,
440
- VIRT_PLIC_ENABLE_STRIDE,
441
- VIRT_PLIC_CONTEXT_BASE,
442
- VIRT_PLIC_CONTEXT_STRIDE,
443
- memmap[VIRT_PLIC].size);
444
- g_free(plic_hart_config);
445
+ /* Per-socket interrupt controller */
446
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
447
+ s->irqchip[i] = virt_create_plic(memmap, i,
448
+ base_hartid, hart_count);
449
+ } else {
450
+ s->irqchip[i] = virt_create_aia(s->aia_type, memmap, i,
451
+ base_hartid, hart_count);
452
+ }
453
454
- /* Try to use different PLIC instance based device type */
455
+ /* Try to use different IRQCHIP instance based device type */
456
if (i == 0) {
457
- mmio_plic = s->plic[i];
458
- virtio_plic = s->plic[i];
459
- pcie_plic = s->plic[i];
460
+ mmio_irqchip = s->irqchip[i];
461
+ virtio_irqchip = s->irqchip[i];
462
+ pcie_irqchip = s->irqchip[i];
463
}
464
if (i == 1) {
465
- virtio_plic = s->plic[i];
466
- pcie_plic = s->plic[i];
467
+ virtio_irqchip = s->irqchip[i];
468
+ pcie_irqchip = s->irqchip[i];
469
}
470
if (i == 2) {
471
- pcie_plic = s->plic[i];
472
+ pcie_irqchip = s->irqchip[i];
473
}
474
}
475
476
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
477
for (i = 0; i < VIRTIO_COUNT; i++) {
478
sysbus_create_simple("virtio-mmio",
479
memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
480
- qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i));
481
+ qdev_get_gpio_in(DEVICE(virtio_irqchip), VIRTIO_IRQ + i));
482
}
483
484
gpex_pcie_init(system_memory,
485
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
486
virt_high_pcie_memmap.base,
487
virt_high_pcie_memmap.size,
488
memmap[VIRT_PCIE_PIO].base,
489
- DEVICE(pcie_plic));
490
+ DEVICE(pcie_irqchip));
491
492
serial_mm_init(system_memory, memmap[VIRT_UART0].base,
493
- 0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193,
494
+ 0, qdev_get_gpio_in(DEVICE(mmio_irqchip), UART0_IRQ), 399193,
495
serial_hd(0), DEVICE_LITTLE_ENDIAN);
496
497
sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
498
- qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ));
499
+ qdev_get_gpio_in(DEVICE(mmio_irqchip), RTC_IRQ));
500
501
virt_flash_create(s);
502
503
@@ -XXX,XX +XXX,XX @@ static void virt_machine_instance_init(Object *obj)
504
{
505
}
506
507
+static char *virt_get_aia(Object *obj, Error **errp)
508
+{
509
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
510
+ const char *val;
511
+
512
+ switch (s->aia_type) {
513
+ case VIRT_AIA_TYPE_APLIC:
514
+ val = "aplic";
515
+ break;
516
+ default:
517
+ val = "none";
518
+ break;
519
+ };
520
+
521
+ return g_strdup(val);
522
+}
523
+
524
+static void virt_set_aia(Object *obj, const char *val, Error **errp)
525
+{
526
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
527
+
528
+ if (!strcmp(val, "none")) {
529
+ s->aia_type = VIRT_AIA_TYPE_NONE;
530
+ } else if (!strcmp(val, "aplic")) {
531
+ s->aia_type = VIRT_AIA_TYPE_APLIC;
532
+ } else {
533
+ error_setg(errp, "Invalid AIA interrupt controller type");
534
+ error_append_hint(errp, "Valid values are none, and aplic.\n");
535
+ }
536
+}
537
+
538
static bool virt_get_aclint(Object *obj, Error **errp)
539
{
540
MachineState *ms = MACHINE(obj);
541
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
542
object_class_property_set_description(oc, "aclint",
543
"Set on/off to enable/disable "
544
"emulating ACLINT devices");
545
+
546
+ object_class_property_add_str(oc, "aia", virt_get_aia,
547
+ virt_set_aia);
548
+ object_class_property_set_description(oc, "aia",
549
+ "Set type of AIA interrupt "
550
+ "conttoller. Valid values are "
551
+ "none, and aplic.");
552
}
553
554
static const TypeInfo virt_machine_typeinfo = {
555
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
556
index XXXXXXX..XXXXXXX 100644
557
--- a/hw/riscv/Kconfig
558
+++ b/hw/riscv/Kconfig
559
@@ -XXX,XX +XXX,XX @@ config RISCV_VIRT
560
select PFLASH_CFI01
561
select SERIAL
562
select RISCV_ACLINT
563
+ select RISCV_APLIC
564
select SIFIVE_PLIC
565
select SIFIVE_TEST
566
select VIRTIO_MMIO
567
--
568
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Anup Patel <anup.patel@wdc.com>
2
1
3
The RISC-V AIA (Advanced Interrupt Architecture) defines a new
4
interrupt controller for MSIs (message signal interrupts) called
5
IMSIC (Incoming Message Signal Interrupt Controller). The IMSIC
6
is per-HART device and also suppport virtualizaiton of MSIs using
7
dedicated VS-level guest interrupt files.
8
9
This patch adds device emulation for RISC-V AIA IMSIC which
10
supports M-level, S-level, and VS-level MSIs.
11
12
Signed-off-by: Anup Patel <anup.patel@wdc.com>
13
Signed-off-by: Anup Patel <anup@brainfault.org>
14
Reviewed-by: Frank Chang <frank.chang@sifive.com>
15
Message-Id: <20220220085526.808674-3-anup@brainfault.org>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
include/hw/intc/riscv_imsic.h | 68 ++++++
19
hw/intc/riscv_imsic.c | 448 ++++++++++++++++++++++++++++++++++
20
hw/intc/Kconfig | 3 +
21
hw/intc/meson.build | 1 +
22
4 files changed, 520 insertions(+)
23
create mode 100644 include/hw/intc/riscv_imsic.h
24
create mode 100644 hw/intc/riscv_imsic.c
25
26
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
27
new file mode 100644
28
index XXXXXXX..XXXXXXX
29
--- /dev/null
30
+++ b/include/hw/intc/riscv_imsic.h
31
@@ -XXX,XX +XXX,XX @@
32
+/*
33
+ * RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface
34
+ *
35
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
36
+ *
37
+ * This program is free software; you can redistribute it and/or modify it
38
+ * under the terms and conditions of the GNU General Public License,
39
+ * version 2 or later, as published by the Free Software Foundation.
40
+ *
41
+ * This program is distributed in the hope it will be useful, but WITHOUT
42
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
43
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
44
+ * more details.
45
+ *
46
+ * You should have received a copy of the GNU General Public License along with
47
+ * this program. If not, see <http://www.gnu.org/licenses/>.
48
+ */
49
+
50
+#ifndef HW_RISCV_IMSIC_H
51
+#define HW_RISCV_IMSIC_H
52
+
53
+#include "hw/sysbus.h"
54
+#include "qom/object.h"
55
+
56
+#define TYPE_RISCV_IMSIC "riscv.imsic"
57
+
58
+typedef struct RISCVIMSICState RISCVIMSICState;
59
+DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC)
60
+
61
+#define IMSIC_MMIO_PAGE_SHIFT 12
62
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
63
+#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ)
64
+
65
+#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6
66
+#define IMSIC_MMIO_GROUP_MIN_SHIFT 24
67
+
68
+#define IMSIC_HART_NUM_GUESTS(__guest_bits) \
69
+ (1U << (__guest_bits))
70
+#define IMSIC_HART_SIZE(__guest_bits) \
71
+ (IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ)
72
+#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \
73
+ (1U << (__hart_bits))
74
+#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \
75
+ (IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits))
76
+
77
+struct RISCVIMSICState {
78
+ /*< private >*/
79
+ SysBusDevice parent_obj;
80
+ qemu_irq *external_irqs;
81
+
82
+ /*< public >*/
83
+ MemoryRegion mmio;
84
+ uint32_t num_eistate;
85
+ uint32_t *eidelivery;
86
+ uint32_t *eithreshold;
87
+ uint32_t *eistate;
88
+
89
+ /* config */
90
+ bool mmode;
91
+ uint32_t hartid;
92
+ uint32_t num_pages;
93
+ uint32_t num_irqs;
94
+};
95
+
96
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
97
+ uint32_t num_pages, uint32_t num_ids);
98
+
99
+#endif
100
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
101
new file mode 100644
102
index XXXXXXX..XXXXXXX
103
--- /dev/null
104
+++ b/hw/intc/riscv_imsic.c
105
@@ -XXX,XX +XXX,XX @@
106
+/*
107
+ * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
108
+ *
109
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
110
+ *
111
+ * This program is free software; you can redistribute it and/or modify it
112
+ * under the terms and conditions of the GNU General Public License,
113
+ * version 2 or later, as published by the Free Software Foundation.
114
+ *
115
+ * This program is distributed in the hope it will be useful, but WITHOUT
116
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
117
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
118
+ * more details.
119
+ *
120
+ * You should have received a copy of the GNU General Public License along with
121
+ * this program. If not, see <http://www.gnu.org/licenses/>.
122
+ */
123
+
124
+#include "qemu/osdep.h"
125
+#include "qapi/error.h"
126
+#include "qemu/log.h"
127
+#include "qemu/module.h"
128
+#include "qemu/error-report.h"
129
+#include "qemu/bswap.h"
130
+#include "exec/address-spaces.h"
131
+#include "hw/sysbus.h"
132
+#include "hw/pci/msi.h"
133
+#include "hw/boards.h"
134
+#include "hw/qdev-properties.h"
135
+#include "hw/intc/riscv_imsic.h"
136
+#include "hw/irq.h"
137
+#include "target/riscv/cpu.h"
138
+#include "target/riscv/cpu_bits.h"
139
+#include "sysemu/sysemu.h"
140
+#include "migration/vmstate.h"
141
+
142
+#define IMSIC_MMIO_PAGE_LE 0x00
143
+#define IMSIC_MMIO_PAGE_BE 0x04
144
+
145
+#define IMSIC_MIN_ID ((IMSIC_EIPx_BITS * 2) - 1)
146
+#define IMSIC_MAX_ID (IMSIC_TOPEI_IID_MASK)
147
+
148
+#define IMSIC_EISTATE_PENDING (1U << 0)
149
+#define IMSIC_EISTATE_ENABLED (1U << 1)
150
+#define IMSIC_EISTATE_ENPEND (IMSIC_EISTATE_ENABLED | \
151
+ IMSIC_EISTATE_PENDING)
152
+
153
+static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
154
+{
155
+ uint32_t i, max_irq, base;
156
+
157
+ base = page * imsic->num_irqs;
158
+ max_irq = (imsic->eithreshold[page] &&
159
+ (imsic->eithreshold[page] <= imsic->num_irqs)) ?
160
+ imsic->eithreshold[page] : imsic->num_irqs;
161
+ for (i = 1; i < max_irq; i++) {
162
+ if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) ==
163
+ IMSIC_EISTATE_ENPEND) {
164
+ return (i << IMSIC_TOPEI_IID_SHIFT) | i;
165
+ }
166
+ }
167
+
168
+ return 0;
169
+}
170
+
171
+static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
172
+{
173
+ if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
174
+ qemu_irq_raise(imsic->external_irqs[page]);
175
+ } else {
176
+ qemu_irq_lower(imsic->external_irqs[page]);
177
+ }
178
+}
179
+
180
+static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page,
181
+ target_ulong *val,
182
+ target_ulong new_val,
183
+ target_ulong wr_mask)
184
+{
185
+ target_ulong old_val = imsic->eidelivery[page];
186
+
187
+ if (val) {
188
+ *val = old_val;
189
+ }
190
+
191
+ wr_mask &= 0x1;
192
+ imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
193
+
194
+ riscv_imsic_update(imsic, page);
195
+ return 0;
196
+}
197
+
198
+static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page,
199
+ target_ulong *val,
200
+ target_ulong new_val,
201
+ target_ulong wr_mask)
202
+{
203
+ target_ulong old_val = imsic->eithreshold[page];
204
+
205
+ if (val) {
206
+ *val = old_val;
207
+ }
208
+
209
+ wr_mask &= IMSIC_MAX_ID;
210
+ imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
211
+
212
+ riscv_imsic_update(imsic, page);
213
+ return 0;
214
+}
215
+
216
+static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
217
+ target_ulong *val, target_ulong new_val,
218
+ target_ulong wr_mask)
219
+{
220
+ uint32_t base, topei = riscv_imsic_topei(imsic, page);
221
+
222
+ /* Read pending and enabled interrupt with highest priority */
223
+ if (val) {
224
+ *val = topei;
225
+ }
226
+
227
+ /* Writes ignore value and clear top pending interrupt */
228
+ if (topei && wr_mask) {
229
+ topei >>= IMSIC_TOPEI_IID_SHIFT;
230
+ base = page * imsic->num_irqs;
231
+ if (topei) {
232
+ imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING;
233
+ }
234
+
235
+ riscv_imsic_update(imsic, page);
236
+ }
237
+
238
+ return 0;
239
+}
240
+
241
+static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
242
+ uint32_t xlen, uint32_t page,
243
+ uint32_t num, bool pend, target_ulong *val,
244
+ target_ulong new_val, target_ulong wr_mask)
245
+{
246
+ uint32_t i, base;
247
+ target_ulong mask;
248
+ uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;
249
+
250
+ if (xlen != 32) {
251
+ if (num & 0x1) {
252
+ return -EINVAL;
253
+ }
254
+ num >>= 1;
255
+ }
256
+ if (num >= (imsic->num_irqs / xlen)) {
257
+ return -EINVAL;
258
+ }
259
+
260
+ base = (page * imsic->num_irqs) + (num * xlen);
261
+
262
+ if (val) {
263
+ *val = 0;
264
+ for (i = 0; i < xlen; i++) {
265
+ mask = (target_ulong)1 << i;
266
+ *val |= (imsic->eistate[base + i] & state) ? mask : 0;
267
+ }
268
+ }
269
+
270
+ for (i = 0; i < xlen; i++) {
271
+ /* Bit0 of eip0 and eie0 are read-only zero */
272
+ if (!num && !i) {
273
+ continue;
274
+ }
275
+
276
+ mask = (target_ulong)1 << i;
277
+ if (wr_mask & mask) {
278
+ if (new_val & mask) {
279
+ imsic->eistate[base + i] |= state;
280
+ } else {
281
+ imsic->eistate[base + i] &= ~state;
282
+ }
283
+ }
284
+ }
285
+
286
+ riscv_imsic_update(imsic, page);
287
+ return 0;
288
+}
289
+
290
+static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val,
291
+ target_ulong new_val, target_ulong wr_mask)
292
+{
293
+ RISCVIMSICState *imsic = arg;
294
+ uint32_t isel, priv, virt, vgein, xlen, page;
295
+
296
+ priv = AIA_IREG_PRIV(reg);
297
+ virt = AIA_IREG_VIRT(reg);
298
+ isel = AIA_IREG_ISEL(reg);
299
+ vgein = AIA_IREG_VGEIN(reg);
300
+ xlen = AIA_IREG_XLEN(reg);
301
+
302
+ if (imsic->mmode) {
303
+ if (priv == PRV_M && !virt) {
304
+ page = 0;
305
+ } else {
306
+ goto err;
307
+ }
308
+ } else {
309
+ if (priv == PRV_S) {
310
+ if (virt) {
311
+ if (vgein && vgein < imsic->num_pages) {
312
+ page = vgein;
313
+ } else {
314
+ goto err;
315
+ }
316
+ } else {
317
+ page = 0;
318
+ }
319
+ } else {
320
+ goto err;
321
+ }
322
+ }
323
+
324
+ switch (isel) {
325
+ case ISELECT_IMSIC_EIDELIVERY:
326
+ return riscv_imsic_eidelivery_rmw(imsic, page, val,
327
+ new_val, wr_mask);
328
+ case ISELECT_IMSIC_EITHRESHOLD:
329
+ return riscv_imsic_eithreshold_rmw(imsic, page, val,
330
+ new_val, wr_mask);
331
+ case ISELECT_IMSIC_TOPEI:
332
+ return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask);
333
+ case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63:
334
+ return riscv_imsic_eix_rmw(imsic, xlen, page,
335
+ isel - ISELECT_IMSIC_EIP0,
336
+ true, val, new_val, wr_mask);
337
+ case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63:
338
+ return riscv_imsic_eix_rmw(imsic, xlen, page,
339
+ isel - ISELECT_IMSIC_EIE0,
340
+ false, val, new_val, wr_mask);
341
+ default:
342
+ break;
343
+ };
344
+
345
+err:
346
+ qemu_log_mask(LOG_GUEST_ERROR,
347
+ "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
348
+ __func__, priv, virt, isel, vgein);
349
+ return -EINVAL;
350
+}
351
+
352
+static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size)
353
+{
354
+ RISCVIMSICState *imsic = opaque;
355
+
356
+ /* Reads must be 4 byte words */
357
+ if ((addr & 0x3) != 0) {
358
+ goto err;
359
+ }
360
+
361
+ /* Reads cannot be out of range */
362
+ if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
363
+ goto err;
364
+ }
365
+
366
+ return 0;
367
+
368
+err:
369
+ qemu_log_mask(LOG_GUEST_ERROR,
370
+ "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
371
+ __func__, addr);
372
+ return 0;
373
+}
374
+
375
+static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
376
+ unsigned size)
377
+{
378
+ RISCVIMSICState *imsic = opaque;
379
+ uint32_t page;
380
+
381
+ /* Writes must be 4 byte words */
382
+ if ((addr & 0x3) != 0) {
383
+ goto err;
384
+ }
385
+
386
+ /* Writes cannot be out of range */
387
+ if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
388
+ goto err;
389
+ }
390
+
391
+ /* Writes only supported for MSI little-endian registers */
392
+ page = addr >> IMSIC_MMIO_PAGE_SHIFT;
393
+ if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
394
+ if (value && (value < imsic->num_irqs)) {
395
+ imsic->eistate[(page * imsic->num_irqs) + value] |=
396
+ IMSIC_EISTATE_PENDING;
397
+ }
398
+ }
399
+
400
+ /* Update CPU external interrupt status */
401
+ riscv_imsic_update(imsic, page);
402
+
403
+ return;
404
+
405
+err:
406
+ qemu_log_mask(LOG_GUEST_ERROR,
407
+ "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
408
+ __func__, addr);
409
+}
410
+
411
+static const MemoryRegionOps riscv_imsic_ops = {
412
+ .read = riscv_imsic_read,
413
+ .write = riscv_imsic_write,
414
+ .endianness = DEVICE_LITTLE_ENDIAN,
415
+ .valid = {
416
+ .min_access_size = 4,
417
+ .max_access_size = 4
418
+ }
419
+};
420
+
421
+static void riscv_imsic_realize(DeviceState *dev, Error **errp)
422
+{
423
+ RISCVIMSICState *imsic = RISCV_IMSIC(dev);
424
+ RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid));
425
+ CPUState *cpu = qemu_get_cpu(imsic->hartid);
426
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
427
+
428
+ imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
429
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
430
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
431
+ imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
432
+
433
+ memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
434
+ imsic, TYPE_RISCV_IMSIC,
435
+ IMSIC_MMIO_SIZE(imsic->num_pages));
436
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
437
+
438
+ /* Claim the CPU interrupt to be triggered by this IMSIC */
439
+ if (riscv_cpu_claim_interrupts(rcpu,
440
+ (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
441
+ error_setg(errp, "%s already claimed",
442
+ (imsic->mmode) ? "MEIP" : "SEIP");
443
+ return;
444
+ }
445
+
446
+ /* Create output IRQ lines */
447
+ imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
448
+ qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
449
+
450
+ /* Force select AIA feature and setup CSR read-modify-write callback */
451
+ if (env) {
452
+ riscv_set_feature(env, RISCV_FEATURE_AIA);
453
+ if (!imsic->mmode) {
454
+ riscv_cpu_set_geilen(env, imsic->num_pages - 1);
455
+ }
456
+ riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
457
+ riscv_imsic_rmw, imsic);
458
+ }
459
+
460
+ msi_nonbroken = true;
461
+}
462
+
463
+static Property riscv_imsic_properties[] = {
464
+ DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
465
+ DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
466
+ DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
467
+ DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
468
+ DEFINE_PROP_END_OF_LIST(),
469
+};
470
+
471
+static const VMStateDescription vmstate_riscv_imsic = {
472
+ .name = "riscv_imsic",
473
+ .version_id = 1,
474
+ .minimum_version_id = 1,
475
+ .fields = (VMStateField[]) {
476
+ VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
477
+ num_pages, 0,
478
+ vmstate_info_uint32, uint32_t),
479
+ VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
480
+ num_pages, 0,
481
+ vmstate_info_uint32, uint32_t),
482
+ VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
483
+ num_eistate, 0,
484
+ vmstate_info_uint32, uint32_t),
485
+ VMSTATE_END_OF_LIST()
486
+ }
487
+};
488
+
489
+static void riscv_imsic_class_init(ObjectClass *klass, void *data)
490
+{
491
+ DeviceClass *dc = DEVICE_CLASS(klass);
492
+
493
+ device_class_set_props(dc, riscv_imsic_properties);
494
+ dc->realize = riscv_imsic_realize;
495
+ dc->vmsd = &vmstate_riscv_imsic;
496
+}
497
+
498
+static const TypeInfo riscv_imsic_info = {
499
+ .name = TYPE_RISCV_IMSIC,
500
+ .parent = TYPE_SYS_BUS_DEVICE,
501
+ .instance_size = sizeof(RISCVIMSICState),
502
+ .class_init = riscv_imsic_class_init,
503
+};
504
+
505
+static void riscv_imsic_register_types(void)
506
+{
507
+ type_register_static(&riscv_imsic_info);
508
+}
509
+
510
+type_init(riscv_imsic_register_types)
511
+
512
+/*
513
+ * Create IMSIC device.
514
+ */
515
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
516
+ uint32_t num_pages, uint32_t num_ids)
517
+{
518
+ DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC);
519
+ CPUState *cpu = qemu_get_cpu(hartid);
520
+ uint32_t i;
521
+
522
+ assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1)));
523
+ if (mmode) {
524
+ assert(num_pages == 1);
525
+ } else {
526
+ assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1));
527
+ }
528
+ assert(IMSIC_MIN_ID <= num_ids);
529
+ assert(num_ids <= IMSIC_MAX_ID);
530
+ assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID);
531
+
532
+ qdev_prop_set_bit(dev, "mmode", mmode);
533
+ qdev_prop_set_uint32(dev, "hartid", hartid);
534
+ qdev_prop_set_uint32(dev, "num-pages", num_pages);
535
+ qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1);
536
+
537
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
538
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
539
+
540
+ for (i = 0; i < num_pages; i++) {
541
+ if (!i) {
542
+ qdev_connect_gpio_out_named(dev, NULL, i,
543
+ qdev_get_gpio_in(DEVICE(cpu),
544
+ (mmode) ? IRQ_M_EXT : IRQ_S_EXT));
545
+ } else {
546
+ qdev_connect_gpio_out_named(dev, NULL, i,
547
+ qdev_get_gpio_in(DEVICE(cpu),
548
+ IRQ_LOCAL_MAX + i - 1));
549
+ }
550
+ }
551
+
552
+ return dev;
553
+}
554
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
555
index XXXXXXX..XXXXXXX 100644
556
--- a/hw/intc/Kconfig
557
+++ b/hw/intc/Kconfig
558
@@ -XXX,XX +XXX,XX @@ config RISCV_ACLINT
559
config RISCV_APLIC
560
bool
561
562
+config RISCV_IMSIC
563
+ bool
564
+
565
config SIFIVE_PLIC
566
bool
567
568
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
569
index XXXXXXX..XXXXXXX 100644
570
--- a/hw/intc/meson.build
571
+++ b/hw/intc/meson.build
572
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
573
specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
574
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
575
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
576
+specific_ss.add(when: 'CONFIG_RISCV_IMSIC', if_true: files('riscv_imsic.c'))
577
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
578
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c'))
579
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],
580
--
581
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Anup Patel <anup.patel@wdc.com>
2
1
3
We extend virt machine to emulate both AIA IMSIC and AIA APLIC
4
devices only when "aia=aplic-imsic" parameter is passed along
5
with machine name in the QEMU command-line. The AIA IMSIC is
6
only a per-HART MSI controller so we use AIA APLIC in MSI-mode
7
to forward all wired interrupts as MSIs to the AIA IMSIC.
8
9
We also provide "aia-guests=<xyz>" parameter which can be used
10
to specify number of VS-level AIA IMSIC Guests MMIO pages for
11
each HART.
12
13
Signed-off-by: Anup Patel <anup.patel@wdc.com>
14
Signed-off-by: Anup Patel <anup@brainfault.org>
15
Acked-by: Alistair Francis <alistair.francis@wdc.com>
16
Message-Id: <20220220085526.808674-4-anup@brainfault.org>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
19
include/hw/riscv/virt.h | 17 +-
20
hw/riscv/virt.c | 439 ++++++++++++++++++++++++++++++++--------
21
hw/riscv/Kconfig | 1 +
22
3 files changed, 373 insertions(+), 84 deletions(-)
23
24
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/hw/riscv/virt.h
27
+++ b/include/hw/riscv/virt.h
28
@@ -XXX,XX +XXX,XX @@
29
#include "hw/block/flash.h"
30
#include "qom/object.h"
31
32
-#define VIRT_CPUS_MAX 32
33
-#define VIRT_SOCKETS_MAX 8
34
+#define VIRT_CPUS_MAX_BITS 3
35
+#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
36
+#define VIRT_SOCKETS_MAX_BITS 2
37
+#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
38
39
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
40
typedef struct RISCVVirtState RISCVVirtState;
41
@@ -XXX,XX +XXX,XX @@ DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
42
typedef enum RISCVVirtAIAType {
43
VIRT_AIA_TYPE_NONE = 0,
44
VIRT_AIA_TYPE_APLIC,
45
+ VIRT_AIA_TYPE_APLIC_IMSIC,
46
} RISCVVirtAIAType;
47
48
struct RISCVVirtState {
49
@@ -XXX,XX +XXX,XX @@ struct RISCVVirtState {
50
int fdt_size;
51
bool have_aclint;
52
RISCVVirtAIAType aia_type;
53
+ int aia_guests;
54
};
55
56
enum {
57
@@ -XXX,XX +XXX,XX @@ enum {
58
VIRT_UART0,
59
VIRT_VIRTIO,
60
VIRT_FW_CFG,
61
+ VIRT_IMSIC_M,
62
+ VIRT_IMSIC_S,
63
VIRT_FLASH,
64
VIRT_DRAM,
65
VIRT_PCIE_MMIO,
66
@@ -XXX,XX +XXX,XX @@ enum {
67
VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
68
};
69
70
-#define VIRT_IRQCHIP_NUM_SOURCES 127
71
+#define VIRT_IRQCHIP_IPI_MSI 1
72
+#define VIRT_IRQCHIP_NUM_MSIS 255
73
+#define VIRT_IRQCHIP_NUM_SOURCES VIRTIO_NDEV
74
#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
75
+#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3
76
+#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U)
77
78
#define VIRT_PLIC_PRIORITY_BASE 0x04
79
#define VIRT_PLIC_PENDING_BASE 0x1000
80
@@ -XXX,XX +XXX,XX @@ enum {
81
#define FDT_PCI_INT_CELLS 1
82
#define FDT_PLIC_INT_CELLS 1
83
#define FDT_APLIC_INT_CELLS 2
84
+#define FDT_IMSIC_INT_CELLS 0
85
#define FDT_MAX_INT_CELLS 2
86
#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
87
1 + FDT_MAX_INT_CELLS)
88
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/hw/riscv/virt.c
91
+++ b/hw/riscv/virt.c
92
@@ -XXX,XX +XXX,XX @@
93
#include "hw/riscv/numa.h"
94
#include "hw/intc/riscv_aclint.h"
95
#include "hw/intc/riscv_aplic.h"
96
+#include "hw/intc/riscv_imsic.h"
97
#include "hw/intc/sifive_plic.h"
98
#include "hw/misc/sifive_test.h"
99
#include "chardev/char.h"
100
@@ -XXX,XX +XXX,XX @@
101
#include "hw/pci-host/gpex.h"
102
#include "hw/display/ramfb.h"
103
104
+#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
105
+#if VIRT_IMSIC_GROUP_MAX_SIZE < \
106
+ IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
107
+#error "Can't accomodate single IMSIC group in address space"
108
+#endif
109
+
110
+#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
111
+ VIRT_IMSIC_GROUP_MAX_SIZE)
112
+#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
113
+#error "Can't accomodate all IMSIC groups in address space"
114
+#endif
115
+
116
static const MemMapEntry virt_memmap[] = {
117
[VIRT_DEBUG] = { 0x0, 0x100 },
118
[VIRT_MROM] = { 0x1000, 0xf000 },
119
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry virt_memmap[] = {
120
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
121
[VIRT_FW_CFG] = { 0x10100000, 0x18 },
122
[VIRT_FLASH] = { 0x20000000, 0x4000000 },
123
+ [VIRT_IMSIC_M] = { 0x24000000, VIRT_IMSIC_MAX_SIZE },
124
+ [VIRT_IMSIC_S] = { 0x28000000, VIRT_IMSIC_MAX_SIZE },
125
[VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 },
126
[VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 },
127
[VIRT_DRAM] = { 0x80000000, 0x0 },
128
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
129
{
130
int cpu;
131
char *name;
132
- unsigned long addr;
133
+ unsigned long addr, size;
134
uint32_t aclint_cells_size;
135
uint32_t *aclint_mswi_cells;
136
uint32_t *aclint_sswi_cells;
137
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
138
}
139
aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;
140
141
- addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
142
- name = g_strdup_printf("/soc/mswi@%lx", addr);
143
- qemu_fdt_add_subnode(mc->fdt, name);
144
- qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-mswi");
145
- qemu_fdt_setprop_cells(mc->fdt, name, "reg",
146
- 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
147
- qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
148
- aclint_mswi_cells, aclint_cells_size);
149
- qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
150
- qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
151
- riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
152
- g_free(name);
153
+ if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
154
+ addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
155
+ name = g_strdup_printf("/soc/mswi@%lx", addr);
156
+ qemu_fdt_add_subnode(mc->fdt, name);
157
+ qemu_fdt_setprop_string(mc->fdt, name, "compatible",
158
+ "riscv,aclint-mswi");
159
+ qemu_fdt_setprop_cells(mc->fdt, name, "reg",
160
+ 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
161
+ qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
162
+ aclint_mswi_cells, aclint_cells_size);
163
+ qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
164
+ qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
165
+ riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
166
+ g_free(name);
167
+ }
168
169
- addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
170
- (memmap[VIRT_CLINT].size * socket);
171
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
172
+ addr = memmap[VIRT_CLINT].base +
173
+ (RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
174
+ size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
175
+ } else {
176
+ addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
177
+ (memmap[VIRT_CLINT].size * socket);
178
+ size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
179
+ }
180
name = g_strdup_printf("/soc/mtimer@%lx", addr);
181
qemu_fdt_add_subnode(mc->fdt, name);
182
qemu_fdt_setprop_string(mc->fdt, name, "compatible",
183
"riscv,aclint-mtimer");
184
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
185
0x0, addr + RISCV_ACLINT_DEFAULT_MTIME,
186
- 0x0, memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE -
187
- RISCV_ACLINT_DEFAULT_MTIME,
188
+ 0x0, size - RISCV_ACLINT_DEFAULT_MTIME,
189
0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP,
190
0x0, RISCV_ACLINT_DEFAULT_MTIME);
191
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
192
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
193
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
194
g_free(name);
195
196
- addr = memmap[VIRT_ACLINT_SSWI].base +
197
- (memmap[VIRT_ACLINT_SSWI].size * socket);
198
- name = g_strdup_printf("/soc/sswi@%lx", addr);
199
- qemu_fdt_add_subnode(mc->fdt, name);
200
- qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-sswi");
201
- qemu_fdt_setprop_cells(mc->fdt, name, "reg",
202
- 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
203
- qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
204
- aclint_sswi_cells, aclint_cells_size);
205
- qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
206
- qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
207
- riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
208
- g_free(name);
209
+ if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
210
+ addr = memmap[VIRT_ACLINT_SSWI].base +
211
+ (memmap[VIRT_ACLINT_SSWI].size * socket);
212
+ name = g_strdup_printf("/soc/sswi@%lx", addr);
213
+ qemu_fdt_add_subnode(mc->fdt, name);
214
+ qemu_fdt_setprop_string(mc->fdt, name, "compatible",
215
+ "riscv,aclint-sswi");
216
+ qemu_fdt_setprop_cells(mc->fdt, name, "reg",
217
+ 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
218
+ qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
219
+ aclint_sswi_cells, aclint_cells_size);
220
+ qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
221
+ qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
222
+ riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
223
+ g_free(name);
224
+ }
225
226
g_free(aclint_mswi_cells);
227
g_free(aclint_mtimer_cells);
228
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
229
g_free(plic_cells);
230
}
231
232
-static void create_fdt_socket_aia(RISCVVirtState *s,
233
- const MemMapEntry *memmap, int socket,
234
- uint32_t *phandle, uint32_t *intc_phandles,
235
- uint32_t *aplic_phandles)
236
+static uint32_t imsic_num_bits(uint32_t count)
237
+{
238
+ uint32_t ret = 0;
239
+
240
+ while (BIT(ret) < count) {
241
+ ret++;
242
+ }
243
+
244
+ return ret;
245
+}
246
+
247
+static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
248
+ uint32_t *phandle, uint32_t *intc_phandles,
249
+ uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
250
+{
251
+ int cpu, socket;
252
+ char *imsic_name;
253
+ MachineState *mc = MACHINE(s);
254
+ uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
255
+ uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
256
+
257
+ *msi_m_phandle = (*phandle)++;
258
+ *msi_s_phandle = (*phandle)++;
259
+ imsic_cells = g_new0(uint32_t, mc->smp.cpus * 2);
260
+ imsic_regs = g_new0(uint32_t, riscv_socket_count(mc) * 4);
261
+
262
+ /* M-level IMSIC node */
263
+ for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
264
+ imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
265
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
266
+ }
267
+ imsic_max_hart_per_socket = 0;
268
+ for (socket = 0; socket < riscv_socket_count(mc); socket++) {
269
+ imsic_addr = memmap[VIRT_IMSIC_M].base +
270
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
271
+ imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
272
+ imsic_regs[socket * 4 + 0] = 0;
273
+ imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
274
+ imsic_regs[socket * 4 + 2] = 0;
275
+ imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
276
+ if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
277
+ imsic_max_hart_per_socket = s->soc[socket].num_harts;
278
+ }
279
+ }
280
+ imsic_name = g_strdup_printf("/soc/imsics@%lx",
281
+ (unsigned long)memmap[VIRT_IMSIC_M].base);
282
+ qemu_fdt_add_subnode(mc->fdt, imsic_name);
283
+ qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
284
+ "riscv,imsics");
285
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
286
+ FDT_IMSIC_INT_CELLS);
287
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
288
+ NULL, 0);
289
+ qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
290
+ NULL, 0);
291
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
292
+ imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
293
+ qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
294
+ riscv_socket_count(mc) * sizeof(uint32_t) * 4);
295
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
296
+ VIRT_IRQCHIP_NUM_MSIS);
297
+ qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
298
+ VIRT_IRQCHIP_IPI_MSI);
299
+ if (riscv_socket_count(mc) > 1) {
300
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
301
+ imsic_num_bits(imsic_max_hart_per_socket));
302
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
303
+ imsic_num_bits(riscv_socket_count(mc)));
304
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
305
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
306
+ }
307
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_m_phandle);
308
+ g_free(imsic_name);
309
+
310
+ /* S-level IMSIC node */
311
+ for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
312
+ imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
313
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
314
+ }
315
+ imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
316
+ imsic_max_hart_per_socket = 0;
317
+ for (socket = 0; socket < riscv_socket_count(mc); socket++) {
318
+ imsic_addr = memmap[VIRT_IMSIC_S].base +
319
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
320
+ imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
321
+ s->soc[socket].num_harts;
322
+ imsic_regs[socket * 4 + 0] = 0;
323
+ imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
324
+ imsic_regs[socket * 4 + 2] = 0;
325
+ imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
326
+ if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
327
+ imsic_max_hart_per_socket = s->soc[socket].num_harts;
328
+ }
329
+ }
330
+ imsic_name = g_strdup_printf("/soc/imsics@%lx",
331
+ (unsigned long)memmap[VIRT_IMSIC_S].base);
332
+ qemu_fdt_add_subnode(mc->fdt, imsic_name);
333
+ qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
334
+ "riscv,imsics");
335
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
336
+ FDT_IMSIC_INT_CELLS);
337
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
338
+ NULL, 0);
339
+ qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
340
+ NULL, 0);
341
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
342
+ imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
343
+ qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
344
+ riscv_socket_count(mc) * sizeof(uint32_t) * 4);
345
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
346
+ VIRT_IRQCHIP_NUM_MSIS);
347
+ qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
348
+ VIRT_IRQCHIP_IPI_MSI);
349
+ if (imsic_guest_bits) {
350
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,guest-index-bits",
351
+ imsic_guest_bits);
352
+ }
353
+ if (riscv_socket_count(mc) > 1) {
354
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
355
+ imsic_num_bits(imsic_max_hart_per_socket));
356
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
357
+ imsic_num_bits(riscv_socket_count(mc)));
358
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
359
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
360
+ }
361
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_s_phandle);
362
+ g_free(imsic_name);
363
+
364
+ g_free(imsic_regs);
365
+ g_free(imsic_cells);
366
+}
367
+
368
+static void create_fdt_socket_aplic(RISCVVirtState *s,
369
+ const MemMapEntry *memmap, int socket,
370
+ uint32_t msi_m_phandle,
371
+ uint32_t msi_s_phandle,
372
+ uint32_t *phandle,
373
+ uint32_t *intc_phandles,
374
+ uint32_t *aplic_phandles)
375
{
376
int cpu;
377
char *aplic_name;
378
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aia(RISCVVirtState *s,
379
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
380
"#interrupt-cells", FDT_APLIC_INT_CELLS);
381
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
382
- qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
383
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
384
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
385
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
386
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
387
+ } else {
388
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
389
+ msi_m_phandle);
390
+ }
391
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
392
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
393
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
394
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aia(RISCVVirtState *s,
395
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
396
"#interrupt-cells", FDT_APLIC_INT_CELLS);
397
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
398
- qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
399
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
400
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
401
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
402
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
403
+ } else {
404
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
405
+ msi_s_phandle);
406
+ }
407
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
408
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
409
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
410
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
411
bool is_32_bit, uint32_t *phandle,
412
uint32_t *irq_mmio_phandle,
413
uint32_t *irq_pcie_phandle,
414
- uint32_t *irq_virtio_phandle)
415
+ uint32_t *irq_virtio_phandle,
416
+ uint32_t *msi_pcie_phandle)
417
{
418
- int socket;
419
char *clust_name;
420
- uint32_t *intc_phandles;
421
+ int socket, phandle_pos;
422
MachineState *mc = MACHINE(s);
423
- uint32_t xplic_phandles[MAX_NODES];
424
+ uint32_t msi_m_phandle = 0, msi_s_phandle = 0;
425
+ uint32_t *intc_phandles, xplic_phandles[MAX_NODES];
426
427
qemu_fdt_add_subnode(mc->fdt, "/cpus");
428
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "timebase-frequency",
429
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
430
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#address-cells", 0x1);
431
qemu_fdt_add_subnode(mc->fdt, "/cpus/cpu-map");
432
433
+ intc_phandles = g_new0(uint32_t, mc->smp.cpus);
434
+
435
+ phandle_pos = mc->smp.cpus;
436
for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
437
+ phandle_pos -= s->soc[socket].num_harts;
438
+
439
clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
440
qemu_fdt_add_subnode(mc->fdt, clust_name);
441
442
- intc_phandles = g_new0(uint32_t, s->soc[socket].num_harts);
443
-
444
create_fdt_socket_cpus(s, socket, clust_name, phandle,
445
- is_32_bit, intc_phandles);
446
+ is_32_bit, &intc_phandles[phandle_pos]);
447
448
create_fdt_socket_memory(s, memmap, socket);
449
450
+ g_free(clust_name);
451
+
452
if (!kvm_enabled()) {
453
if (s->have_aclint) {
454
- create_fdt_socket_aclint(s, memmap, socket, intc_phandles);
455
+ create_fdt_socket_aclint(s, memmap, socket,
456
+ &intc_phandles[phandle_pos]);
457
} else {
458
- create_fdt_socket_clint(s, memmap, socket, intc_phandles);
459
+ create_fdt_socket_clint(s, memmap, socket,
460
+ &intc_phandles[phandle_pos]);
461
}
462
}
463
+ }
464
+
465
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
466
+ create_fdt_imsic(s, memmap, phandle, intc_phandles,
467
+ &msi_m_phandle, &msi_s_phandle);
468
+ *msi_pcie_phandle = msi_s_phandle;
469
+ }
470
+
471
+ phandle_pos = mc->smp.cpus;
472
+ for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
473
+ phandle_pos -= s->soc[socket].num_harts;
474
475
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
476
create_fdt_socket_plic(s, memmap, socket, phandle,
477
- intc_phandles, xplic_phandles);
478
+ &intc_phandles[phandle_pos], xplic_phandles);
479
} else {
480
- create_fdt_socket_aia(s, memmap, socket, phandle,
481
- intc_phandles, xplic_phandles);
482
+ create_fdt_socket_aplic(s, memmap, socket,
483
+ msi_m_phandle, msi_s_phandle, phandle,
484
+ &intc_phandles[phandle_pos], xplic_phandles);
485
}
486
-
487
- g_free(intc_phandles);
488
- g_free(clust_name);
489
}
490
491
+ g_free(intc_phandles);
492
+
493
for (socket = 0; socket < riscv_socket_count(mc); socket++) {
494
if (socket == 0) {
495
*irq_mmio_phandle = xplic_phandles[socket];
496
@@ -XXX,XX +XXX,XX @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
497
}
498
499
static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
500
- uint32_t irq_pcie_phandle)
501
+ uint32_t irq_pcie_phandle,
502
+ uint32_t msi_pcie_phandle)
503
{
504
char *name;
505
MachineState *mc = MACHINE(s);
506
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
507
qemu_fdt_setprop_cells(mc->fdt, name, "bus-range", 0,
508
memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
509
qemu_fdt_setprop(mc->fdt, name, "dma-coherent", NULL, 0);
510
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
511
+ qemu_fdt_setprop_cell(mc->fdt, name, "msi-parent", msi_pcie_phandle);
512
+ }
513
qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0,
514
memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
515
qemu_fdt_setprop_sized_cells(mc->fdt, name, "ranges",
516
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
517
uint64_t mem_size, const char *cmdline, bool is_32_bit)
518
{
519
MachineState *mc = MACHINE(s);
520
- uint32_t phandle = 1, irq_mmio_phandle = 1;
521
+ uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
522
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
523
524
if (mc->dtb) {
525
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
526
qemu_fdt_setprop_cell(mc->fdt, "/soc", "#address-cells", 0x2);
527
528
create_fdt_sockets(s, memmap, is_32_bit, &phandle,
529
- &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle);
530
+ &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle,
531
+ &msi_pcie_phandle);
532
533
create_fdt_virtio(s, memmap, irq_virtio_phandle);
534
535
- create_fdt_pcie(s, memmap, irq_pcie_phandle);
536
+ create_fdt_pcie(s, memmap, irq_pcie_phandle, msi_pcie_phandle);
537
538
create_fdt_reset(s, memmap, &phandle);
539
540
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
541
return ret;
542
}
543
544
-static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type,
545
+static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
546
const MemMapEntry *memmap, int socket,
547
int base_hartid, int hart_count)
548
{
549
+ int i;
550
+ hwaddr addr;
551
+ uint32_t guest_bits;
552
DeviceState *aplic_m;
553
+ bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
554
+
555
+ if (msimode) {
556
+ /* Per-socket M-level IMSICs */
557
+ addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
558
+ for (i = 0; i < hart_count; i++) {
559
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
560
+ base_hartid + i, true, 1,
561
+ VIRT_IRQCHIP_NUM_MSIS);
562
+ }
563
+
564
+ /* Per-socket S-level IMSICs */
565
+ guest_bits = imsic_num_bits(aia_guests + 1);
566
+ addr = memmap[VIRT_IMSIC_S].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
567
+ for (i = 0; i < hart_count; i++) {
568
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(guest_bits),
569
+ base_hartid + i, false, 1 + aia_guests,
570
+ VIRT_IRQCHIP_NUM_MSIS);
571
+ }
572
+ }
573
574
/* Per-socket M-level APLIC */
575
aplic_m = riscv_aplic_create(
576
memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
577
memmap[VIRT_APLIC_M].size,
578
- base_hartid, hart_count,
579
+ (msimode) ? 0 : base_hartid,
580
+ (msimode) ? 0 : hart_count,
581
VIRT_IRQCHIP_NUM_SOURCES,
582
VIRT_IRQCHIP_NUM_PRIO_BITS,
583
- false, true, NULL);
584
+ msimode, true, NULL);
585
586
if (aplic_m) {
587
/* Per-socket S-level APLIC */
588
riscv_aplic_create(
589
memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
590
memmap[VIRT_APLIC_S].size,
591
- base_hartid, hart_count,
592
+ (msimode) ? 0 : base_hartid,
593
+ (msimode) ? 0 : hart_count,
594
VIRT_IRQCHIP_NUM_SOURCES,
595
VIRT_IRQCHIP_NUM_PRIO_BITS,
596
- false, false, aplic_m);
597
+ msimode, false, aplic_m);
598
}
599
600
return aplic_m;
601
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
602
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort);
603
604
if (!kvm_enabled()) {
605
- /* Per-socket CLINT */
606
- riscv_aclint_swi_create(
607
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
608
- base_hartid, hart_count, false);
609
- riscv_aclint_mtimer_create(
610
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size +
611
- RISCV_ACLINT_SWI_SIZE,
612
- RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
613
- RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
614
- RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
615
-
616
- /* Per-socket ACLINT SSWI */
617
if (s->have_aclint) {
618
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
619
+ /* Per-socket ACLINT MTIMER */
620
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
621
+ i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
622
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
623
+ base_hartid, hart_count,
624
+ RISCV_ACLINT_DEFAULT_MTIMECMP,
625
+ RISCV_ACLINT_DEFAULT_MTIME,
626
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
627
+ } else {
628
+ /* Per-socket ACLINT MSWI, MTIMER, and SSWI */
629
+ riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
630
+ i * memmap[VIRT_CLINT].size,
631
+ base_hartid, hart_count, false);
632
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
633
+ i * memmap[VIRT_CLINT].size +
634
+ RISCV_ACLINT_SWI_SIZE,
635
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
636
+ base_hartid, hart_count,
637
+ RISCV_ACLINT_DEFAULT_MTIMECMP,
638
+ RISCV_ACLINT_DEFAULT_MTIME,
639
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
640
+ riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
641
+ i * memmap[VIRT_ACLINT_SSWI].size,
642
+ base_hartid, hart_count, true);
643
+ }
644
+ } else {
645
+ /* Per-socket SiFive CLINT */
646
riscv_aclint_swi_create(
647
- memmap[VIRT_ACLINT_SSWI].base +
648
- i * memmap[VIRT_ACLINT_SSWI].size,
649
- base_hartid, hart_count, true);
650
+ memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
651
+ base_hartid, hart_count, false);
652
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
653
+ i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
654
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
655
+ RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
656
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
657
}
658
}
659
660
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
661
s->irqchip[i] = virt_create_plic(memmap, i,
662
base_hartid, hart_count);
663
} else {
664
- s->irqchip[i] = virt_create_aia(s->aia_type, memmap, i,
665
- base_hartid, hart_count);
666
+ s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
667
+ memmap, i, base_hartid,
668
+ hart_count);
669
}
670
671
/* Try to use different IRQCHIP instance based device type */
672
@@ -XXX,XX +XXX,XX @@ static void virt_machine_instance_init(Object *obj)
673
{
674
}
675
676
+static char *virt_get_aia_guests(Object *obj, Error **errp)
677
+{
678
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
679
+ char val[32];
680
+
681
+ sprintf(val, "%d", s->aia_guests);
682
+ return g_strdup(val);
683
+}
684
+
685
+static void virt_set_aia_guests(Object *obj, const char *val, Error **errp)
686
+{
687
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
688
+
689
+ s->aia_guests = atoi(val);
690
+ if (s->aia_guests < 0 || s->aia_guests > VIRT_IRQCHIP_MAX_GUESTS) {
691
+ error_setg(errp, "Invalid number of AIA IMSIC guests");
692
+ error_append_hint(errp, "Valid values be between 0 and %d.\n",
693
+ VIRT_IRQCHIP_MAX_GUESTS);
694
+ }
695
+}
696
+
697
static char *virt_get_aia(Object *obj, Error **errp)
698
{
699
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
700
@@ -XXX,XX +XXX,XX @@ static char *virt_get_aia(Object *obj, Error **errp)
701
case VIRT_AIA_TYPE_APLIC:
702
val = "aplic";
703
break;
704
+ case VIRT_AIA_TYPE_APLIC_IMSIC:
705
+ val = "aplic-imsic";
706
+ break;
707
default:
708
val = "none";
709
break;
710
@@ -XXX,XX +XXX,XX @@ static void virt_set_aia(Object *obj, const char *val, Error **errp)
711
s->aia_type = VIRT_AIA_TYPE_NONE;
712
} else if (!strcmp(val, "aplic")) {
713
s->aia_type = VIRT_AIA_TYPE_APLIC;
714
+ } else if (!strcmp(val, "aplic-imsic")) {
715
+ s->aia_type = VIRT_AIA_TYPE_APLIC_IMSIC;
716
} else {
717
error_setg(errp, "Invalid AIA interrupt controller type");
718
- error_append_hint(errp, "Valid values are none, and aplic.\n");
719
+ error_append_hint(errp, "Valid values are none, aplic, and "
720
+ "aplic-imsic.\n");
721
}
722
}
723
724
@@ -XXX,XX +XXX,XX @@ static void virt_set_aclint(Object *obj, bool value, Error **errp)
725
726
static void virt_machine_class_init(ObjectClass *oc, void *data)
727
{
728
+ char str[128];
729
MachineClass *mc = MACHINE_CLASS(oc);
730
731
mc->desc = "RISC-V VirtIO board";
732
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
733
object_class_property_set_description(oc, "aia",
734
"Set type of AIA interrupt "
735
"conttoller. Valid values are "
736
- "none, and aplic.");
737
+ "none, aplic, and aplic-imsic.");
738
+
739
+ object_class_property_add_str(oc, "aia-guests",
740
+ virt_get_aia_guests,
741
+ virt_set_aia_guests);
742
+ sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value "
743
+ "should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS);
744
+ object_class_property_set_description(oc, "aia-guests", str);
745
}
746
747
static const TypeInfo virt_machine_typeinfo = {
748
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
749
index XXXXXXX..XXXXXXX 100644
750
--- a/hw/riscv/Kconfig
751
+++ b/hw/riscv/Kconfig
752
@@ -XXX,XX +XXX,XX @@ config RISCV_VIRT
753
select SERIAL
754
select RISCV_ACLINT
755
select RISCV_APLIC
756
+ select RISCV_IMSIC
757
select SIFIVE_PLIC
758
select SIFIVE_TEST
759
select VIRTIO_MMIO
760
--
761
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Anup Patel <anup.patel@wdc.com>
2
1
3
We have two new machine options "aia" and "aia-guests" available
4
for the RISC-V virt machine so let's document these options.
5
6
Signed-off-by: Anup Patel <anup.patel@wdc.com>
7
Signed-off-by: Anup Patel <anup@brainfault.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Frank Chang <frank.chang@sifive.com>
10
Message-Id: <20220220085526.808674-5-anup@brainfault.org>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
docs/system/riscv/virt.rst | 16 ++++++++++++++++
14
1 file changed, 16 insertions(+)
15
16
diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst
17
index XXXXXXX..XXXXXXX 100644
18
--- a/docs/system/riscv/virt.rst
19
+++ b/docs/system/riscv/virt.rst
20
@@ -XXX,XX +XXX,XX @@ The following machine-specific options are supported:
21
When this option is "on", ACLINT devices will be emulated instead of
22
SiFive CLINT. When not specified, this option is assumed to be "off".
23
24
+- aia=[none|aplic|aplic-imsic]
25
+
26
+ This option allows selecting interrupt controller defined by the AIA
27
+ (advanced interrupt architecture) specification. The "aia=aplic" selects
28
+ APLIC (advanced platform level interrupt controller) to handle wired
29
+ interrupts whereas the "aia=aplic-imsic" selects APLIC and IMSIC (incoming
30
+ message signaled interrupt controller) to handle both wired interrupts and
31
+ MSIs. When not specified, this option is assumed to be "none" which selects
32
+ SiFive PLIC to handle wired interrupts.
33
+
34
+- aia-guests=nnn
35
+
36
+ The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest
37
+ having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified,
38
+ the default number of per-HART VS-level AIA IMSIC pages is 0.
39
+
40
Running Linux kernel
41
--------------------
42
43
--
44
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Anup Patel <anup.patel@wdc.com>
2
1
3
To facilitate software development of RISC-V systems with a large number
4
of HARTs, we increase the maximum number of allowed CPUs to 512 (2^9).
5
6
We also add detailed source-level comments about limit defines which
7
impact the physical address space utilization.
8
9
Signed-off-by: Anup Patel <anup.patel@wdc.com>
10
Signed-off-by: Anup Patel <anup@brainfault.org>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Frank Chang <frank.chang@sifive.com>
13
Message-Id: <20220220085526.808674-6-anup@brainfault.org>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
include/hw/riscv/virt.h | 2 +-
17
hw/riscv/virt.c | 10 ++++++++++
18
2 files changed, 11 insertions(+), 1 deletion(-)
19
20
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/riscv/virt.h
23
+++ b/include/hw/riscv/virt.h
24
@@ -XXX,XX +XXX,XX @@
25
#include "hw/block/flash.h"
26
#include "qom/object.h"
27
28
-#define VIRT_CPUS_MAX_BITS 3
29
+#define VIRT_CPUS_MAX_BITS 9
30
#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
31
#define VIRT_SOCKETS_MAX_BITS 2
32
#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
33
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/hw/riscv/virt.c
36
+++ b/hw/riscv/virt.c
37
@@ -XXX,XX +XXX,XX @@
38
#include "hw/pci-host/gpex.h"
39
#include "hw/display/ramfb.h"
40
41
+/*
42
+ * The virt machine physical address space used by some of the devices
43
+ * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
44
+ * number of CPUs, and number of IMSIC guest files.
45
+ *
46
+ * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
47
+ * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
48
+ * of virt machine physical address space.
49
+ */
50
+
51
#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
52
#if VIRT_IMSIC_GROUP_MAX_SIZE < \
53
IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
54
--
55
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
2
1
3
This patch updates the SPI_DEVICE, SPI_HOST0, SPI_HOST1
4
base addresses. Also adds these as unimplemented devices.
5
6
The address references can be found [1].
7
8
[1] https://github.com/lowRISC/opentitan/blob/6c317992fbd646818b34f2a2dbf44bc850e461e4/hw/top_earlgrey/sw/autogen/top_earlgrey_memory.h#L107
9
10
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
13
Message-Id: <20220218063839.405082-1-alistair.francis@opensource.wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
include/hw/riscv/opentitan.h | 4 +++-
17
hw/riscv/opentitan.c | 12 +++++++++---
18
2 files changed, 12 insertions(+), 4 deletions(-)
19
20
diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/riscv/opentitan.h
23
+++ b/include/hw/riscv/opentitan.h
24
@@ -XXX,XX +XXX,XX @@ enum {
25
IBEX_DEV_FLASH,
26
IBEX_DEV_FLASH_VIRTUAL,
27
IBEX_DEV_UART,
28
+ IBEX_DEV_SPI_DEVICE,
29
+ IBEX_DEV_SPI_HOST0,
30
+ IBEX_DEV_SPI_HOST1,
31
IBEX_DEV_GPIO,
32
- IBEX_DEV_SPI,
33
IBEX_DEV_I2C,
34
IBEX_DEV_PATTGEN,
35
IBEX_DEV_TIMER,
36
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/hw/riscv/opentitan.c
39
+++ b/hw/riscv/opentitan.c
40
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry ibex_memmap[] = {
41
[IBEX_DEV_FLASH] = { 0x20000000, 0x80000 },
42
[IBEX_DEV_UART] = { 0x40000000, 0x1000 },
43
[IBEX_DEV_GPIO] = { 0x40040000, 0x1000 },
44
- [IBEX_DEV_SPI] = { 0x40050000, 0x1000 },
45
+ [IBEX_DEV_SPI_DEVICE] = { 0x40050000, 0x1000 },
46
[IBEX_DEV_I2C] = { 0x40080000, 0x1000 },
47
[IBEX_DEV_PATTGEN] = { 0x400e0000, 0x1000 },
48
[IBEX_DEV_TIMER] = { 0x40100000, 0x1000 },
49
[IBEX_DEV_SENSOR_CTRL] = { 0x40110000, 0x1000 },
50
[IBEX_DEV_OTP_CTRL] = { 0x40130000, 0x4000 },
51
[IBEX_DEV_USBDEV] = { 0x40150000, 0x1000 },
52
+ [IBEX_DEV_SPI_HOST0] = { 0x40300000, 0x1000 },
53
+ [IBEX_DEV_SPI_HOST1] = { 0x40310000, 0x1000 },
54
[IBEX_DEV_PWRMGR] = { 0x40400000, 0x1000 },
55
[IBEX_DEV_RSTMGR] = { 0x40410000, 0x1000 },
56
[IBEX_DEV_CLKMGR] = { 0x40420000, 0x1000 },
57
@@ -XXX,XX +XXX,XX @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
58
59
create_unimplemented_device("riscv.lowrisc.ibex.gpio",
60
memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size);
61
- create_unimplemented_device("riscv.lowrisc.ibex.spi",
62
- memmap[IBEX_DEV_SPI].base, memmap[IBEX_DEV_SPI].size);
63
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_device",
64
+ memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size);
65
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_host0",
66
+ memmap[IBEX_DEV_SPI_HOST0].base, memmap[IBEX_DEV_SPI_HOST0].size);
67
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_host1",
68
+ memmap[IBEX_DEV_SPI_HOST1].base, memmap[IBEX_DEV_SPI_HOST1].size);
69
create_unimplemented_device("riscv.lowrisc.ibex.i2c",
70
memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size);
71
create_unimplemented_device("riscv.lowrisc.ibex.pattgen",
72
--
73
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
1
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-Id: <20220211043920.28981-2-liweiwei@iscas.ac.cn>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/cpu.h | 4 ++++
12
target/riscv/cpu.c | 12 ++++++++++++
13
2 files changed, 16 insertions(+)
14
15
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.h
18
+++ b/target/riscv/cpu.h
19
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
20
bool ext_svinval;
21
bool ext_svnapot;
22
bool ext_svpbmt;
23
+ bool ext_zdinx;
24
bool ext_zfh;
25
bool ext_zfhmin;
26
+ bool ext_zfinx;
27
+ bool ext_zhinx;
28
+ bool ext_zhinxmin;
29
bool ext_zve32f;
30
bool ext_zve64f;
31
32
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu.c
35
+++ b/target/riscv/cpu.c
36
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
37
cpu->cfg.ext_d = true;
38
}
39
40
+ if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
41
+ cpu->cfg.ext_zhinxmin) {
42
+ cpu->cfg.ext_zfinx = true;
43
+ }
44
+
45
/* Set the ISA extensions, checks should have happened above */
46
if (cpu->cfg.ext_i) {
47
ext |= RVI;
48
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
49
if (cpu->cfg.ext_j) {
50
ext |= RVJ;
51
}
52
+ if (cpu->cfg.ext_zfinx && ((ext & (RVF | RVD)) || cpu->cfg.ext_zfh ||
53
+ cpu->cfg.ext_zfhmin)) {
54
+ error_setg(errp,
55
+ "'Zfinx' cannot be supported together with 'F', 'D', 'Zfh',"
56
+ " 'Zfhmin'");
57
+ return;
58
+ }
59
60
set_misa(env, env->misa_mxl, ext);
61
}
62
--
63
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
1
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-Id: <20220211043920.28981-3-liweiwei@iscas.ac.cn>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/cpu_helper.c | 6 +++++-
11
target/riscv/csr.c | 25 ++++++++++++++++++++-----
12
target/riscv/translate.c | 4 ++++
13
3 files changed, 29 insertions(+), 6 deletions(-)
14
15
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_helper.c
18
+++ b/target/riscv/cpu_helper.c
19
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_vector_enabled(CPURISCVState *env)
20
21
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
22
{
23
- uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
24
+ uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
25
MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
26
MSTATUS64_UXL | MSTATUS_VS;
27
+
28
+ if (riscv_has_ext(env, RVF)) {
29
+ mstatus_mask |= MSTATUS_FS;
30
+ }
31
bool current_virt = riscv_cpu_virt_enabled(env);
32
33
g_assert(riscv_has_ext(env, RVH));
34
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/riscv/csr.c
37
+++ b/target/riscv/csr.c
38
@@ -XXX,XX +XXX,XX @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
39
static RISCVException fs(CPURISCVState *env, int csrno)
40
{
41
#if !defined(CONFIG_USER_ONLY)
42
- if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
43
+ if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
44
+ !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
45
return RISCV_EXCP_ILLEGAL_INST;
46
}
47
#endif
48
@@ -XXX,XX +XXX,XX @@ static RISCVException write_fflags(CPURISCVState *env, int csrno,
49
target_ulong val)
50
{
51
#if !defined(CONFIG_USER_ONLY)
52
- env->mstatus |= MSTATUS_FS;
53
+ if (riscv_has_ext(env, RVF)) {
54
+ env->mstatus |= MSTATUS_FS;
55
+ }
56
#endif
57
riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
58
return RISCV_EXCP_NONE;
59
@@ -XXX,XX +XXX,XX @@ static RISCVException write_frm(CPURISCVState *env, int csrno,
60
target_ulong val)
61
{
62
#if !defined(CONFIG_USER_ONLY)
63
- env->mstatus |= MSTATUS_FS;
64
+ if (riscv_has_ext(env, RVF)) {
65
+ env->mstatus |= MSTATUS_FS;
66
+ }
67
#endif
68
env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
69
return RISCV_EXCP_NONE;
70
@@ -XXX,XX +XXX,XX @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno,
71
target_ulong val)
72
{
73
#if !defined(CONFIG_USER_ONLY)
74
- env->mstatus |= MSTATUS_FS;
75
+ if (riscv_has_ext(env, RVF)) {
76
+ env->mstatus |= MSTATUS_FS;
77
+ }
78
#endif
79
env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
80
riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
81
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
82
tlb_flush(env_cpu(env));
83
}
84
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
85
- MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
86
+ MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
87
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
88
MSTATUS_TW | MSTATUS_VS;
89
90
+ if (riscv_has_ext(env, RVF)) {
91
+ mask |= MSTATUS_FS;
92
+ }
93
+
94
if (xl != MXL_RV32 || env->debugger) {
95
/*
96
* RV32: MPV and GVA are not in mstatus. The current plan is to
97
@@ -XXX,XX +XXX,XX @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
98
return RISCV_EXCP_NONE;
99
}
100
101
+ if (!(val & RVF)) {
102
+ env->mstatus &= ~MSTATUS_FS;
103
+ }
104
+
105
/* flush translation cache */
106
tb_flush(env_cpu(env));
107
env->misa_ext = val;
108
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/riscv/translate.c
111
+++ b/target/riscv/translate.c
112
@@ -XXX,XX +XXX,XX @@ static void mark_fs_dirty(DisasContext *ctx)
113
{
114
TCGv tmp;
115
116
+ if (!has_ext(ctx, RVF)) {
117
+ return;
118
+ }
119
+
120
if (ctx->mstatus_fs != MSTATUS_FS) {
121
/* Remember the state change for the rest of the TB. */
122
ctx->mstatus_fs = MSTATUS_FS;
123
--
124
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
1
3
- update extension check REQUIRE_ZFINX_OR_F
4
- update single float point register read/write
5
- disable nanbox_s check
6
7
Co-authored-by: ardxwe <ardxwe@gmail.com>
8
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
9
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-Id: <20220211043920.28981-4-liweiwei@iscas.ac.cn>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/helper.h | 2 +-
16
target/riscv/internals.h | 16 +-
17
target/riscv/fpu_helper.c | 89 +++----
18
target/riscv/translate.c | 93 ++++++-
19
target/riscv/insn_trans/trans_rvf.c.inc | 314 ++++++++++++++++--------
20
5 files changed, 369 insertions(+), 145 deletions(-)
21
22
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/helper.h
25
+++ b/target/riscv/helper.h
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
27
DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
28
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
29
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
30
-DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
31
+DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64)
32
33
/* Floating Point - Double Precision */
34
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
35
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/riscv/internals.h
38
+++ b/target/riscv/internals.h
39
@@ -XXX,XX +XXX,XX @@ enum {
40
RISCV_FRM_ROD = 8, /* Round to Odd */
41
};
42
43
-static inline uint64_t nanbox_s(float32 f)
44
+static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
45
{
46
- return f | MAKE_64BIT_MASK(32, 32);
47
+ /* the value is sign-extended instead of NaN-boxing for zfinx */
48
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
49
+ return (int32_t)f;
50
+ } else {
51
+ return f | MAKE_64BIT_MASK(32, 32);
52
+ }
53
}
54
55
-static inline float32 check_nanbox_s(uint64_t f)
56
+static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
57
{
58
+ /* Disable NaN-boxing check when enable zfinx */
59
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
60
+ return (uint32_t)f;
61
+ }
62
+
63
uint64_t mask = MAKE_64BIT_MASK(32, 32);
64
65
if (likely((f & mask) == mask)) {
66
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/riscv/fpu_helper.c
69
+++ b/target/riscv/fpu_helper.c
70
@@ -XXX,XX +XXX,XX @@ static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
71
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
72
uint64_t rs3, int flags)
73
{
74
- float32 frs1 = check_nanbox_s(rs1);
75
- float32 frs2 = check_nanbox_s(rs2);
76
- float32 frs3 = check_nanbox_s(rs3);
77
- return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status));
78
+ float32 frs1 = check_nanbox_s(env, rs1);
79
+ float32 frs2 = check_nanbox_s(env, rs2);
80
+ float32 frs3 = check_nanbox_s(env, rs3);
81
+ return nanbox_s(env, float32_muladd(frs1, frs2, frs3, flags,
82
+ &env->fp_status));
83
}
84
85
uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
86
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
87
88
uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
89
{
90
- float32 frs1 = check_nanbox_s(rs1);
91
- float32 frs2 = check_nanbox_s(rs2);
92
- return nanbox_s(float32_add(frs1, frs2, &env->fp_status));
93
+ float32 frs1 = check_nanbox_s(env, rs1);
94
+ float32 frs2 = check_nanbox_s(env, rs2);
95
+ return nanbox_s(env, float32_add(frs1, frs2, &env->fp_status));
96
}
97
98
uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
99
{
100
- float32 frs1 = check_nanbox_s(rs1);
101
- float32 frs2 = check_nanbox_s(rs2);
102
- return nanbox_s(float32_sub(frs1, frs2, &env->fp_status));
103
+ float32 frs1 = check_nanbox_s(env, rs1);
104
+ float32 frs2 = check_nanbox_s(env, rs2);
105
+ return nanbox_s(env, float32_sub(frs1, frs2, &env->fp_status));
106
}
107
108
uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
109
{
110
- float32 frs1 = check_nanbox_s(rs1);
111
- float32 frs2 = check_nanbox_s(rs2);
112
- return nanbox_s(float32_mul(frs1, frs2, &env->fp_status));
113
+ float32 frs1 = check_nanbox_s(env, rs1);
114
+ float32 frs2 = check_nanbox_s(env, rs2);
115
+ return nanbox_s(env, float32_mul(frs1, frs2, &env->fp_status));
116
}
117
118
uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
119
{
120
- float32 frs1 = check_nanbox_s(rs1);
121
- float32 frs2 = check_nanbox_s(rs2);
122
- return nanbox_s(float32_div(frs1, frs2, &env->fp_status));
123
+ float32 frs1 = check_nanbox_s(env, rs1);
124
+ float32 frs2 = check_nanbox_s(env, rs2);
125
+ return nanbox_s(env, float32_div(frs1, frs2, &env->fp_status));
126
}
127
128
uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
129
{
130
- float32 frs1 = check_nanbox_s(rs1);
131
- float32 frs2 = check_nanbox_s(rs2);
132
- return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
133
+ float32 frs1 = check_nanbox_s(env, rs1);
134
+ float32 frs2 = check_nanbox_s(env, rs2);
135
+ return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
136
float32_minnum(frs1, frs2, &env->fp_status) :
137
float32_minimum_number(frs1, frs2, &env->fp_status));
138
}
139
140
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
141
{
142
- float32 frs1 = check_nanbox_s(rs1);
143
- float32 frs2 = check_nanbox_s(rs2);
144
- return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
145
+ float32 frs1 = check_nanbox_s(env, rs1);
146
+ float32 frs2 = check_nanbox_s(env, rs2);
147
+ return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
148
float32_maxnum(frs1, frs2, &env->fp_status) :
149
float32_maximum_number(frs1, frs2, &env->fp_status));
150
}
151
152
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
153
{
154
- float32 frs1 = check_nanbox_s(rs1);
155
- return nanbox_s(float32_sqrt(frs1, &env->fp_status));
156
+ float32 frs1 = check_nanbox_s(env, rs1);
157
+ return nanbox_s(env, float32_sqrt(frs1, &env->fp_status));
158
}
159
160
target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
161
{
162
- float32 frs1 = check_nanbox_s(rs1);
163
- float32 frs2 = check_nanbox_s(rs2);
164
+ float32 frs1 = check_nanbox_s(env, rs1);
165
+ float32 frs2 = check_nanbox_s(env, rs2);
166
return float32_le(frs1, frs2, &env->fp_status);
167
}
168
169
target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
170
{
171
- float32 frs1 = check_nanbox_s(rs1);
172
- float32 frs2 = check_nanbox_s(rs2);
173
+ float32 frs1 = check_nanbox_s(env, rs1);
174
+ float32 frs2 = check_nanbox_s(env, rs2);
175
return float32_lt(frs1, frs2, &env->fp_status);
176
}
177
178
target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
179
{
180
- float32 frs1 = check_nanbox_s(rs1);
181
- float32 frs2 = check_nanbox_s(rs2);
182
+ float32 frs1 = check_nanbox_s(env, rs1);
183
+ float32 frs2 = check_nanbox_s(env, rs2);
184
return float32_eq_quiet(frs1, frs2, &env->fp_status);
185
}
186
187
target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1)
188
{
189
- float32 frs1 = check_nanbox_s(rs1);
190
+ float32 frs1 = check_nanbox_s(env, rs1);
191
return float32_to_int32(frs1, &env->fp_status);
192
}
193
194
target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1)
195
{
196
- float32 frs1 = check_nanbox_s(rs1);
197
+ float32 frs1 = check_nanbox_s(env, rs1);
198
return (int32_t)float32_to_uint32(frs1, &env->fp_status);
199
}
200
201
target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1)
202
{
203
- float32 frs1 = check_nanbox_s(rs1);
204
+ float32 frs1 = check_nanbox_s(env, rs1);
205
return float32_to_int64(frs1, &env->fp_status);
206
}
207
208
target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1)
209
{
210
- float32 frs1 = check_nanbox_s(rs1);
211
+ float32 frs1 = check_nanbox_s(env, rs1);
212
return float32_to_uint64(frs1, &env->fp_status);
213
}
214
215
uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
216
{
217
- return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status));
218
+ return nanbox_s(env, int32_to_float32((int32_t)rs1, &env->fp_status));
219
}
220
221
uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
222
{
223
- return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status));
224
+ return nanbox_s(env, uint32_to_float32((uint32_t)rs1, &env->fp_status));
225
}
226
227
uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1)
228
{
229
- return nanbox_s(int64_to_float32(rs1, &env->fp_status));
230
+ return nanbox_s(env, int64_to_float32(rs1, &env->fp_status));
231
}
232
233
uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1)
234
{
235
- return nanbox_s(uint64_to_float32(rs1, &env->fp_status));
236
+ return nanbox_s(env, uint64_to_float32(rs1, &env->fp_status));
237
}
238
239
-target_ulong helper_fclass_s(uint64_t rs1)
240
+target_ulong helper_fclass_s(CPURISCVState *env, uint64_t rs1)
241
{
242
- float32 frs1 = check_nanbox_s(rs1);
243
+ float32 frs1 = check_nanbox_s(env, rs1);
244
return fclass_s(frs1);
245
}
246
247
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
248
249
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
250
{
251
- return nanbox_s(float64_to_float32(rs1, &env->fp_status));
252
+ return nanbox_s(env, float64_to_float32(rs1, &env->fp_status));
253
}
254
255
uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
256
{
257
- float32 frs1 = check_nanbox_s(rs1);
258
+ float32 frs1 = check_nanbox_s(env, rs1);
259
return float32_to_float64(frs1, &env->fp_status);
260
}
261
262
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
263
264
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
265
{
266
- float32 frs1 = check_nanbox_s(rs1);
267
+ float32 frs1 = check_nanbox_s(env, rs1);
268
return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
269
}
270
271
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
272
{
273
float16 frs1 = check_nanbox_h(rs1);
274
- return nanbox_s(float16_to_float32(frs1, true, &env->fp_status));
275
+ return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status));
276
}
277
278
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
279
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
280
index XXXXXXX..XXXXXXX 100644
281
--- a/target/riscv/translate.c
282
+++ b/target/riscv/translate.c
283
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
284
TCGv zero;
285
/* Space for 3 operands plus 1 extra for address computation. */
286
TCGv temp[4];
287
+ /* Space for 4 operands(1 dest and <=3 src) for float point computation */
288
+ TCGv_i64 ftemp[4];
289
+ uint8_t nftemp;
290
/* PointerMasking extension */
291
bool pm_mask_enabled;
292
bool pm_base_enabled;
293
@@ -XXX,XX +XXX,XX @@ static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
294
}
295
}
296
297
+static TCGv_i64 ftemp_new(DisasContext *ctx)
298
+{
299
+ assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp));
300
+ return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64();
301
+}
302
+
303
+static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
304
+{
305
+ if (!ctx->cfg_ptr->ext_zfinx) {
306
+ return cpu_fpr[reg_num];
307
+ }
308
+
309
+ if (reg_num == 0) {
310
+ return tcg_constant_i64(0);
311
+ }
312
+ switch (get_xl(ctx)) {
313
+ case MXL_RV32:
314
+#ifdef TARGET_RISCV32
315
+ {
316
+ TCGv_i64 t = ftemp_new(ctx);
317
+ tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]);
318
+ return t;
319
+ }
320
+#else
321
+ /* fall through */
322
+ case MXL_RV64:
323
+ return cpu_gpr[reg_num];
324
+#endif
325
+ default:
326
+ g_assert_not_reached();
327
+ }
328
+}
329
+
330
+static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
331
+{
332
+ if (!ctx->cfg_ptr->ext_zfinx) {
333
+ return cpu_fpr[reg_num];
334
+ }
335
+
336
+ if (reg_num == 0) {
337
+ return ftemp_new(ctx);
338
+ }
339
+
340
+ switch (get_xl(ctx)) {
341
+ case MXL_RV32:
342
+ return ftemp_new(ctx);
343
+#ifdef TARGET_RISCV64
344
+ case MXL_RV64:
345
+ return cpu_gpr[reg_num];
346
+#endif
347
+ default:
348
+ g_assert_not_reached();
349
+ }
350
+}
351
+
352
+/* assume t is nanboxing (for normal) or sign-extended (for zfinx) */
353
+static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
354
+{
355
+ if (!ctx->cfg_ptr->ext_zfinx) {
356
+ tcg_gen_mov_i64(cpu_fpr[reg_num], t);
357
+ return;
358
+ }
359
+ if (reg_num != 0) {
360
+ switch (get_xl(ctx)) {
361
+ case MXL_RV32:
362
+#ifdef TARGET_RISCV32
363
+ tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t);
364
+ break;
365
+#else
366
+ /* fall through */
367
+ case MXL_RV64:
368
+ tcg_gen_mov_i64(cpu_gpr[reg_num], t);
369
+ break;
370
+#endif
371
+ default:
372
+ g_assert_not_reached();
373
+ }
374
+ }
375
+}
376
+
377
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
378
{
379
target_ulong next_pc;
380
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
381
ctx->cs = cs;
382
ctx->ntemp = 0;
383
memset(ctx->temp, 0, sizeof(ctx->temp));
384
+ ctx->nftemp = 0;
385
+ memset(ctx->ftemp, 0, sizeof(ctx->ftemp));
386
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
387
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
388
ctx->zero = tcg_constant_tl(0);
389
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
390
DisasContext *ctx = container_of(dcbase, DisasContext, base);
391
CPURISCVState *env = cpu->env_ptr;
392
uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
393
+ int i;
394
395
ctx->ol = ctx->xl;
396
decode_opc(env, ctx, opcode16);
397
ctx->base.pc_next = ctx->pc_succ_insn;
398
399
- for (int i = ctx->ntemp - 1; i >= 0; --i) {
400
+ for (i = ctx->ntemp - 1; i >= 0; --i) {
401
tcg_temp_free(ctx->temp[i]);
402
ctx->temp[i] = NULL;
403
}
404
ctx->ntemp = 0;
405
+ for (i = ctx->nftemp - 1; i >= 0; --i) {
406
+ tcg_temp_free_i64(ctx->ftemp[i]);
407
+ ctx->ftemp[i] = NULL;
408
+ }
409
+ ctx->nftemp = 0;
410
411
if (ctx->base.is_jmp == DISAS_NEXT) {
412
target_ulong page_start;
413
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
414
index XXXXXXX..XXXXXXX 100644
415
--- a/target/riscv/insn_trans/trans_rvf.c.inc
416
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
417
@@ -XXX,XX +XXX,XX @@
418
419
#define REQUIRE_FPU do {\
420
if (ctx->mstatus_fs == 0) \
421
- return false; \
422
+ if (!ctx->cfg_ptr->ext_zfinx) \
423
+ return false; \
424
+} while (0)
425
+
426
+#define REQUIRE_ZFINX_OR_F(ctx) do {\
427
+ if (!ctx->cfg_ptr->ext_zfinx) { \
428
+ REQUIRE_EXT(ctx, RVF); \
429
+ } \
430
} while (0)
431
432
static bool trans_flw(DisasContext *ctx, arg_flw *a)
433
@@ -XXX,XX +XXX,XX @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
434
static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
435
{
436
REQUIRE_FPU;
437
- REQUIRE_EXT(ctx, RVF);
438
+ REQUIRE_ZFINX_OR_F(ctx);
439
+
440
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
441
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
442
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
443
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
444
+
445
gen_set_rm(ctx, a->rm);
446
- gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
447
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
448
+ gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
449
+ gen_set_fpr_hs(ctx, a->rd, dest);
450
mark_fs_dirty(ctx);
451
return true;
452
}
453
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
454
static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
455
{
456
REQUIRE_FPU;
457
- REQUIRE_EXT(ctx, RVF);
458
+ REQUIRE_ZFINX_OR_F(ctx);
459
+
460
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
461
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
462
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
463
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
464
+
465
gen_set_rm(ctx, a->rm);
466
- gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
467
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
468
+ gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
469
+ gen_set_fpr_hs(ctx, a->rd, dest);
470
mark_fs_dirty(ctx);
471
return true;
472
}
473
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
474
static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
475
{
476
REQUIRE_FPU;
477
- REQUIRE_EXT(ctx, RVF);
478
+ REQUIRE_ZFINX_OR_F(ctx);
479
+
480
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
481
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
482
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
483
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
484
+
485
gen_set_rm(ctx, a->rm);
486
- gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
487
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
488
+ gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
489
+ gen_set_fpr_hs(ctx, a->rd, dest);
490
mark_fs_dirty(ctx);
491
return true;
492
}
493
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
494
static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
495
{
496
REQUIRE_FPU;
497
- REQUIRE_EXT(ctx, RVF);
498
+ REQUIRE_ZFINX_OR_F(ctx);
499
+
500
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
501
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
502
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
503
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
504
+
505
gen_set_rm(ctx, a->rm);
506
- gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
507
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
508
+ gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
509
+ gen_set_fpr_hs(ctx, a->rd, dest);
510
mark_fs_dirty(ctx);
511
return true;
512
}
513
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
514
static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
515
{
516
REQUIRE_FPU;
517
- REQUIRE_EXT(ctx, RVF);
518
+ REQUIRE_ZFINX_OR_F(ctx);
519
+
520
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
521
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
522
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
523
524
gen_set_rm(ctx, a->rm);
525
- gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env,
526
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
527
+ gen_helper_fadd_s(dest, cpu_env, src1, src2);
528
+ gen_set_fpr_hs(ctx, a->rd, dest);
529
mark_fs_dirty(ctx);
530
return true;
531
}
532
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
533
static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
534
{
535
REQUIRE_FPU;
536
- REQUIRE_EXT(ctx, RVF);
537
+ REQUIRE_ZFINX_OR_F(ctx);
538
+
539
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
540
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
541
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
542
543
gen_set_rm(ctx, a->rm);
544
- gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env,
545
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
546
+ gen_helper_fsub_s(dest, cpu_env, src1, src2);
547
+ gen_set_fpr_hs(ctx, a->rd, dest);
548
mark_fs_dirty(ctx);
549
return true;
550
}
551
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
552
static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
553
{
554
REQUIRE_FPU;
555
- REQUIRE_EXT(ctx, RVF);
556
+ REQUIRE_ZFINX_OR_F(ctx);
557
+
558
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
559
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
560
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
561
562
gen_set_rm(ctx, a->rm);
563
- gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env,
564
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
565
+ gen_helper_fmul_s(dest, cpu_env, src1, src2);
566
+ gen_set_fpr_hs(ctx, a->rd, dest);
567
mark_fs_dirty(ctx);
568
return true;
569
}
570
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
571
static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
572
{
573
REQUIRE_FPU;
574
- REQUIRE_EXT(ctx, RVF);
575
+ REQUIRE_ZFINX_OR_F(ctx);
576
+
577
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
578
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
579
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
580
581
gen_set_rm(ctx, a->rm);
582
- gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env,
583
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
584
+ gen_helper_fdiv_s(dest, cpu_env, src1, src2);
585
+ gen_set_fpr_hs(ctx, a->rd, dest);
586
mark_fs_dirty(ctx);
587
return true;
588
}
589
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
590
static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
591
{
592
REQUIRE_FPU;
593
- REQUIRE_EXT(ctx, RVF);
594
+ REQUIRE_ZFINX_OR_F(ctx);
595
+
596
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
597
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
598
599
gen_set_rm(ctx, a->rm);
600
- gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
601
+ gen_helper_fsqrt_s(dest, cpu_env, src1);
602
+ gen_set_fpr_hs(ctx, a->rd, dest);
603
mark_fs_dirty(ctx);
604
return true;
605
}
606
@@ -XXX,XX +XXX,XX @@ static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
607
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
608
{
609
REQUIRE_FPU;
610
- REQUIRE_EXT(ctx, RVF);
611
+ REQUIRE_ZFINX_OR_F(ctx);
612
+
613
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
614
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
615
616
if (a->rs1 == a->rs2) { /* FMOV */
617
- gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
618
+ if (!ctx->cfg_ptr->ext_zfinx) {
619
+ gen_check_nanbox_s(dest, src1);
620
+ } else {
621
+ tcg_gen_ext32s_i64(dest, src1);
622
+ }
623
} else { /* FSGNJ */
624
- TCGv_i64 rs1 = tcg_temp_new_i64();
625
- TCGv_i64 rs2 = tcg_temp_new_i64();
626
-
627
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
628
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
629
-
630
- /* This formulation retains the nanboxing of rs2. */
631
- tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
632
- tcg_temp_free_i64(rs1);
633
- tcg_temp_free_i64(rs2);
634
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
635
+
636
+ if (!ctx->cfg_ptr->ext_zfinx) {
637
+ TCGv_i64 rs1 = tcg_temp_new_i64();
638
+ TCGv_i64 rs2 = tcg_temp_new_i64();
639
+ gen_check_nanbox_s(rs1, src1);
640
+ gen_check_nanbox_s(rs2, src2);
641
+
642
+ /* This formulation retains the nanboxing of rs2 in normal 'F'. */
643
+ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);
644
+
645
+ tcg_temp_free_i64(rs1);
646
+ tcg_temp_free_i64(rs2);
647
+ } else {
648
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
649
+ tcg_gen_ext32s_i64(dest, dest);
650
+ }
651
}
652
+ gen_set_fpr_hs(ctx, a->rd, dest);
653
mark_fs_dirty(ctx);
654
return true;
655
}
656
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
657
TCGv_i64 rs1, rs2, mask;
658
659
REQUIRE_FPU;
660
- REQUIRE_EXT(ctx, RVF);
661
+ REQUIRE_ZFINX_OR_F(ctx);
662
663
- rs1 = tcg_temp_new_i64();
664
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
665
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
666
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
667
668
+ rs1 = tcg_temp_new_i64();
669
+ if (!ctx->cfg_ptr->ext_zfinx) {
670
+ gen_check_nanbox_s(rs1, src1);
671
+ } else {
672
+ tcg_gen_mov_i64(rs1, src1);
673
+ }
674
if (a->rs1 == a->rs2) { /* FNEG */
675
- tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
676
+ tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
677
} else {
678
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
679
rs2 = tcg_temp_new_i64();
680
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
681
+ if (!ctx->cfg_ptr->ext_zfinx) {
682
+ gen_check_nanbox_s(rs2, src2);
683
+ } else {
684
+ tcg_gen_mov_i64(rs2, src2);
685
+ }
686
687
/*
688
* Replace bit 31 in rs1 with inverse in rs2.
689
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
690
*/
691
mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
692
tcg_gen_nor_i64(rs2, rs2, mask);
693
- tcg_gen_and_i64(rs1, mask, rs1);
694
- tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
695
+ tcg_gen_and_i64(dest, mask, rs1);
696
+ tcg_gen_or_i64(dest, dest, rs2);
697
698
tcg_temp_free_i64(rs2);
699
}
700
+ /* signed-extended intead of nanboxing for result if enable zfinx */
701
+ if (ctx->cfg_ptr->ext_zfinx) {
702
+ tcg_gen_ext32s_i64(dest, dest);
703
+ }
704
+ gen_set_fpr_hs(ctx, a->rd, dest);
705
tcg_temp_free_i64(rs1);
706
-
707
mark_fs_dirty(ctx);
708
return true;
709
}
710
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
711
TCGv_i64 rs1, rs2;
712
713
REQUIRE_FPU;
714
- REQUIRE_EXT(ctx, RVF);
715
+ REQUIRE_ZFINX_OR_F(ctx);
716
717
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
718
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
719
rs1 = tcg_temp_new_i64();
720
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
721
+
722
+ if (!ctx->cfg_ptr->ext_zfinx) {
723
+ gen_check_nanbox_s(rs1, src1);
724
+ } else {
725
+ tcg_gen_mov_i64(rs1, src1);
726
+ }
727
728
if (a->rs1 == a->rs2) { /* FABS */
729
- tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
730
+ tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
731
} else {
732
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
733
rs2 = tcg_temp_new_i64();
734
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
735
+
736
+ if (!ctx->cfg_ptr->ext_zfinx) {
737
+ gen_check_nanbox_s(rs2, src2);
738
+ } else {
739
+ tcg_gen_mov_i64(rs2, src2);
740
+ }
741
742
/*
743
* Xor bit 31 in rs1 with that in rs2.
744
* This formulation retains the nanboxing of rs1.
745
*/
746
- tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
747
- tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
748
+ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
749
+ tcg_gen_xor_i64(dest, rs1, dest);
750
751
tcg_temp_free_i64(rs2);
752
}
753
+ /* signed-extended intead of nanboxing for result if enable zfinx */
754
+ if (ctx->cfg_ptr->ext_zfinx) {
755
+ tcg_gen_ext32s_i64(dest, dest);
756
+ }
757
tcg_temp_free_i64(rs1);
758
-
759
+ gen_set_fpr_hs(ctx, a->rd, dest);
760
mark_fs_dirty(ctx);
761
return true;
762
}
763
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
764
static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
765
{
766
REQUIRE_FPU;
767
- REQUIRE_EXT(ctx, RVF);
768
+ REQUIRE_ZFINX_OR_F(ctx);
769
+
770
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
771
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
772
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
773
774
- gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
775
- cpu_fpr[a->rs2]);
776
+ gen_helper_fmin_s(dest, cpu_env, src1, src2);
777
+ gen_set_fpr_hs(ctx, a->rd, dest);
778
mark_fs_dirty(ctx);
779
return true;
780
}
781
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
782
static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
783
{
784
REQUIRE_FPU;
785
- REQUIRE_EXT(ctx, RVF);
786
+ REQUIRE_ZFINX_OR_F(ctx);
787
+
788
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
789
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
790
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
791
792
- gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
793
- cpu_fpr[a->rs2]);
794
+ gen_helper_fmax_s(dest, cpu_env, src1, src2);
795
+ gen_set_fpr_hs(ctx, a->rd, dest);
796
mark_fs_dirty(ctx);
797
return true;
798
}
799
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
800
static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
801
{
802
REQUIRE_FPU;
803
- REQUIRE_EXT(ctx, RVF);
804
+ REQUIRE_ZFINX_OR_F(ctx);
805
806
TCGv dest = dest_gpr(ctx, a->rd);
807
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
808
809
gen_set_rm(ctx, a->rm);
810
- gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
811
+ gen_helper_fcvt_w_s(dest, cpu_env, src1);
812
gen_set_gpr(ctx, a->rd, dest);
813
return true;
814
}
815
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
816
static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
817
{
818
REQUIRE_FPU;
819
- REQUIRE_EXT(ctx, RVF);
820
+ REQUIRE_ZFINX_OR_F(ctx);
821
822
TCGv dest = dest_gpr(ctx, a->rd);
823
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
824
825
gen_set_rm(ctx, a->rm);
826
- gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
827
+ gen_helper_fcvt_wu_s(dest, cpu_env, src1);
828
gen_set_gpr(ctx, a->rd, dest);
829
return true;
830
}
831
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
832
{
833
/* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
834
REQUIRE_FPU;
835
- REQUIRE_EXT(ctx, RVF);
836
+ REQUIRE_ZFINX_OR_F(ctx);
837
838
TCGv dest = dest_gpr(ctx, a->rd);
839
-
840
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
841
#if defined(TARGET_RISCV64)
842
- tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
843
+ tcg_gen_ext32s_tl(dest, src1);
844
#else
845
- tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
846
+ tcg_gen_extrl_i64_i32(dest, src1);
847
#endif
848
849
gen_set_gpr(ctx, a->rd, dest);
850
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
851
static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
852
{
853
REQUIRE_FPU;
854
- REQUIRE_EXT(ctx, RVF);
855
+ REQUIRE_ZFINX_OR_F(ctx);
856
857
TCGv dest = dest_gpr(ctx, a->rd);
858
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
859
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
860
861
- gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
862
+ gen_helper_feq_s(dest, cpu_env, src1, src2);
863
gen_set_gpr(ctx, a->rd, dest);
864
return true;
865
}
866
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
867
static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
868
{
869
REQUIRE_FPU;
870
- REQUIRE_EXT(ctx, RVF);
871
+ REQUIRE_ZFINX_OR_F(ctx);
872
873
TCGv dest = dest_gpr(ctx, a->rd);
874
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
875
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
876
877
- gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
878
+ gen_helper_flt_s(dest, cpu_env, src1, src2);
879
gen_set_gpr(ctx, a->rd, dest);
880
return true;
881
}
882
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
883
static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
884
{
885
REQUIRE_FPU;
886
- REQUIRE_EXT(ctx, RVF);
887
+ REQUIRE_ZFINX_OR_F(ctx);
888
889
TCGv dest = dest_gpr(ctx, a->rd);
890
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
891
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
892
893
- gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
894
+ gen_helper_fle_s(dest, cpu_env, src1, src2);
895
gen_set_gpr(ctx, a->rd, dest);
896
return true;
897
}
898
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
899
static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
900
{
901
REQUIRE_FPU;
902
- REQUIRE_EXT(ctx, RVF);
903
+ REQUIRE_ZFINX_OR_F(ctx);
904
905
TCGv dest = dest_gpr(ctx, a->rd);
906
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
907
908
- gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
909
+ gen_helper_fclass_s(dest, cpu_env, src1);
910
gen_set_gpr(ctx, a->rd, dest);
911
return true;
912
}
913
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
914
static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
915
{
916
REQUIRE_FPU;
917
- REQUIRE_EXT(ctx, RVF);
918
+ REQUIRE_ZFINX_OR_F(ctx);
919
920
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
921
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
922
923
gen_set_rm(ctx, a->rm);
924
- gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);
925
-
926
+ gen_helper_fcvt_s_w(dest, cpu_env, src);
927
+ gen_set_fpr_hs(ctx, a->rd, dest);
928
mark_fs_dirty(ctx);
929
return true;
930
}
931
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
932
static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
933
{
934
REQUIRE_FPU;
935
- REQUIRE_EXT(ctx, RVF);
936
+ REQUIRE_ZFINX_OR_F(ctx);
937
938
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
939
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
940
941
gen_set_rm(ctx, a->rm);
942
- gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);
943
-
944
+ gen_helper_fcvt_s_wu(dest, cpu_env, src);
945
+ gen_set_fpr_hs(ctx, a->rd, dest);
946
mark_fs_dirty(ctx);
947
return true;
948
}
949
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
950
{
951
/* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
952
REQUIRE_FPU;
953
- REQUIRE_EXT(ctx, RVF);
954
+ REQUIRE_ZFINX_OR_F(ctx);
955
956
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
957
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
958
959
- tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
960
- gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
961
-
962
+ tcg_gen_extu_tl_i64(dest, src);
963
+ gen_nanbox_s(dest, dest);
964
+ gen_set_fpr_hs(ctx, a->rd, dest);
965
mark_fs_dirty(ctx);
966
return true;
967
}
968
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
969
{
970
REQUIRE_64BIT(ctx);
971
REQUIRE_FPU;
972
- REQUIRE_EXT(ctx, RVF);
973
+ REQUIRE_ZFINX_OR_F(ctx);
974
975
TCGv dest = dest_gpr(ctx, a->rd);
976
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
977
978
gen_set_rm(ctx, a->rm);
979
- gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
980
+ gen_helper_fcvt_l_s(dest, cpu_env, src1);
981
gen_set_gpr(ctx, a->rd, dest);
982
return true;
983
}
984
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
985
{
986
REQUIRE_64BIT(ctx);
987
REQUIRE_FPU;
988
- REQUIRE_EXT(ctx, RVF);
989
+ REQUIRE_ZFINX_OR_F(ctx);
990
991
TCGv dest = dest_gpr(ctx, a->rd);
992
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
993
994
gen_set_rm(ctx, a->rm);
995
- gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
996
+ gen_helper_fcvt_lu_s(dest, cpu_env, src1);
997
gen_set_gpr(ctx, a->rd, dest);
998
return true;
999
}
1000
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
1001
{
1002
REQUIRE_64BIT(ctx);
1003
REQUIRE_FPU;
1004
- REQUIRE_EXT(ctx, RVF);
1005
+ REQUIRE_ZFINX_OR_F(ctx);
1006
1007
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
1008
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
1009
1010
gen_set_rm(ctx, a->rm);
1011
- gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);
1012
-
1013
+ gen_helper_fcvt_s_l(dest, cpu_env, src);
1014
+ gen_set_fpr_hs(ctx, a->rd, dest);
1015
mark_fs_dirty(ctx);
1016
return true;
1017
}
1018
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
1019
{
1020
REQUIRE_64BIT(ctx);
1021
REQUIRE_FPU;
1022
- REQUIRE_EXT(ctx, RVF);
1023
+ REQUIRE_ZFINX_OR_F(ctx);
1024
1025
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
1026
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
1027
1028
gen_set_rm(ctx, a->rm);
1029
- gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);
1030
-
1031
+ gen_helper_fcvt_s_lu(dest, cpu_env, src);
1032
+ gen_set_fpr_hs(ctx, a->rd, dest);
1033
mark_fs_dirty(ctx);
1034
return true;
1035
}
1036
--
1037
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
1
3
-- update extension check REQUIRE_ZDINX_OR_D
4
-- update double float point register read/write
5
6
Co-authored-by: ardxwe <ardxwe@gmail.com>
7
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-Id: <20220211043920.28981-5-liweiwei@iscas.ac.cn>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
target/riscv/translate.c | 52 +++++
15
target/riscv/insn_trans/trans_rvd.c.inc | 285 +++++++++++++++++-------
16
2 files changed, 259 insertions(+), 78 deletions(-)
17
18
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/translate.c
21
+++ b/target/riscv/translate.c
22
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
23
}
24
}
25
26
+static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num)
27
+{
28
+ if (!ctx->cfg_ptr->ext_zfinx) {
29
+ return cpu_fpr[reg_num];
30
+ }
31
+
32
+ if (reg_num == 0) {
33
+ return tcg_constant_i64(0);
34
+ }
35
+ switch (get_xl(ctx)) {
36
+ case MXL_RV32:
37
+ {
38
+ TCGv_i64 t = ftemp_new(ctx);
39
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
40
+ return t;
41
+ }
42
+#ifdef TARGET_RISCV64
43
+ case MXL_RV64:
44
+ return cpu_gpr[reg_num];
45
+#endif
46
+ default:
47
+ g_assert_not_reached();
48
+ }
49
+}
50
+
51
static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
52
{
53
if (!ctx->cfg_ptr->ext_zfinx) {
54
@@ -XXX,XX +XXX,XX @@ static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
55
}
56
}
57
58
+static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
59
+{
60
+ if (!ctx->cfg_ptr->ext_zfinx) {
61
+ tcg_gen_mov_i64(cpu_fpr[reg_num], t);
62
+ return;
63
+ }
64
+
65
+ if (reg_num != 0) {
66
+ switch (get_xl(ctx)) {
67
+ case MXL_RV32:
68
+#ifdef TARGET_RISCV32
69
+ tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
70
+ break;
71
+#else
72
+ tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
73
+ tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
74
+ break;
75
+ case MXL_RV64:
76
+ tcg_gen_mov_i64(cpu_gpr[reg_num], t);
77
+ break;
78
+#endif
79
+ default:
80
+ g_assert_not_reached();
81
+ }
82
+ }
83
+}
84
+
85
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
86
{
87
target_ulong next_pc;
88
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/riscv/insn_trans/trans_rvd.c.inc
91
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
92
@@ -XXX,XX +XXX,XX @@
93
* this program. If not, see <http://www.gnu.org/licenses/>.
94
*/
95
96
+#define REQUIRE_ZDINX_OR_D(ctx) do { \
97
+ if (!ctx->cfg_ptr->ext_zdinx) { \
98
+ REQUIRE_EXT(ctx, RVD); \
99
+ } \
100
+} while (0)
101
+
102
+#define REQUIRE_EVEN(ctx, reg) do { \
103
+ if (ctx->cfg_ptr->ext_zdinx && (get_xl(ctx) == MXL_RV32) && \
104
+ ((reg) & 0x1)) { \
105
+ return false; \
106
+ } \
107
+} while (0)
108
+
109
static bool trans_fld(DisasContext *ctx, arg_fld *a)
110
{
111
TCGv addr;
112
@@ -XXX,XX +XXX,XX @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
113
static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
114
{
115
REQUIRE_FPU;
116
- REQUIRE_EXT(ctx, RVD);
117
+ REQUIRE_ZDINX_OR_D(ctx);
118
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
119
+
120
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
121
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
122
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
123
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
124
+
125
gen_set_rm(ctx, a->rm);
126
- gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
127
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
128
+ gen_helper_fmadd_d(dest, cpu_env, src1, src2, src3);
129
+ gen_set_fpr_d(ctx, a->rd, dest);
130
mark_fs_dirty(ctx);
131
return true;
132
}
133
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
134
static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
135
{
136
REQUIRE_FPU;
137
- REQUIRE_EXT(ctx, RVD);
138
+ REQUIRE_ZDINX_OR_D(ctx);
139
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
140
+
141
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
142
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
143
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
144
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
145
+
146
gen_set_rm(ctx, a->rm);
147
- gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
148
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
149
+ gen_helper_fmsub_d(dest, cpu_env, src1, src2, src3);
150
+ gen_set_fpr_d(ctx, a->rd, dest);
151
mark_fs_dirty(ctx);
152
return true;
153
}
154
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
155
static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
156
{
157
REQUIRE_FPU;
158
- REQUIRE_EXT(ctx, RVD);
159
+ REQUIRE_ZDINX_OR_D(ctx);
160
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
161
+
162
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
163
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
164
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
165
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
166
+
167
gen_set_rm(ctx, a->rm);
168
- gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
169
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
170
+ gen_helper_fnmsub_d(dest, cpu_env, src1, src2, src3);
171
+ gen_set_fpr_d(ctx, a->rd, dest);
172
mark_fs_dirty(ctx);
173
return true;
174
}
175
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
176
static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
177
{
178
REQUIRE_FPU;
179
- REQUIRE_EXT(ctx, RVD);
180
+ REQUIRE_ZDINX_OR_D(ctx);
181
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
182
+
183
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
184
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
185
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
186
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
187
+
188
gen_set_rm(ctx, a->rm);
189
- gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
190
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
191
+ gen_helper_fnmadd_d(dest, cpu_env, src1, src2, src3);
192
+ gen_set_fpr_d(ctx, a->rd, dest);
193
mark_fs_dirty(ctx);
194
return true;
195
}
196
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
197
static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
198
{
199
REQUIRE_FPU;
200
- REQUIRE_EXT(ctx, RVD);
201
+ REQUIRE_ZDINX_OR_D(ctx);
202
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
203
204
- gen_set_rm(ctx, a->rm);
205
- gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
206
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
207
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
208
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
209
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
210
211
+ gen_set_rm(ctx, a->rm);
212
+ gen_helper_fadd_d(dest, cpu_env, src1, src2);
213
+ gen_set_fpr_d(ctx, a->rd, dest);
214
mark_fs_dirty(ctx);
215
return true;
216
}
217
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
218
static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
219
{
220
REQUIRE_FPU;
221
- REQUIRE_EXT(ctx, RVD);
222
+ REQUIRE_ZDINX_OR_D(ctx);
223
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
224
225
- gen_set_rm(ctx, a->rm);
226
- gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
227
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
228
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
229
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
230
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
231
232
+ gen_set_rm(ctx, a->rm);
233
+ gen_helper_fsub_d(dest, cpu_env, src1, src2);
234
+ gen_set_fpr_d(ctx, a->rd, dest);
235
mark_fs_dirty(ctx);
236
return true;
237
}
238
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
239
static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
240
{
241
REQUIRE_FPU;
242
- REQUIRE_EXT(ctx, RVD);
243
+ REQUIRE_ZDINX_OR_D(ctx);
244
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
245
246
- gen_set_rm(ctx, a->rm);
247
- gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
248
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
249
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
250
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
251
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
252
253
+ gen_set_rm(ctx, a->rm);
254
+ gen_helper_fmul_d(dest, cpu_env, src1, src2);
255
+ gen_set_fpr_d(ctx, a->rd, dest);
256
mark_fs_dirty(ctx);
257
return true;
258
}
259
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
260
static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
261
{
262
REQUIRE_FPU;
263
- REQUIRE_EXT(ctx, RVD);
264
+ REQUIRE_ZDINX_OR_D(ctx);
265
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
266
267
- gen_set_rm(ctx, a->rm);
268
- gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
269
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
270
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
271
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
272
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
273
274
+ gen_set_rm(ctx, a->rm);
275
+ gen_helper_fdiv_d(dest, cpu_env, src1, src2);
276
+ gen_set_fpr_d(ctx, a->rd, dest);
277
mark_fs_dirty(ctx);
278
return true;
279
}
280
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
281
static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
282
{
283
REQUIRE_FPU;
284
- REQUIRE_EXT(ctx, RVD);
285
+ REQUIRE_ZDINX_OR_D(ctx);
286
+ REQUIRE_EVEN(ctx, a->rd | a->rs1);
287
288
- gen_set_rm(ctx, a->rm);
289
- gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
290
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
291
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
292
293
+ gen_set_rm(ctx, a->rm);
294
+ gen_helper_fsqrt_d(dest, cpu_env, src1);
295
+ gen_set_fpr_d(ctx, a->rd, dest);
296
mark_fs_dirty(ctx);
297
return true;
298
}
299
300
static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
301
{
302
+ REQUIRE_FPU;
303
+ REQUIRE_ZDINX_OR_D(ctx);
304
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
305
+
306
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
307
if (a->rs1 == a->rs2) { /* FMOV */
308
- tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
309
+ dest = get_fpr_d(ctx, a->rs1);
310
} else {
311
- tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
312
- cpu_fpr[a->rs1], 0, 63);
313
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
314
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
315
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 63);
316
}
317
+ gen_set_fpr_d(ctx, a->rd, dest);
318
mark_fs_dirty(ctx);
319
return true;
320
}
321
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
322
static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
323
{
324
REQUIRE_FPU;
325
- REQUIRE_EXT(ctx, RVD);
326
+ REQUIRE_ZDINX_OR_D(ctx);
327
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
328
+
329
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
330
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
331
+
332
if (a->rs1 == a->rs2) { /* FNEG */
333
- tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
334
+ tcg_gen_xori_i64(dest, src1, INT64_MIN);
335
} else {
336
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
337
TCGv_i64 t0 = tcg_temp_new_i64();
338
- tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
339
- tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
340
+ tcg_gen_not_i64(t0, src2);
341
+ tcg_gen_deposit_i64(dest, t0, src1, 0, 63);
342
tcg_temp_free_i64(t0);
343
}
344
+ gen_set_fpr_d(ctx, a->rd, dest);
345
mark_fs_dirty(ctx);
346
return true;
347
}
348
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
349
static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
350
{
351
REQUIRE_FPU;
352
- REQUIRE_EXT(ctx, RVD);
353
+ REQUIRE_ZDINX_OR_D(ctx);
354
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
355
+
356
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
357
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
358
+
359
if (a->rs1 == a->rs2) { /* FABS */
360
- tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
361
+ tcg_gen_andi_i64(dest, src1, ~INT64_MIN);
362
} else {
363
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
364
TCGv_i64 t0 = tcg_temp_new_i64();
365
- tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
366
- tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
367
+ tcg_gen_andi_i64(t0, src2, INT64_MIN);
368
+ tcg_gen_xor_i64(dest, src1, t0);
369
tcg_temp_free_i64(t0);
370
}
371
+ gen_set_fpr_d(ctx, a->rd, dest);
372
mark_fs_dirty(ctx);
373
return true;
374
}
375
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
376
static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
377
{
378
REQUIRE_FPU;
379
- REQUIRE_EXT(ctx, RVD);
380
+ REQUIRE_ZDINX_OR_D(ctx);
381
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
382
383
- gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
384
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
385
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
386
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
387
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
388
389
+ gen_helper_fmin_d(dest, cpu_env, src1, src2);
390
+ gen_set_fpr_d(ctx, a->rd, dest);
391
mark_fs_dirty(ctx);
392
return true;
393
}
394
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
395
static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
396
{
397
REQUIRE_FPU;
398
- REQUIRE_EXT(ctx, RVD);
399
+ REQUIRE_ZDINX_OR_D(ctx);
400
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
401
402
- gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
403
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
404
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
405
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
406
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
407
408
+ gen_helper_fmax_d(dest, cpu_env, src1, src2);
409
+ gen_set_fpr_d(ctx, a->rd, dest);
410
mark_fs_dirty(ctx);
411
return true;
412
}
413
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
414
static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
415
{
416
REQUIRE_FPU;
417
- REQUIRE_EXT(ctx, RVD);
418
+ REQUIRE_ZDINX_OR_D(ctx);
419
+ REQUIRE_EVEN(ctx, a->rs1);
420
421
- gen_set_rm(ctx, a->rm);
422
- gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
423
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
424
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
425
426
+ gen_set_rm(ctx, a->rm);
427
+ gen_helper_fcvt_s_d(dest, cpu_env, src1);
428
+ gen_set_fpr_hs(ctx, a->rd, dest);
429
mark_fs_dirty(ctx);
430
return true;
431
}
432
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
433
static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
434
{
435
REQUIRE_FPU;
436
- REQUIRE_EXT(ctx, RVD);
437
+ REQUIRE_ZDINX_OR_D(ctx);
438
+ REQUIRE_EVEN(ctx, a->rd);
439
440
- gen_set_rm(ctx, a->rm);
441
- gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
442
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
443
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
444
445
+ gen_set_rm(ctx, a->rm);
446
+ gen_helper_fcvt_d_s(dest, cpu_env, src1);
447
+ gen_set_fpr_d(ctx, a->rd, dest);
448
mark_fs_dirty(ctx);
449
return true;
450
}
451
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
452
static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
453
{
454
REQUIRE_FPU;
455
- REQUIRE_EXT(ctx, RVD);
456
+ REQUIRE_ZDINX_OR_D(ctx);
457
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
458
459
TCGv dest = dest_gpr(ctx, a->rd);
460
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
461
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
462
463
- gen_helper_feq_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
464
+ gen_helper_feq_d(dest, cpu_env, src1, src2);
465
gen_set_gpr(ctx, a->rd, dest);
466
return true;
467
}
468
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
469
static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
470
{
471
REQUIRE_FPU;
472
- REQUIRE_EXT(ctx, RVD);
473
+ REQUIRE_ZDINX_OR_D(ctx);
474
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
475
476
TCGv dest = dest_gpr(ctx, a->rd);
477
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
478
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
479
480
- gen_helper_flt_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
481
+ gen_helper_flt_d(dest, cpu_env, src1, src2);
482
gen_set_gpr(ctx, a->rd, dest);
483
return true;
484
}
485
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
486
static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
487
{
488
REQUIRE_FPU;
489
- REQUIRE_EXT(ctx, RVD);
490
+ REQUIRE_ZDINX_OR_D(ctx);
491
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
492
493
TCGv dest = dest_gpr(ctx, a->rd);
494
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
495
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
496
497
- gen_helper_fle_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
498
+ gen_helper_fle_d(dest, cpu_env, src1, src2);
499
gen_set_gpr(ctx, a->rd, dest);
500
return true;
501
}
502
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
503
static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
504
{
505
REQUIRE_FPU;
506
- REQUIRE_EXT(ctx, RVD);
507
+ REQUIRE_ZDINX_OR_D(ctx);
508
+ REQUIRE_EVEN(ctx, a->rs1);
509
510
TCGv dest = dest_gpr(ctx, a->rd);
511
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
512
513
- gen_helper_fclass_d(dest, cpu_fpr[a->rs1]);
514
+ gen_helper_fclass_d(dest, src1);
515
gen_set_gpr(ctx, a->rd, dest);
516
return true;
517
}
518
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
519
static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
520
{
521
REQUIRE_FPU;
522
- REQUIRE_EXT(ctx, RVD);
523
+ REQUIRE_ZDINX_OR_D(ctx);
524
+ REQUIRE_EVEN(ctx, a->rs1);
525
526
TCGv dest = dest_gpr(ctx, a->rd);
527
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
528
529
gen_set_rm(ctx, a->rm);
530
- gen_helper_fcvt_w_d(dest, cpu_env, cpu_fpr[a->rs1]);
531
+ gen_helper_fcvt_w_d(dest, cpu_env, src1);
532
gen_set_gpr(ctx, a->rd, dest);
533
return true;
534
}
535
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
536
static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
537
{
538
REQUIRE_FPU;
539
- REQUIRE_EXT(ctx, RVD);
540
+ REQUIRE_ZDINX_OR_D(ctx);
541
+ REQUIRE_EVEN(ctx, a->rs1);
542
543
TCGv dest = dest_gpr(ctx, a->rd);
544
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
545
546
gen_set_rm(ctx, a->rm);
547
- gen_helper_fcvt_wu_d(dest, cpu_env, cpu_fpr[a->rs1]);
548
+ gen_helper_fcvt_wu_d(dest, cpu_env, src1);
549
gen_set_gpr(ctx, a->rd, dest);
550
return true;
551
}
552
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
553
static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
554
{
555
REQUIRE_FPU;
556
- REQUIRE_EXT(ctx, RVD);
557
+ REQUIRE_ZDINX_OR_D(ctx);
558
+ REQUIRE_EVEN(ctx, a->rd);
559
560
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
561
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
562
563
gen_set_rm(ctx, a->rm);
564
- gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, src);
565
+ gen_helper_fcvt_d_w(dest, cpu_env, src);
566
+ gen_set_fpr_d(ctx, a->rd, dest);
567
568
mark_fs_dirty(ctx);
569
return true;
570
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
571
static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
572
{
573
REQUIRE_FPU;
574
- REQUIRE_EXT(ctx, RVD);
575
+ REQUIRE_ZDINX_OR_D(ctx);
576
+ REQUIRE_EVEN(ctx, a->rd);
577
578
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
579
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
580
581
gen_set_rm(ctx, a->rm);
582
- gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, src);
583
+ gen_helper_fcvt_d_wu(dest, cpu_env, src);
584
+ gen_set_fpr_d(ctx, a->rd, dest);
585
586
mark_fs_dirty(ctx);
587
return true;
588
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
589
{
590
REQUIRE_64BIT(ctx);
591
REQUIRE_FPU;
592
- REQUIRE_EXT(ctx, RVD);
593
+ REQUIRE_ZDINX_OR_D(ctx);
594
+ REQUIRE_EVEN(ctx, a->rs1);
595
596
TCGv dest = dest_gpr(ctx, a->rd);
597
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
598
599
gen_set_rm(ctx, a->rm);
600
- gen_helper_fcvt_l_d(dest, cpu_env, cpu_fpr[a->rs1]);
601
+ gen_helper_fcvt_l_d(dest, cpu_env, src1);
602
gen_set_gpr(ctx, a->rd, dest);
603
return true;
604
}
605
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
606
{
607
REQUIRE_64BIT(ctx);
608
REQUIRE_FPU;
609
- REQUIRE_EXT(ctx, RVD);
610
+ REQUIRE_ZDINX_OR_D(ctx);
611
+ REQUIRE_EVEN(ctx, a->rs1);
612
613
TCGv dest = dest_gpr(ctx, a->rd);
614
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
615
616
gen_set_rm(ctx, a->rm);
617
- gen_helper_fcvt_lu_d(dest, cpu_env, cpu_fpr[a->rs1]);
618
+ gen_helper_fcvt_lu_d(dest, cpu_env, src1);
619
gen_set_gpr(ctx, a->rd, dest);
620
return true;
621
}
622
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
623
{
624
REQUIRE_64BIT(ctx);
625
REQUIRE_FPU;
626
- REQUIRE_EXT(ctx, RVD);
627
+ REQUIRE_ZDINX_OR_D(ctx);
628
+ REQUIRE_EVEN(ctx, a->rd);
629
630
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
631
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
632
633
gen_set_rm(ctx, a->rm);
634
- gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, src);
635
+ gen_helper_fcvt_d_l(dest, cpu_env, src);
636
+ gen_set_fpr_d(ctx, a->rd, dest);
637
638
mark_fs_dirty(ctx);
639
return true;
640
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
641
{
642
REQUIRE_64BIT(ctx);
643
REQUIRE_FPU;
644
- REQUIRE_EXT(ctx, RVD);
645
+ REQUIRE_ZDINX_OR_D(ctx);
646
+ REQUIRE_EVEN(ctx, a->rd);
647
648
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
649
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
650
651
gen_set_rm(ctx, a->rm);
652
- gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, src);
653
+ gen_helper_fcvt_d_lu(dest, cpu_env, src);
654
+ gen_set_fpr_d(ctx, a->rd, dest);
655
656
mark_fs_dirty(ctx);
657
return true;
658
--
659
2.35.1
diff view generated by jsdifflib
Deleted patch
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
1
3
- update extension check REQUIRE_ZHINX_OR_ZFH and REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN
4
- update half float point register read/write
5
- disable nanbox_h check
6
7
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-Id: <20220211043920.28981-6-liweiwei@iscas.ac.cn>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
target/riscv/helper.h | 2 +-
15
target/riscv/internals.h | 16 +-
16
target/riscv/fpu_helper.c | 89 +++---
17
target/riscv/insn_trans/trans_rvzfh.c.inc | 332 +++++++++++++++-------
18
4 files changed, 296 insertions(+), 143 deletions(-)
19
20
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/helper.h
23
+++ b/target/riscv/helper.h
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl)
25
DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
26
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
27
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
28
-DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
29
+DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
30
31
/* Special functions */
32
DEF_HELPER_2(csrr, tl, env, int)
33
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/internals.h
36
+++ b/target/riscv/internals.h
37
@@ -XXX,XX +XXX,XX @@ static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
38
}
39
}
40
41
-static inline uint64_t nanbox_h(float16 f)
42
+static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
43
{
44
- return f | MAKE_64BIT_MASK(16, 48);
45
+ /* the value is sign-extended instead of NaN-boxing for zfinx */
46
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
47
+ return (int16_t)f;
48
+ } else {
49
+ return f | MAKE_64BIT_MASK(16, 48);
50
+ }
51
}
52
53
-static inline float16 check_nanbox_h(uint64_t f)
54
+static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
55
{
56
+ /* Disable nanbox check when enable zfinx */
57
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
58
+ return (uint16_t)f;
59
+ }
60
+
61
uint64_t mask = MAKE_64BIT_MASK(16, 48);
62
63
if (likely((f & mask) == mask)) {
64
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/riscv/fpu_helper.c
67
+++ b/target/riscv/fpu_helper.c
68
@@ -XXX,XX +XXX,XX @@ void helper_set_rod_rounding_mode(CPURISCVState *env)
69
static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
70
uint64_t rs3, int flags)
71
{
72
- float16 frs1 = check_nanbox_h(rs1);
73
- float16 frs2 = check_nanbox_h(rs2);
74
- float16 frs3 = check_nanbox_h(rs3);
75
- return nanbox_h(float16_muladd(frs1, frs2, frs3, flags, &env->fp_status));
76
+ float16 frs1 = check_nanbox_h(env, rs1);
77
+ float16 frs2 = check_nanbox_h(env, rs2);
78
+ float16 frs3 = check_nanbox_h(env, rs3);
79
+ return nanbox_h(env, float16_muladd(frs1, frs2, frs3, flags,
80
+ &env->fp_status));
81
}
82
83
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
84
@@ -XXX,XX +XXX,XX @@ target_ulong helper_fclass_d(uint64_t frs1)
85
86
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
87
{
88
- float16 frs1 = check_nanbox_h(rs1);
89
- float16 frs2 = check_nanbox_h(rs2);
90
- return nanbox_h(float16_add(frs1, frs2, &env->fp_status));
91
+ float16 frs1 = check_nanbox_h(env, rs1);
92
+ float16 frs2 = check_nanbox_h(env, rs2);
93
+ return nanbox_h(env, float16_add(frs1, frs2, &env->fp_status));
94
}
95
96
uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
97
{
98
- float16 frs1 = check_nanbox_h(rs1);
99
- float16 frs2 = check_nanbox_h(rs2);
100
- return nanbox_h(float16_sub(frs1, frs2, &env->fp_status));
101
+ float16 frs1 = check_nanbox_h(env, rs1);
102
+ float16 frs2 = check_nanbox_h(env, rs2);
103
+ return nanbox_h(env, float16_sub(frs1, frs2, &env->fp_status));
104
}
105
106
uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
107
{
108
- float16 frs1 = check_nanbox_h(rs1);
109
- float16 frs2 = check_nanbox_h(rs2);
110
- return nanbox_h(float16_mul(frs1, frs2, &env->fp_status));
111
+ float16 frs1 = check_nanbox_h(env, rs1);
112
+ float16 frs2 = check_nanbox_h(env, rs2);
113
+ return nanbox_h(env, float16_mul(frs1, frs2, &env->fp_status));
114
}
115
116
uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
117
{
118
- float16 frs1 = check_nanbox_h(rs1);
119
- float16 frs2 = check_nanbox_h(rs2);
120
- return nanbox_h(float16_div(frs1, frs2, &env->fp_status));
121
+ float16 frs1 = check_nanbox_h(env, rs1);
122
+ float16 frs2 = check_nanbox_h(env, rs2);
123
+ return nanbox_h(env, float16_div(frs1, frs2, &env->fp_status));
124
}
125
126
uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
127
{
128
- float16 frs1 = check_nanbox_h(rs1);
129
- float16 frs2 = check_nanbox_h(rs2);
130
- return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
131
+ float16 frs1 = check_nanbox_h(env, rs1);
132
+ float16 frs2 = check_nanbox_h(env, rs2);
133
+ return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
134
float16_minnum(frs1, frs2, &env->fp_status) :
135
float16_minimum_number(frs1, frs2, &env->fp_status));
136
}
137
138
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
139
{
140
- float16 frs1 = check_nanbox_h(rs1);
141
- float16 frs2 = check_nanbox_h(rs2);
142
- return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
143
+ float16 frs1 = check_nanbox_h(env, rs1);
144
+ float16 frs2 = check_nanbox_h(env, rs2);
145
+ return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
146
float16_maxnum(frs1, frs2, &env->fp_status) :
147
float16_maximum_number(frs1, frs2, &env->fp_status));
148
}
149
150
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
151
{
152
- float16 frs1 = check_nanbox_h(rs1);
153
- return nanbox_h(float16_sqrt(frs1, &env->fp_status));
154
+ float16 frs1 = check_nanbox_h(env, rs1);
155
+ return nanbox_h(env, float16_sqrt(frs1, &env->fp_status));
156
}
157
158
target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
159
{
160
- float16 frs1 = check_nanbox_h(rs1);
161
- float16 frs2 = check_nanbox_h(rs2);
162
+ float16 frs1 = check_nanbox_h(env, rs1);
163
+ float16 frs2 = check_nanbox_h(env, rs2);
164
return float16_le(frs1, frs2, &env->fp_status);
165
}
166
167
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
168
{
169
- float16 frs1 = check_nanbox_h(rs1);
170
- float16 frs2 = check_nanbox_h(rs2);
171
+ float16 frs1 = check_nanbox_h(env, rs1);
172
+ float16 frs2 = check_nanbox_h(env, rs2);
173
return float16_lt(frs1, frs2, &env->fp_status);
174
}
175
176
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
177
{
178
- float16 frs1 = check_nanbox_h(rs1);
179
- float16 frs2 = check_nanbox_h(rs2);
180
+ float16 frs1 = check_nanbox_h(env, rs1);
181
+ float16 frs2 = check_nanbox_h(env, rs2);
182
return float16_eq_quiet(frs1, frs2, &env->fp_status);
183
}
184
185
-target_ulong helper_fclass_h(uint64_t rs1)
186
+target_ulong helper_fclass_h(CPURISCVState *env, uint64_t rs1)
187
{
188
- float16 frs1 = check_nanbox_h(rs1);
189
+ float16 frs1 = check_nanbox_h(env, rs1);
190
return fclass_h(frs1);
191
}
192
193
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
194
{
195
- float16 frs1 = check_nanbox_h(rs1);
196
+ float16 frs1 = check_nanbox_h(env, rs1);
197
return float16_to_int32(frs1, &env->fp_status);
198
}
199
200
target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1)
201
{
202
- float16 frs1 = check_nanbox_h(rs1);
203
+ float16 frs1 = check_nanbox_h(env, rs1);
204
return (int32_t)float16_to_uint32(frs1, &env->fp_status);
205
}
206
207
target_ulong helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1)
208
{
209
- float16 frs1 = check_nanbox_h(rs1);
210
+ float16 frs1 = check_nanbox_h(env, rs1);
211
return float16_to_int64(frs1, &env->fp_status);
212
}
213
214
target_ulong helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1)
215
{
216
- float16 frs1 = check_nanbox_h(rs1);
217
+ float16 frs1 = check_nanbox_h(env, rs1);
218
return float16_to_uint64(frs1, &env->fp_status);
219
}
220
221
uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1)
222
{
223
- return nanbox_h(int32_to_float16((int32_t)rs1, &env->fp_status));
224
+ return nanbox_h(env, int32_to_float16((int32_t)rs1, &env->fp_status));
225
}
226
227
uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1)
228
{
229
- return nanbox_h(uint32_to_float16((uint32_t)rs1, &env->fp_status));
230
+ return nanbox_h(env, uint32_to_float16((uint32_t)rs1, &env->fp_status));
231
}
232
233
uint64_t helper_fcvt_h_l(CPURISCVState *env, target_ulong rs1)
234
{
235
- return nanbox_h(int64_to_float16(rs1, &env->fp_status));
236
+ return nanbox_h(env, int64_to_float16(rs1, &env->fp_status));
237
}
238
239
uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
240
{
241
- return nanbox_h(uint64_to_float16(rs1, &env->fp_status));
242
+ return nanbox_h(env, uint64_to_float16(rs1, &env->fp_status));
243
}
244
245
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
246
{
247
float32 frs1 = check_nanbox_s(env, rs1);
248
- return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
249
+ return nanbox_h(env, float32_to_float16(frs1, true, &env->fp_status));
250
}
251
252
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
253
{
254
- float16 frs1 = check_nanbox_h(rs1);
255
+ float16 frs1 = check_nanbox_h(env, rs1);
256
return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status));
257
}
258
259
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
260
{
261
- return nanbox_h(float64_to_float16(rs1, true, &env->fp_status));
262
+ return nanbox_h(env, float64_to_float16(rs1, true, &env->fp_status));
263
}
264
265
uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
266
{
267
- float16 frs1 = check_nanbox_h(rs1);
268
+ float16 frs1 = check_nanbox_h(env, rs1);
269
return float16_to_float64(frs1, true, &env->fp_status);
270
}
271
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
272
index XXXXXXX..XXXXXXX 100644
273
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
274
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
275
@@ -XXX,XX +XXX,XX @@
276
} \
277
} while (0)
278
279
+#define REQUIRE_ZHINX_OR_ZFH(ctx) do { \
280
+ if (!ctx->cfg_ptr->ext_zhinx && !ctx->cfg_ptr->ext_zfh) { \
281
+ return false; \
282
+ } \
283
+} while (0)
284
+
285
#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
286
if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
287
return false; \
288
} \
289
} while (0)
290
291
+#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
292
+ if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
293
+ ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
294
+ return false; \
295
+ } \
296
+} while (0)
297
+
298
static bool trans_flh(DisasContext *ctx, arg_flh *a)
299
{
300
TCGv_i64 dest;
301
@@ -XXX,XX +XXX,XX @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
302
static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
303
{
304
REQUIRE_FPU;
305
- REQUIRE_ZFH(ctx);
306
+ REQUIRE_ZHINX_OR_ZFH(ctx);
307
+
308
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
309
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
310
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
311
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
312
313
gen_set_rm(ctx, a->rm);
314
- gen_helper_fmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
315
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
316
+ gen_helper_fmadd_h(dest, cpu_env, src1, src2, src3);
317
+ gen_set_fpr_hs(ctx, a->rd, dest);
318
mark_fs_dirty(ctx);
319
return true;
320
}
321
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
322
static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
323
{
324
REQUIRE_FPU;
325
- REQUIRE_ZFH(ctx);
326
+ REQUIRE_ZHINX_OR_ZFH(ctx);
327
+
328
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
329
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
330
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
331
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
332
333
gen_set_rm(ctx, a->rm);
334
- gen_helper_fmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
335
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
336
+ gen_helper_fmsub_h(dest, cpu_env, src1, src2, src3);
337
+ gen_set_fpr_hs(ctx, a->rd, dest);
338
mark_fs_dirty(ctx);
339
return true;
340
}
341
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
342
static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
343
{
344
REQUIRE_FPU;
345
- REQUIRE_ZFH(ctx);
346
+ REQUIRE_ZHINX_OR_ZFH(ctx);
347
+
348
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
349
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
350
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
351
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
352
353
gen_set_rm(ctx, a->rm);
354
- gen_helper_fnmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
355
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
356
+ gen_helper_fnmsub_h(dest, cpu_env, src1, src2, src3);
357
+ gen_set_fpr_hs(ctx, a->rd, dest);
358
mark_fs_dirty(ctx);
359
return true;
360
}
361
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
362
static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
363
{
364
REQUIRE_FPU;
365
- REQUIRE_ZFH(ctx);
366
+ REQUIRE_ZHINX_OR_ZFH(ctx);
367
+
368
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
369
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
370
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
371
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
372
373
gen_set_rm(ctx, a->rm);
374
- gen_helper_fnmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
375
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
376
+ gen_helper_fnmadd_h(dest, cpu_env, src1, src2, src3);
377
+ gen_set_fpr_hs(ctx, a->rd, dest);
378
mark_fs_dirty(ctx);
379
return true;
380
}
381
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
382
static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
383
{
384
REQUIRE_FPU;
385
- REQUIRE_ZFH(ctx);
386
+ REQUIRE_ZHINX_OR_ZFH(ctx);
387
+
388
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
389
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
390
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
391
392
gen_set_rm(ctx, a->rm);
393
- gen_helper_fadd_h(cpu_fpr[a->rd], cpu_env,
394
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
395
+ gen_helper_fadd_h(dest, cpu_env, src1, src2);
396
+ gen_set_fpr_hs(ctx, a->rd, dest);
397
mark_fs_dirty(ctx);
398
return true;
399
}
400
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
401
static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
402
{
403
REQUIRE_FPU;
404
- REQUIRE_ZFH(ctx);
405
+ REQUIRE_ZHINX_OR_ZFH(ctx);
406
+
407
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
408
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
409
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
410
411
gen_set_rm(ctx, a->rm);
412
- gen_helper_fsub_h(cpu_fpr[a->rd], cpu_env,
413
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
414
+ gen_helper_fsub_h(dest, cpu_env, src1, src2);
415
+ gen_set_fpr_hs(ctx, a->rd, dest);
416
mark_fs_dirty(ctx);
417
return true;
418
}
419
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
420
static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
421
{
422
REQUIRE_FPU;
423
- REQUIRE_ZFH(ctx);
424
+ REQUIRE_ZHINX_OR_ZFH(ctx);
425
+
426
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
427
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
428
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
429
430
gen_set_rm(ctx, a->rm);
431
- gen_helper_fmul_h(cpu_fpr[a->rd], cpu_env,
432
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
433
+ gen_helper_fmul_h(dest, cpu_env, src1, src2);
434
+ gen_set_fpr_hs(ctx, a->rd, dest);
435
mark_fs_dirty(ctx);
436
return true;
437
}
438
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
439
static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
440
{
441
REQUIRE_FPU;
442
- REQUIRE_ZFH(ctx);
443
+ REQUIRE_ZHINX_OR_ZFH(ctx);
444
+
445
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
446
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
447
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
448
449
gen_set_rm(ctx, a->rm);
450
- gen_helper_fdiv_h(cpu_fpr[a->rd], cpu_env,
451
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
452
+ gen_helper_fdiv_h(dest, cpu_env, src1, src2);
453
+ gen_set_fpr_hs(ctx, a->rd, dest);
454
mark_fs_dirty(ctx);
455
return true;
456
}
457
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
458
static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
459
{
460
REQUIRE_FPU;
461
- REQUIRE_ZFH(ctx);
462
+ REQUIRE_ZHINX_OR_ZFH(ctx);
463
+
464
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
465
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
466
467
gen_set_rm(ctx, a->rm);
468
- gen_helper_fsqrt_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
469
+ gen_helper_fsqrt_h(dest, cpu_env, src1);
470
+ gen_set_fpr_hs(ctx, a->rd, dest);
471
mark_fs_dirty(ctx);
472
return true;
473
}
474
@@ -XXX,XX +XXX,XX @@ static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
475
static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a)
476
{
477
REQUIRE_FPU;
478
- REQUIRE_ZFH(ctx);
479
+ REQUIRE_ZHINX_OR_ZFH(ctx);
480
+
481
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
482
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
483
484
if (a->rs1 == a->rs2) { /* FMOV */
485
- gen_check_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
486
+ if (!ctx->cfg_ptr->ext_zfinx) {
487
+ gen_check_nanbox_h(dest, src1);
488
+ } else {
489
+ tcg_gen_ext16s_i64(dest, src1);
490
+ }
491
} else {
492
- TCGv_i64 rs1 = tcg_temp_new_i64();
493
- TCGv_i64 rs2 = tcg_temp_new_i64();
494
-
495
- gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
496
- gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
497
-
498
- /* This formulation retains the nanboxing of rs2. */
499
- tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 15);
500
- tcg_temp_free_i64(rs1);
501
- tcg_temp_free_i64(rs2);
502
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
503
+
504
+ if (!ctx->cfg_ptr->ext_zfinx) {
505
+ TCGv_i64 rs1 = tcg_temp_new_i64();
506
+ TCGv_i64 rs2 = tcg_temp_new_i64();
507
+ gen_check_nanbox_h(rs1, src1);
508
+ gen_check_nanbox_h(rs2, src2);
509
+
510
+ /* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */
511
+ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15);
512
+
513
+ tcg_temp_free_i64(rs1);
514
+ tcg_temp_free_i64(rs2);
515
+ } else {
516
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 15);
517
+ tcg_gen_ext16s_i64(dest, dest);
518
+ }
519
}
520
-
521
+ gen_set_fpr_hs(ctx, a->rd, dest);
522
mark_fs_dirty(ctx);
523
return true;
524
}
525
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
526
TCGv_i64 rs1, rs2, mask;
527
528
REQUIRE_FPU;
529
- REQUIRE_ZFH(ctx);
530
+ REQUIRE_ZHINX_OR_ZFH(ctx);
531
+
532
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
533
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
534
535
rs1 = tcg_temp_new_i64();
536
- gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
537
+ if (!ctx->cfg_ptr->ext_zfinx) {
538
+ gen_check_nanbox_h(rs1, src1);
539
+ } else {
540
+ tcg_gen_mov_i64(rs1, src1);
541
+ }
542
543
if (a->rs1 == a->rs2) { /* FNEG */
544
- tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(15, 1));
545
+ tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(15, 1));
546
} else {
547
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
548
rs2 = tcg_temp_new_i64();
549
- gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
550
+
551
+ if (!ctx->cfg_ptr->ext_zfinx) {
552
+ gen_check_nanbox_h(rs2, src2);
553
+ } else {
554
+ tcg_gen_mov_i64(rs2, src2);
555
+ }
556
557
/*
558
* Replace bit 15 in rs1 with inverse in rs2.
559
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
560
mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1));
561
tcg_gen_not_i64(rs2, rs2);
562
tcg_gen_andc_i64(rs2, rs2, mask);
563
- tcg_gen_and_i64(rs1, mask, rs1);
564
- tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
565
+ tcg_gen_and_i64(dest, mask, rs1);
566
+ tcg_gen_or_i64(dest, dest, rs2);
567
568
tcg_temp_free_i64(mask);
569
tcg_temp_free_i64(rs2);
570
}
571
+ /* signed-extended intead of nanboxing for result if enable zfinx */
572
+ if (ctx->cfg_ptr->ext_zfinx) {
573
+ tcg_gen_ext16s_i64(dest, dest);
574
+ }
575
+ tcg_temp_free_i64(rs1);
576
mark_fs_dirty(ctx);
577
return true;
578
}
579
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
580
TCGv_i64 rs1, rs2;
581
582
REQUIRE_FPU;
583
- REQUIRE_ZFH(ctx);
584
+ REQUIRE_ZHINX_OR_ZFH(ctx);
585
+
586
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
587
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
588
589
rs1 = tcg_temp_new_i64();
590
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
591
+ if (!ctx->cfg_ptr->ext_zfinx) {
592
+ gen_check_nanbox_h(rs1, src1);
593
+ } else {
594
+ tcg_gen_mov_i64(rs1, src1);
595
+ }
596
597
if (a->rs1 == a->rs2) { /* FABS */
598
- tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(15, 1));
599
+ tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(15, 1));
600
} else {
601
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
602
rs2 = tcg_temp_new_i64();
603
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
604
+
605
+ if (!ctx->cfg_ptr->ext_zfinx) {
606
+ gen_check_nanbox_h(rs2, src2);
607
+ } else {
608
+ tcg_gen_mov_i64(rs2, src2);
609
+ }
610
611
/*
612
* Xor bit 15 in rs1 with that in rs2.
613
* This formulation retains the nanboxing of rs1.
614
*/
615
- tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(15, 1));
616
- tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
617
+ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1));
618
+ tcg_gen_xor_i64(dest, rs1, dest);
619
620
tcg_temp_free_i64(rs2);
621
}
622
-
623
+ /* signed-extended intead of nanboxing for result if enable zfinx */
624
+ if (ctx->cfg_ptr->ext_zfinx) {
625
+ tcg_gen_ext16s_i64(dest, dest);
626
+ }
627
+ tcg_temp_free_i64(rs1);
628
mark_fs_dirty(ctx);
629
return true;
630
}
631
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
632
static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
633
{
634
REQUIRE_FPU;
635
- REQUIRE_ZFH(ctx);
636
+ REQUIRE_ZHINX_OR_ZFH(ctx);
637
+
638
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
639
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
640
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
641
642
- gen_helper_fmin_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
643
- cpu_fpr[a->rs2]);
644
+ gen_helper_fmin_h(dest, cpu_env, src1, src2);
645
+ gen_set_fpr_hs(ctx, a->rd, dest);
646
mark_fs_dirty(ctx);
647
return true;
648
}
649
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
650
static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
651
{
652
REQUIRE_FPU;
653
- REQUIRE_ZFH(ctx);
654
+ REQUIRE_ZHINX_OR_ZFH(ctx);
655
656
- gen_helper_fmax_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
657
- cpu_fpr[a->rs2]);
658
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
659
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
660
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
661
+
662
+ gen_helper_fmax_h(dest, cpu_env, src1, src2);
663
+ gen_set_fpr_hs(ctx, a->rd, dest);
664
mark_fs_dirty(ctx);
665
return true;
666
}
667
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
668
static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
669
{
670
REQUIRE_FPU;
671
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
672
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
673
+
674
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
675
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
676
677
gen_set_rm(ctx, a->rm);
678
- gen_helper_fcvt_s_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
679
+ gen_helper_fcvt_s_h(dest, cpu_env, src1);
680
+ gen_set_fpr_hs(ctx, a->rd, dest);
681
682
mark_fs_dirty(ctx);
683
684
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
685
static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
686
{
687
REQUIRE_FPU;
688
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
689
- REQUIRE_EXT(ctx, RVD);
690
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
691
+ REQUIRE_ZDINX_OR_D(ctx);
692
+
693
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
694
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
695
696
gen_set_rm(ctx, a->rm);
697
- gen_helper_fcvt_d_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
698
+ gen_helper_fcvt_d_h(dest, cpu_env, src1);
699
+ gen_set_fpr_d(ctx, a->rd, dest);
700
701
mark_fs_dirty(ctx);
702
703
-
704
return true;
705
}
706
707
static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
708
{
709
REQUIRE_FPU;
710
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
711
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
712
713
- gen_set_rm(ctx, a->rm);
714
- gen_helper_fcvt_h_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
715
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
716
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
717
718
+ gen_set_rm(ctx, a->rm);
719
+ gen_helper_fcvt_h_s(dest, cpu_env, src1);
720
+ gen_set_fpr_hs(ctx, a->rd, dest);
721
mark_fs_dirty(ctx);
722
723
return true;
724
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
725
static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
726
{
727
REQUIRE_FPU;
728
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
729
- REQUIRE_EXT(ctx, RVD);
730
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
731
+ REQUIRE_ZDINX_OR_D(ctx);
732
733
- gen_set_rm(ctx, a->rm);
734
- gen_helper_fcvt_h_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
735
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
736
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
737
738
+ gen_set_rm(ctx, a->rm);
739
+ gen_helper_fcvt_h_d(dest, cpu_env, src1);
740
+ gen_set_fpr_hs(ctx, a->rd, dest);
741
mark_fs_dirty(ctx);
742
743
return true;
744
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
745
static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
746
{
747
REQUIRE_FPU;
748
- REQUIRE_ZFH(ctx);
749
+ REQUIRE_ZHINX_OR_ZFH(ctx);
750
751
TCGv dest = dest_gpr(ctx, a->rd);
752
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
753
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
754
755
- gen_helper_feq_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
756
+ gen_helper_feq_h(dest, cpu_env, src1, src2);
757
gen_set_gpr(ctx, a->rd, dest);
758
return true;
759
}
760
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
761
static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
762
{
763
REQUIRE_FPU;
764
- REQUIRE_ZFH(ctx);
765
+ REQUIRE_ZHINX_OR_ZFH(ctx);
766
767
TCGv dest = dest_gpr(ctx, a->rd);
768
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
769
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
770
771
- gen_helper_flt_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
772
+ gen_helper_flt_h(dest, cpu_env, src1, src2);
773
gen_set_gpr(ctx, a->rd, dest);
774
775
return true;
776
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
777
static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
778
{
779
REQUIRE_FPU;
780
- REQUIRE_ZFH(ctx);
781
+ REQUIRE_ZHINX_OR_ZFH(ctx);
782
783
TCGv dest = dest_gpr(ctx, a->rd);
784
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
785
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
786
787
- gen_helper_fle_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
788
+ gen_helper_fle_h(dest, cpu_env, src1, src2);
789
gen_set_gpr(ctx, a->rd, dest);
790
return true;
791
}
792
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
793
static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
794
{
795
REQUIRE_FPU;
796
- REQUIRE_ZFH(ctx);
797
+ REQUIRE_ZHINX_OR_ZFH(ctx);
798
799
TCGv dest = dest_gpr(ctx, a->rd);
800
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
801
802
- gen_helper_fclass_h(dest, cpu_fpr[a->rs1]);
803
+ gen_helper_fclass_h(dest, cpu_env, src1);
804
gen_set_gpr(ctx, a->rd, dest);
805
return true;
806
}
807
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
808
static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
809
{
810
REQUIRE_FPU;
811
- REQUIRE_ZFH(ctx);
812
+ REQUIRE_ZHINX_OR_ZFH(ctx);
813
814
TCGv dest = dest_gpr(ctx, a->rd);
815
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
816
817
gen_set_rm(ctx, a->rm);
818
- gen_helper_fcvt_w_h(dest, cpu_env, cpu_fpr[a->rs1]);
819
+ gen_helper_fcvt_w_h(dest, cpu_env, src1);
820
gen_set_gpr(ctx, a->rd, dest);
821
return true;
822
}
823
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
824
static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
825
{
826
REQUIRE_FPU;
827
- REQUIRE_ZFH(ctx);
828
+ REQUIRE_ZHINX_OR_ZFH(ctx);
829
830
TCGv dest = dest_gpr(ctx, a->rd);
831
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
832
833
gen_set_rm(ctx, a->rm);
834
- gen_helper_fcvt_wu_h(dest, cpu_env, cpu_fpr[a->rs1]);
835
+ gen_helper_fcvt_wu_h(dest, cpu_env, src1);
836
gen_set_gpr(ctx, a->rd, dest);
837
return true;
838
}
839
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
840
static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
841
{
842
REQUIRE_FPU;
843
- REQUIRE_ZFH(ctx);
844
+ REQUIRE_ZHINX_OR_ZFH(ctx);
845
846
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
847
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
848
849
gen_set_rm(ctx, a->rm);
850
- gen_helper_fcvt_h_w(cpu_fpr[a->rd], cpu_env, t0);
851
+ gen_helper_fcvt_h_w(dest, cpu_env, t0);
852
+ gen_set_fpr_hs(ctx, a->rd, dest);
853
854
mark_fs_dirty(ctx);
855
return true;
856
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
857
static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
858
{
859
REQUIRE_FPU;
860
- REQUIRE_ZFH(ctx);
861
+ REQUIRE_ZHINX_OR_ZFH(ctx);
862
863
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
864
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
865
866
gen_set_rm(ctx, a->rm);
867
- gen_helper_fcvt_h_wu(cpu_fpr[a->rd], cpu_env, t0);
868
+ gen_helper_fcvt_h_wu(dest, cpu_env, t0);
869
+ gen_set_fpr_hs(ctx, a->rd, dest);
870
871
mark_fs_dirty(ctx);
872
return true;
873
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a)
874
{
875
REQUIRE_64BIT(ctx);
876
REQUIRE_FPU;
877
- REQUIRE_ZFH(ctx);
878
+ REQUIRE_ZHINX_OR_ZFH(ctx);
879
880
TCGv dest = dest_gpr(ctx, a->rd);
881
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
882
883
gen_set_rm(ctx, a->rm);
884
- gen_helper_fcvt_l_h(dest, cpu_env, cpu_fpr[a->rs1]);
885
+ gen_helper_fcvt_l_h(dest, cpu_env, src1);
886
gen_set_gpr(ctx, a->rd, dest);
887
return true;
888
}
889
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a)
890
{
891
REQUIRE_64BIT(ctx);
892
REQUIRE_FPU;
893
- REQUIRE_ZFH(ctx);
894
+ REQUIRE_ZHINX_OR_ZFH(ctx);
895
896
TCGv dest = dest_gpr(ctx, a->rd);
897
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
898
899
gen_set_rm(ctx, a->rm);
900
- gen_helper_fcvt_lu_h(dest, cpu_env, cpu_fpr[a->rs1]);
901
+ gen_helper_fcvt_lu_h(dest, cpu_env, src1);
902
gen_set_gpr(ctx, a->rd, dest);
903
return true;
904
}
905
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a)
906
{
907
REQUIRE_64BIT(ctx);
908
REQUIRE_FPU;
909
- REQUIRE_ZFH(ctx);
910
+ REQUIRE_ZHINX_OR_ZFH(ctx);
911
912
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
913
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
914
915
gen_set_rm(ctx, a->rm);
916
- gen_helper_fcvt_h_l(cpu_fpr[a->rd], cpu_env, t0);
917
+ gen_helper_fcvt_h_l(dest, cpu_env, t0);
918
+ gen_set_fpr_hs(ctx, a->rd, dest);
919
920
mark_fs_dirty(ctx);
921
return true;
922
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a)
923
{
924
REQUIRE_64BIT(ctx);
925
REQUIRE_FPU;
926
- REQUIRE_ZFH(ctx);
927
+ REQUIRE_ZHINX_OR_ZFH(ctx);
928
929
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
930
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
931
932
gen_set_rm(ctx, a->rm);
933
- gen_helper_fcvt_h_lu(cpu_fpr[a->rd], cpu_env, t0);
934
+ gen_helper_fcvt_h_lu(dest, cpu_env, t0);
935
+ gen_set_fpr_hs(ctx, a->rd, dest);
936
937
mark_fs_dirty(ctx);
938
return true;
939
--
940
2.35.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Mikhail Tyutin <m.tyutin@yadro.com>
2
2
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
3
Fix incorrect register name in RISC-V disassembler for fmv,fabs,fneg instructions
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
4
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
5
Signed-off-by: Mikhail Tyutin <m.tyutin@yadro.com>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-Id: <20220211043920.28981-7-liweiwei@iscas.ac.cn>
7
Message-Id: <3454991f-7f64-24c3-9a36-f5fa2cc389e1@yadro.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
9
---
11
target/riscv/cpu.c | 5 +++++
10
disas/riscv.c | 19 ++++++++++---------
12
1 file changed, 5 insertions(+)
11
1 file changed, 10 insertions(+), 9 deletions(-)
13
12
14
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
13
diff --git a/disas/riscv.c b/disas/riscv.c
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu.c
15
--- a/disas/riscv.c
17
+++ b/target/riscv/cpu.c
16
+++ b/disas/riscv.c
18
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_properties[] = {
17
@@ -XXX,XX +XXX,XX @@ static const char rv_vreg_name_sym[32][4] = {
19
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
18
#define rv_fmt_rd_offset "O\t0,o"
20
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
19
#define rv_fmt_rd_rs1_rs2 "O\t0,1,2"
21
20
#define rv_fmt_frd_rs1 "O\t3,1"
22
+ DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
21
+#define rv_fmt_frd_frs1 "O\t3,4"
23
+ DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
22
#define rv_fmt_rd_frs1 "O\t0,4"
24
+ DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
23
#define rv_fmt_rd_frs1_frs2 "O\t0,4,5"
25
+ DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
24
#define rv_fmt_frd_frs1_frs2 "O\t3,4,5"
26
+
25
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data opcode_data[] = {
27
/* Vendor-specific custom extensions */
26
{ "snez", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 },
28
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
27
{ "sltz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
29
28
{ "sgtz", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 },
29
- { "fmv.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
30
- { "fabs.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
31
- { "fneg.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
32
- { "fmv.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
33
- { "fabs.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
34
- { "fneg.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
35
- { "fmv.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
36
- { "fabs.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
37
- { "fneg.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 },
38
+ { "fmv.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
39
+ { "fabs.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
40
+ { "fneg.s", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
41
+ { "fmv.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
42
+ { "fabs.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
43
+ { "fneg.d", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
44
+ { "fmv.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
45
+ { "fabs.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
46
+ { "fneg.q", rv_codec_r, rv_fmt_frd_frs1, NULL, 0, 0, 0 },
47
{ "beqz", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 },
48
{ "bnez", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 },
49
{ "blez", rv_codec_sb, rv_fmt_rs2_offset, NULL, 0, 0, 0 },
30
--
50
--
31
2.35.1
51
2.39.2
diff view generated by jsdifflib