From: Alistair Francis <alistair.francis@wdc.com>

The following changes since commit 2c89b5af5e72ab8c9d544c6e30399528b2238827:

  Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging (2023-09-08 10:06:25 -0400)

are available in the Git repository at:

  https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20230911

for you to fetch changes up to e7a03409f29e2da59297d55afbaec98c96e43e3a:

  target/riscv: don't read CSR in riscv_csrrw_do64 (2023-09-11 11:45:55 +1000)

----------------------------------------------------------------
First RISC-V PR for 8.2

 * Remove 'host' CPU from TCG
 * riscv_htif: Fix printing on big endian hosts
 * Add zmmul isa string
 * Add smepmp isa string
 * Fix page_check_range use in fault-only-first
 * Use existing lookup tables for MixColumns
 * Add RISC-V vector cryptographic instruction set support
 * Implement WARL behaviour for mcountinhibit/mcounteren
 * Add Zihintntl extension ISA string to DTS
 * Fix zfa fleq.d and fltq.d
 * Fix upper/lower mtime write calculation
 * Make rtc variable names consistent
 * Use abi type for linux-user target_ucontext
 * Add RISC-V KVM AIA support
 * Fix riscv,pmu DT node path in the virt machine
 * Update CSR bits name for svadu extension
 * Mark zicond non-experimental
 * Fix satp_mode_finalize() when satp_mode.supported = 0
 * Fix non-KVM --enable-debug build
 * Add new extensions to hwprobe
 * Use accelerated helper for AES64KS1I
 * Allocate itrigger timers only once
 * Respect mseccfg.RLB for pmpaddrX changes
 * Align the AIA model to the v1.0 ratified spec
 * Don't read the CSR in riscv_csrrw_do64

----------------------------------------------------------------
Akihiko Odaki (1):
      target/riscv: Allocate itrigger timers only once

Ard Biesheuvel (2):
      target/riscv: Use existing lookup tables for MixColumns
      target/riscv: Use accelerated helper for AES64KS1I

Conor Dooley (1):
      hw/riscv: virt: Fix riscv,pmu DT node path

Daniel Henrique Barboza (6):
      target/riscv/cpu.c: do not run 'host' CPU with TCG
      target/riscv/cpu.c: add zmmul isa string
      target/riscv/cpu.c: add smepmp isa string
      target/riscv: fix satp_mode_finalize() when satp_mode.supported = 0
      hw/riscv/virt.c: fix non-KVM --enable-debug build
      hw/intc/riscv_aplic.c fix non-KVM --enable-debug build

Dickon Hood (2):
      target/riscv: Refactor translation of vector-widening instruction
      target/riscv: Add Zvbb ISA extension support

Jason Chien (3):
      target/riscv: Add Zihintntl extension ISA string to DTS
      hw/intc: Fix upper/lower mtime write calculation
      hw/intc: Make rtc variable names consistent

Kiran Ostrolenk (4):
      target/riscv: Refactor some of the generic vector functionality
      target/riscv: Refactor vector-vector translation macro
      target/riscv: Refactor some of the generic vector functionality
      target/riscv: Add Zvknh ISA extension support

LIU Zhiwei (3):
      target/riscv: Fix page_check_range use in fault-only-first
      target/riscv: Fix zfa fleq.d and fltq.d
      linux-user/riscv: Use abi type for target_ucontext

Lawrence Hunter (2):
      target/riscv: Add Zvbc ISA extension support
      target/riscv: Add Zvksh ISA extension support

Leon Schuermann (1):
      target/riscv/pmp.c: respect mseccfg.RLB for pmpaddrX changes

Max Chou (3):
      crypto: Create sm4_subword
      crypto: Add SM4 constant parameter CK
      target/riscv: Add Zvksed ISA extension support

Nazar Kazakov (4):
      target/riscv: Remove redundant "cpu_vl == 0" checks
      target/riscv: Move vector translation checks
      target/riscv: Add Zvkned ISA extension support
      target/riscv: Add Zvkg ISA extension support

Nikita Shubin (1):
      target/riscv: don't read CSR in riscv_csrrw_do64

Rob Bradford (1):
      target/riscv: Implement WARL behaviour for mcountinhibit/mcounteren

Robbin Ehn (1):
      linux-user/riscv: Add new extensions to hwprobe

Thomas Huth (2):
      hw/char/riscv_htif: Fix printing of console characters on big endian hosts
      hw/char/riscv_htif: Fix the console syscall on big endian hosts

Tommy Wu (1):
      target/riscv: Align the AIA model to v1.0 ratified spec

Vineet Gupta (1):
      riscv: zicond: make non-experimental

Weiwei Li (1):
      target/riscv: Update CSR bits name for svadu extension

Yong-Xuan Wang (5):
      target/riscv: support the AIA device emulation with KVM enabled
      target/riscv: check the in-kernel irqchip support
      target/riscv: Create an KVM AIA irqchip
      target/riscv: update APLIC and IMSIC to support KVM AIA
      target/riscv: select KVM AIA in riscv virt machine

 include/crypto/aes.h                      |   7 +
 include/crypto/sm4.h                      |   9 +
 target/riscv/cpu_bits.h                   |   8 +-
 target/riscv/cpu_cfg.h                    |   9 +
 target/riscv/debug.h                      |   3 +-
 target/riscv/helper.h                     |  98 +++
 target/riscv/kvm_riscv.h                  |   5 +
 target/riscv/vector_internals.h           | 228 +++++++
 target/riscv/insn32.decode                |  58 ++
 crypto/aes.c                              |   4 +-
 crypto/sm4.c                              |  10 +
 hw/char/riscv_htif.c                      |  12 +-
 hw/intc/riscv_aclint.c                    |  11 +-
 hw/intc/riscv_aplic.c                     |  52 +-
 hw/intc/riscv_imsic.c                     |  25 +-
 hw/riscv/virt.c                           | 374 ++++++------
 linux-user/riscv/signal.c                 |   4 +-
 linux-user/syscall.c                      |  14 +-
 target/arm/tcg/crypto_helper.c            |  10 +-
 target/riscv/cpu.c                        |  83 ++-
 target/riscv/cpu_helper.c                 |   6 +-
 target/riscv/crypto_helper.c              |  51 +-
 target/riscv/csr.c                        |  54 +-
 target/riscv/debug.c                      |  15 +-
 target/riscv/kvm.c                        | 201 ++++++-
 target/riscv/pmp.c                        |   4 +
 target/riscv/translate.c                  |   1 +
 target/riscv/vcrypto_helper.c             | 970 ++++++++++++++++++++++++++++++
 target/riscv/vector_helper.c              | 245 +-------
 target/riscv/vector_internals.c           |  81 +++
 target/riscv/insn_trans/trans_rvv.c.inc   | 171 +++---
 target/riscv/insn_trans/trans_rvvk.c.inc  | 606 +++++++++++++++++++
 target/riscv/insn_trans/trans_rvzfa.c.inc |   4 +-
 target/riscv/meson.build                  |   4 +-
 34 files changed, 2785 insertions(+), 652 deletions(-)
 create mode 100644 target/riscv/vector_internals.h
 create mode 100644 target/riscv/vcrypto_helper.c
 create mode 100644 target/riscv/vector_internals.c
 create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

The 'host' CPU is available in a CONFIG_KVM build and it's currently
available for all accels, but is a KVM only CPU. This means that in a
RISC-V KVM capable host we can do things like this:

$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
qemu-system-riscv64: H extension requires priv spec 1.12.0

This CPU does not have a priv spec because we don't filter its extensions
via priv spec. We shouldn't be reaching riscv_cpu_realize_tcg() at all
with the 'host' CPU.

We don't have a way to filter the 'host' CPU out of the available CPU
options (-cpu help) if the build includes both KVM and TCG. What we can
do is to error out during riscv_cpu_realize_tcg() if the user chooses
the 'host' CPU with accel=tcg:

$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
qemu-system-riscv64: 'host' CPU is not compatible with TCG acceleration

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230721133411.474105-1-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
     CPURISCVState *env = &cpu->env;
     Error *local_err = NULL;
 
+    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
+        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
+        return;
+    }
+
     riscv_cpu_validate_misa_mxl(cpu, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
--
2.41.0

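A side note on the mechanism used above, for readers less familiar with QEMU's object model: object_dynamic_cast() returns NULL when the object is not an instance of the requested type, so the call doubles as a runtime type check. Below is a rough sketch of that idiom with a hypothetical type name; it is not code from the patch and only assumes standard QOM APIs.

/* QEMU-internal sketch of the QOM runtime type-check idiom.
 * "my-special-cpu" and my_realize() are hypothetical. */
#include "qemu/osdep.h"
#include "qom/object.h"
#include "hw/qdev-core.h"
#include "qapi/error.h"

static void my_realize(DeviceState *dev, Error **errp)
{
    /* Non-NULL only if 'dev' is (a subclass of) "my-special-cpu". */
    if (object_dynamic_cast(OBJECT(dev), "my-special-cpu")) {
        error_setg(errp, "my-special-cpu cannot be realized here");
        return;
    }
    /* ... normal realize path would continue here ... */
}
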
From: Thomas Huth <thuth@redhat.com>

The character that should be printed is stored in the 64 bit "payload"
variable. The code currently tries to print it by taking the address
of the variable and passing this pointer to qemu_chr_fe_write(). However,
this only works on little endian hosts where the least significant bits
are stored on the lowest address. To do this in a portable way, we have
to store the value in a uint8_t variable instead.

Fixes: 5033606780 ("RISC-V HTIF Console")
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230721094720.902454-2-thuth@redhat.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/char/riscv_htif.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/char/riscv_htif.c
+++ b/hw/char/riscv_htif.c
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
             s->tohost = 0; /* clear to indicate we read */
             return;
         } else if (cmd == HTIF_CONSOLE_CMD_PUTC) {
-            qemu_chr_fe_write(&s->chr, (uint8_t *)&payload, 1);
+            uint8_t ch = (uint8_t)payload;
+            qemu_chr_fe_write(&s->chr, &ch, 1);
             resp = 0x100 | (uint8_t)payload;
         } else {
             qemu_log("HTIF device %d: unknown command\n", device);
--
2.41.0

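To see why the old code only worked on little endian hosts, here is a small standalone sketch of the two approaches (it is illustrative only and not part of the patch):

/* On a little-endian host the first byte of a uint64_t in memory holds its
 * low 8 bits, so writing one byte starting at &payload happens to print the
 * right character; on a big-endian host that first byte is the *most*
 * significant one and is almost always 0. Truncating the value instead is
 * portable. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t payload = 'A';                      /* HTIF console putc payload */
    uint8_t first_byte = *(uint8_t *)&payload;   /* host-endian dependent */
    uint8_t ch = (uint8_t)payload;               /* portable: low 8 bits */

    printf("first byte in memory: 0x%02x, truncated value: 0x%02x\n",
           first_byte, ch);
    return 0;
}
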
From: Thomas Huth <thuth@redhat.com>

Values that have been read via cpu_physical_memory_read() from the
guest's memory have to be swapped in case the host endianness differs
from the guest.

Fixes: a6e13e31d5 ("riscv_htif: Support console output via proxy syscall")
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-Id: <20230721094720.902454-3-thuth@redhat.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/char/riscv_htif.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/char/riscv_htif.c
+++ b/hw/char/riscv_htif.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/timer.h"
 #include "qemu/error-report.h"
 #include "exec/address-spaces.h"
+#include "exec/tswap.h"
 #include "sysemu/dma.h"
 
 #define RISCV_DEBUG_HTIF 0
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
         } else {
             uint64_t syscall[8];
             cpu_physical_memory_read(payload, syscall, sizeof(syscall));
-            if (syscall[0] == PK_SYS_WRITE &&
-                syscall[1] == HTIF_DEV_CONSOLE &&
-                syscall[3] == HTIF_CONSOLE_CMD_PUTC) {
+            if (tswap64(syscall[0]) == PK_SYS_WRITE &&
+                tswap64(syscall[1]) == HTIF_DEV_CONSOLE &&
+                tswap64(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) {
                 uint8_t ch;
-                cpu_physical_memory_read(syscall[2], &ch, 1);
+                cpu_physical_memory_read(tswap64(syscall[2]), &ch, 1);
                 qemu_chr_fe_write(&s->chr, &ch, 1);
                 resp = 0x100 | (uint8_t)payload;
             } else {
--
2.41.0

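The need for tswap64() comes down to the fact that cpu_physical_memory_read() copies raw guest bytes: if the guest and host disagree on endianness, multi-byte values must be byte-swapped before they are compared. A rough standalone sketch of the idea follows; it is conceptual only and does not reproduce QEMU's actual tswap implementation, and the two boolean flags stand in for what QEMU derives from the target and build configuration.

#include <stdbool.h>
#include <stdint.h>

/* Plain 64-bit byte swap. */
static uint64_t bswap64_sketch(uint64_t v)
{
    return ((v & 0x00000000000000ffULL) << 56) |
           ((v & 0x000000000000ff00ULL) << 40) |
           ((v & 0x0000000000ff0000ULL) << 24) |
           ((v & 0x00000000ff000000ULL) << 8)  |
           ((v & 0x000000ff00000000ULL) >> 8)  |
           ((v & 0x0000ff0000000000ULL) >> 24) |
           ((v & 0x00ff000000000000ULL) >> 40) |
           ((v & 0xff00000000000000ULL) >> 56);
}

/* Conceptually what happens to a value read byte-for-byte from guest RAM
 * before the host can interpret it. */
static uint64_t guest_to_host64(uint64_t raw, bool guest_is_big_endian,
                                bool host_is_big_endian)
{
    return (guest_is_big_endian == host_is_big_endian) ? raw
                                                       : bswap64_sketch(raw);
}
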
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

zmmul was promoted from experimental to ratified in commit 6d00ffad4e95.
Add a riscv,isa string for it.

Fixes: 6d00ffad4e95 ("target/riscv: move zmmul out of the experimental properties")
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230720132424.371132-2-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
+    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
--
2.41.0

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

The cpu->cfg.epmp extension is still experimental, but it already has a
'smepmp' riscv,isa string. Add it.

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230720132424.371132-3-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
--
2.41.0

From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

Commit bef6f008b98 ("accel/tcg: Return bool from page_check_range") converts
the integer return value to a bool type. However, it wrongly converted the
use of the API in riscv fault-only-first, where `page_check_range <= 0`
should be converted to `!page_check_range`.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20230729031618.821-1-zhiwei_liu@linux.alibaba.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/vector_helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base,
                                                  cpu_mmu_index(env, false));
         if (host) {
 #ifdef CONFIG_USER_ONLY
-            if (page_check_range(addr, offset, PAGE_READ)) {
+            if (!page_check_range(addr, offset, PAGE_READ)) {
                 vl = i;
                 goto ProbeSuccess;
             }
--
2.41.0

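The pitfall being fixed is the usual one when an API changes from an int error return to a bool success return: the old comparison against zero and the new truth test have opposite polarity. A minimal standalone sketch of that inversion follows; the prototypes are illustrative, not the actual QEMU ones.

#include <stdbool.h>
#include <stdio.h>

/* Old style: 0 on success, negative on failure. */
static int check_range_old(int ok) { return ok ? 0 : -1; }

/* New style: true on success, false on failure. */
static bool check_range_new(int ok) { return ok != 0; }

int main(void)
{
    int ok = 1;   /* the range is accessible */

    if (check_range_old(ok) < 0) {
        printf("old API: range not accessible\n");            /* not taken */
    }
    /* Wrong conversion: keeping a plain truth test fires on *success*. */
    if (check_range_new(ok)) {
        printf("buggy conversion: success treated as failure\n");
    }
    /* Correct conversion: negate the bool. */
    if (!check_range_new(ok)) {
        printf("fixed: range not accessible\n");               /* not taken */
    }
    return 0;
}
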
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Ard Biesheuvel <ardb@kernel.org>
2
2
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
3
The AES MixColumns and InvMixColumns operations are relatively
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
expensive 4x4 matrix multiplications in GF(2^8), which is why C
5
Message-id: 20220120122050.41546-4-zhiwei_liu@c-sky.com
5
implementations usually rely on precomputed lookup tables rather than
6
performing the calculations on demand.
7
8
Given that we already carry those tables in QEMU, we can just grab the
9
right value in the implementation of the RISC-V AES32 instructions. Note
10
that the tables in question are permuted according to the respective
11
Sbox, so we can omit the Sbox lookup as well in this case.
12
13
Cc: Richard Henderson <richard.henderson@linaro.org>
14
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
15
Cc: Zewen Ye <lustrew@foxmail.com>
16
Cc: Weiwei Li <liweiwei@iscas.ac.cn>
17
Cc: Junqiang Wang <wangjunqiang@iscas.ac.cn>
18
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
19
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Message-ID: <20230731084043.1791984-1-ardb@kernel.org>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
22
---
8
target/riscv/translate.c | 4 +---
23
include/crypto/aes.h | 7 +++++++
9
target/riscv/insn_trans/trans_rvi.c.inc | 4 +---
24
crypto/aes.c | 4 ++--
10
2 files changed, 2 insertions(+), 6 deletions(-)
25
target/riscv/crypto_helper.c | 34 ++++------------------------------
26
3 files changed, 13 insertions(+), 32 deletions(-)
11
27
12
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
28
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
13
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/translate.c
30
--- a/include/crypto/aes.h
15
+++ b/target/riscv/translate.c
31
+++ b/include/crypto/aes.h
16
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
32
@@ -XXX,XX +XXX,XX @@ void AES_decrypt(const unsigned char *in, unsigned char *out,
17
return;
33
extern const uint8_t AES_sbox[256];
34
extern const uint8_t AES_isbox[256];
35
36
+/*
37
+AES_Te0[x] = S [x].[02, 01, 01, 03];
38
+AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
39
+*/
40
+
41
+extern const uint32_t AES_Te0[256], AES_Td0[256];
42
+
43
#endif
44
diff --git a/crypto/aes.c b/crypto/aes.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/crypto/aes.c
47
+++ b/crypto/aes.c
48
@@ -XXX,XX +XXX,XX @@ AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
49
AES_Td4[x] = Si[x].[01, 01, 01, 01];
50
*/
51
52
-static const uint32_t AES_Te0[256] = {
53
+const uint32_t AES_Te0[256] = {
54
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
55
0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
56
0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
57
@@ -XXX,XX +XXX,XX @@ static const uint32_t AES_Te4[256] = {
58
0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
59
};
60
61
-static const uint32_t AES_Td0[256] = {
62
+const uint32_t AES_Td0[256] = {
63
0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
64
0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
65
0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
66
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/riscv/crypto_helper.c
69
+++ b/target/riscv/crypto_helper.c
70
@@ -XXX,XX +XXX,XX @@
71
#include "crypto/aes-round.h"
72
#include "crypto/sm4.h"
73
74
-#define AES_XTIME(a) \
75
- ((a << 1) ^ ((a & 0x80) ? 0x1b : 0))
76
-
77
-#define AES_GFMUL(a, b) (( \
78
- (((b) & 0x1) ? (a) : 0) ^ \
79
- (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
80
- (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
81
- (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)
82
-
83
-static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
84
-{
85
- uint32_t u;
86
-
87
- if (fwd) {
88
- u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) |
89
- (AES_GFMUL(x, 2) << 0);
90
- } else {
91
- u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) |
92
- (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0);
93
- }
94
- return u;
95
-}
96
-
97
#define sext32_xlen(x) (target_ulong)(int32_t)(x)
98
99
static inline target_ulong aes32_operation(target_ulong shamt,
100
@@ -XXX,XX +XXX,XX @@ static inline target_ulong aes32_operation(target_ulong shamt,
101
bool enc, bool mix)
102
{
103
uint8_t si = rs2 >> shamt;
104
- uint8_t so;
105
uint32_t mixed;
106
target_ulong res;
107
108
if (enc) {
109
- so = AES_sbox[si];
110
if (mix) {
111
- mixed = aes_mixcolumn_byte(so, true);
112
+ mixed = be32_to_cpu(AES_Te0[si]);
113
} else {
114
- mixed = so;
115
+ mixed = AES_sbox[si];
116
}
117
} else {
118
- so = AES_isbox[si];
119
if (mix) {
120
- mixed = aes_mixcolumn_byte(so, false);
121
+ mixed = be32_to_cpu(AES_Td0[si]);
122
} else {
123
- mixed = so;
124
+ mixed = AES_isbox[si];
18
}
125
}
19
}
126
}
20
- if (rd != 0) {
127
mixed = rol32(mixed, shamt);
21
- tcg_gen_movi_tl(cpu_gpr[rd], ctx->pc_succ_insn);
22
- }
23
24
+ gen_set_gpri(ctx, rd, ctx->pc_succ_insn);
25
gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
26
ctx->base.is_jmp = DISAS_NORETURN;
27
}
28
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/riscv/insn_trans/trans_rvi.c.inc
31
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
32
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
33
tcg_temp_free(t0);
34
}
35
36
- if (a->rd != 0) {
37
- tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
38
- }
39
+ gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
40
tcg_gen_lookup_and_goto_ptr();
41
42
if (misaligned) {
43
--
128
--
44
2.31.1
129
2.41.0
45
130
46
131
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
Add target/riscv/kvm.c to hold the kvm_arch_* functions needed by
3
Take some functions/macros out of `vector_helper` and put them in a new
4
kvm/kvm-all.c.
4
module called `vector_internals`. This ensures they can be used by both
5
vector and vector-crypto helpers (the latter implemented in subsequent
6
commits).
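As a rough illustration of the split, this is approximately what the RVVCALL(OPIVV2, ...) and GEN_VEXT_VV(...) pair expands to for vadd_vv_b once the shared pieces move: the per-element op stays a tiny static function generated next to the helper, while the masked/tail-agnostic element loop is the do_vext_vv() exported by the new module. Hand-expanded here for illustration only; it builds only inside the QEMU tree.

static void do_vadd_vv_b(void *vd, void *vs1, void *vs2, int i)
{
    int8_t s1 = *((int8_t *)vs1 + H1(i));
    int8_t s2 = *((int8_t *)vs2 + H1(i));
    *((int8_t *)vd + H1(i)) = s2 + s1;
}

void HELPER(vadd_vv_b)(void *vd, void *v0, void *vs1, void *vs2,
                       CPURISCVState *env, uint32_t desc)
{
    /* shared masked/tail-handling loop, now living in vector_internals.c */
    do_vext_vv(vd, v0, vs1, vs2, env, desc, do_vadd_vv_b, 1);
}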
5
7
6
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
8
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
7
Signed-off-by: Mingwang Li <limingwang@huawei.com>
9
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Max Chou <max.chou@sifive.com>
9
Reviewed-by: Anup Patel <anup.patel@wdc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20220112081329.1835-3-jiangyifei@huawei.com
12
Message-ID: <20230711165917.2629866-2-max.chou@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
14
---
13
target/riscv/kvm.c | 133 +++++++++++++++++++++++++++++++++++++++
15
target/riscv/vector_internals.h | 182 +++++++++++++++++++++++++++++
14
target/riscv/meson.build | 1 +
16
target/riscv/vector_helper.c | 201 +-------------------------------
15
2 files changed, 134 insertions(+)
17
target/riscv/vector_internals.c | 81 +++++++++++++
16
create mode 100644 target/riscv/kvm.c
18
target/riscv/meson.build | 1 +
19
4 files changed, 265 insertions(+), 200 deletions(-)
20
create mode 100644 target/riscv/vector_internals.h
21
create mode 100644 target/riscv/vector_internals.c
17
22
18
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
23
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
19
new file mode 100644
24
new file mode 100644
20
index XXXXXXX..XXXXXXX
25
index XXXXXXX..XXXXXXX
21
--- /dev/null
26
--- /dev/null
22
+++ b/target/riscv/kvm.c
27
+++ b/target/riscv/vector_internals.h
23
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@
24
+/*
29
+/*
25
+ * RISC-V implementation of KVM hooks
30
+ * RISC-V Vector Extension Internals
26
+ *
31
+ *
27
+ * Copyright (c) 2020 Huawei Technologies Co., Ltd
32
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
28
+ *
33
+ *
29
+ * This program is free software; you can redistribute it and/or modify it
34
+ * This program is free software; you can redistribute it and/or modify it
30
+ * under the terms and conditions of the GNU General Public License,
35
+ * under the terms and conditions of the GNU General Public License,
31
+ * version 2 or later, as published by the Free Software Foundation.
36
+ * version 2 or later, as published by the Free Software Foundation.
32
+ *
37
+ *
...
...
37
+ *
42
+ *
38
+ * You should have received a copy of the GNU General Public License along with
43
+ * You should have received a copy of the GNU General Public License along with
39
+ * this program. If not, see <http://www.gnu.org/licenses/>.
44
+ * this program. If not, see <http://www.gnu.org/licenses/>.
40
+ */
45
+ */
41
+
46
+
47
+#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
48
+#define TARGET_RISCV_VECTOR_INTERNALS_H
49
+
42
+#include "qemu/osdep.h"
50
+#include "qemu/osdep.h"
43
+#include <sys/ioctl.h>
51
+#include "qemu/bitops.h"
44
+
45
+#include <linux/kvm.h>
46
+
47
+#include "qemu-common.h"
48
+#include "qemu/timer.h"
49
+#include "qemu/error-report.h"
50
+#include "qemu/main-loop.h"
51
+#include "sysemu/sysemu.h"
52
+#include "sysemu/kvm.h"
53
+#include "sysemu/kvm_int.h"
54
+#include "cpu.h"
52
+#include "cpu.h"
55
+#include "trace.h"
53
+#include "tcg/tcg-gvec-desc.h"
56
+#include "hw/pci/pci.h"
54
+#include "internals.h"
57
+#include "exec/memattrs.h"
55
+
58
+#include "exec/address-spaces.h"
56
+static inline uint32_t vext_nf(uint32_t desc)
59
+#include "hw/boards.h"
57
+{
60
+#include "hw/irq.h"
58
+ return FIELD_EX32(simd_data(desc), VDATA, NF);
61
+#include "qemu/log.h"
59
+}
62
+#include "hw/loader.h"
60
+
63
+
61
+/*
64
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
62
+ * Note that vector data is stored in host-endian 64-bit chunks,
65
+ KVM_CAP_LAST_INFO
63
+ * so addressing units smaller than that needs a host-endian fixup.
66
+};
64
+ */
67
+
65
+#if HOST_BIG_ENDIAN
68
+int kvm_arch_get_registers(CPUState *cs)
66
+#define H1(x) ((x) ^ 7)
69
+{
67
+#define H1_2(x) ((x) ^ 6)
70
+ return 0;
68
+#define H1_4(x) ((x) ^ 4)
71
+}
69
+#define H2(x) ((x) ^ 3)
72
+
70
+#define H4(x) ((x) ^ 1)
73
+int kvm_arch_put_registers(CPUState *cs, int level)
71
+#define H8(x) ((x))
74
+{
72
+#else
75
+ return 0;
73
+#define H1(x) (x)
76
+}
74
+#define H1_2(x) (x)
77
+
75
+#define H1_4(x) (x)
78
+int kvm_arch_release_virq_post(int virq)
76
+#define H2(x) (x)
79
+{
77
+#define H4(x) (x)
80
+ return 0;
78
+#define H8(x) (x)
81
+}
79
+#endif
82
+
80
+
83
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
81
+/*
84
+ uint64_t address, uint32_t data, PCIDevice *dev)
82
+ * Encode LMUL to lmul as following:
85
+{
83
+ * LMUL vlmul lmul
86
+ return 0;
84
+ * 1 000 0
87
+}
85
+ * 2 001 1
88
+
86
+ * 4 010 2
89
+int kvm_arch_destroy_vcpu(CPUState *cs)
87
+ * 8 011 3
90
+{
88
+ * - 100 -
91
+ return 0;
89
+ * 1/8 101 -3
92
+}
90
+ * 1/4 110 -2
93
+
91
+ * 1/2 111 -1
94
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
92
+ */
95
+{
93
+static inline int32_t vext_lmul(uint32_t desc)
96
+ return cpu->cpu_index;
94
+{
97
+}
95
+ return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
98
+
96
+}
99
+void kvm_arch_init_irq_routing(KVMState *s)
97
+
100
+{
98
+static inline uint32_t vext_vm(uint32_t desc)
101
+}
99
+{
102
+
100
+ return FIELD_EX32(simd_data(desc), VDATA, VM);
103
+int kvm_arch_init_vcpu(CPUState *cs)
101
+}
104
+{
102
+
105
+ return 0;
103
+static inline uint32_t vext_vma(uint32_t desc)
106
+}
104
+{
107
+
105
+ return FIELD_EX32(simd_data(desc), VDATA, VMA);
108
+int kvm_arch_msi_data_to_gsi(uint32_t data)
106
+}
109
+{
107
+
110
+ abort();
108
+static inline uint32_t vext_vta(uint32_t desc)
111
+}
109
+{
112
+
110
+ return FIELD_EX32(simd_data(desc), VDATA, VTA);
113
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
111
+}
114
+ int vector, PCIDevice *dev)
112
+
115
+{
113
+static inline uint32_t vext_vta_all_1s(uint32_t desc)
116
+ return 0;
114
+{
117
+}
115
+ return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
118
+
116
+}
119
+int kvm_arch_init(MachineState *ms, KVMState *s)
117
+
120
+{
118
+/*
121
+ return 0;
119
+ * Earlier designs (pre-0.9) had a varying number of bits
122
+}
120
+ * per mask value (MLEN). In the 0.9 design, MLEN=1.
123
+
121
+ * (Section 4.5)
124
+int kvm_arch_irqchip_create(KVMState *s)
122
+ */
125
+{
123
+static inline int vext_elem_mask(void *v0, int index)
126
+ return 0;
124
+{
127
+}
125
+ int idx = index / 64;
128
+
126
+ int pos = index % 64;
129
+int kvm_arch_process_async_events(CPUState *cs)
127
+ return (((uint64_t *)v0)[idx] >> pos) & 1;
130
+{
128
+}
131
+ return 0;
129
+
132
+}
130
+/*
133
+
131
+ * Get number of total elements, including prestart, body and tail elements.
134
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
132
+ * Note that when LMUL < 1, the tail includes the elements past VLMAX that
135
+{
133
+ * are held in the same vector register.
136
+}
134
+ */
137
+
135
+static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
138
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
136
+ uint32_t esz)
139
+{
137
+{
140
+ return MEMTXATTRS_UNSPECIFIED;
138
+ uint32_t vlenb = simd_maxsz(desc);
141
+}
139
+ uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
142
+
140
+ int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
143
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
141
+ ctzl(esz) - ctzl(sew) + vext_lmul(desc);
144
+{
142
+ return (vlenb << emul) / esz;
145
+ return true;
143
+}
146
+}
144
+
147
+
145
+/* set agnostic elements to 1s */
148
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
146
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
149
+{
147
+ uint32_t tot);
150
+ return 0;
148
+
151
+}
149
+/* expand macro args before macro */
152
+
150
+#define RVVCALL(macro, ...) macro(__VA_ARGS__)
153
+bool kvm_arch_cpu_check_are_resettable(void)
151
+
154
+{
152
+/* (TD, T1, T2, TX1, TX2) */
155
+ return true;
153
+#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
154
+#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
155
+#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
156
+#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
157
+
158
+/* operation of two vector elements */
159
+typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
160
+
161
+#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
162
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
163
+{ \
164
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
165
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
166
+ *((TD *)vd + HD(i)) = OP(s2, s1); \
167
+}
168
+
169
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
170
+ CPURISCVState *env, uint32_t desc,
171
+ opivv2_fn *fn, uint32_t esz);
172
+
173
+/* generate the helpers for OPIVV */
174
+#define GEN_VEXT_VV(NAME, ESZ) \
175
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
176
+ void *vs2, CPURISCVState *env, \
177
+ uint32_t desc) \
178
+{ \
179
+ do_vext_vv(vd, v0, vs1, vs2, env, desc, \
180
+ do_##NAME, ESZ); \
181
+}
182
+
183
+typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
184
+
185
+/*
186
+ * (T1)s1 gives the real operator type.
187
+ * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
188
+ */
189
+#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
190
+static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
191
+{ \
192
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
193
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
194
+}
195
+
196
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
197
+ CPURISCVState *env, uint32_t desc,
198
+ opivx2_fn fn, uint32_t esz);
199
+
200
+/* generate the helpers for OPIVX */
201
+#define GEN_VEXT_VX(NAME, ESZ) \
202
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
203
+ void *vs2, CPURISCVState *env, \
204
+ uint32_t desc) \
205
+{ \
206
+ do_vext_vx(vd, v0, s1, vs2, env, desc, \
207
+ do_##NAME, ESZ); \
208
+}
209
+
210
+#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
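A tiny standalone check of the LMUL table in the header above: vext_lmul() relies on sextract32(..., 0, 3) to turn the 3-bit vlmul field into the signed lmul exponent. sext3() below is a local helper that performs the same 3-bit sign extension, shown only to make the encoding concrete:

#include <assert.h>
#include <stdint.h>

/* same result as sextract32(vlmul, 0, 3) for a 3-bit field */
static int32_t sext3(uint32_t vlmul)
{
    return (int32_t)(vlmul << 29) >> 29;
}

int main(void)
{
    assert(sext3(0) == 0);    /* vlmul = 000 -> LMUL = 1   */
    assert(sext3(3) == 3);    /* vlmul = 011 -> LMUL = 8   */
    assert(sext3(5) == -3);   /* vlmul = 101 -> LMUL = 1/8 */
    assert(sext3(7) == -1);   /* vlmul = 111 -> LMUL = 1/2 */
    return 0;
}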
211
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/target/riscv/vector_helper.c
214
+++ b/target/riscv/vector_helper.c
215
@@ -XXX,XX +XXX,XX @@
216
#include "fpu/softfloat.h"
217
#include "tcg/tcg-gvec-desc.h"
218
#include "internals.h"
219
+#include "vector_internals.h"
220
#include <math.h>
221
222
target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
223
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
224
return vl;
225
}
226
227
-/*
228
- * Note that vector data is stored in host-endian 64-bit chunks,
229
- * so addressing units smaller than that needs a host-endian fixup.
230
- */
231
-#if HOST_BIG_ENDIAN
232
-#define H1(x) ((x) ^ 7)
233
-#define H1_2(x) ((x) ^ 6)
234
-#define H1_4(x) ((x) ^ 4)
235
-#define H2(x) ((x) ^ 3)
236
-#define H4(x) ((x) ^ 1)
237
-#define H8(x) ((x))
238
-#else
239
-#define H1(x) (x)
240
-#define H1_2(x) (x)
241
-#define H1_4(x) (x)
242
-#define H2(x) (x)
243
-#define H4(x) (x)
244
-#define H8(x) (x)
245
-#endif
246
-
247
-static inline uint32_t vext_nf(uint32_t desc)
248
-{
249
- return FIELD_EX32(simd_data(desc), VDATA, NF);
250
-}
251
-
252
-static inline uint32_t vext_vm(uint32_t desc)
253
-{
254
- return FIELD_EX32(simd_data(desc), VDATA, VM);
255
-}
256
-
257
-/*
258
- * Encode LMUL to lmul as following:
259
- * LMUL vlmul lmul
260
- * 1 000 0
261
- * 2 001 1
262
- * 4 010 2
263
- * 8 011 3
264
- * - 100 -
265
- * 1/8 101 -3
266
- * 1/4 110 -2
267
- * 1/2 111 -1
268
- */
269
-static inline int32_t vext_lmul(uint32_t desc)
270
-{
271
- return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
272
-}
273
-
274
-static inline uint32_t vext_vta(uint32_t desc)
275
-{
276
- return FIELD_EX32(simd_data(desc), VDATA, VTA);
277
-}
278
-
279
-static inline uint32_t vext_vma(uint32_t desc)
280
-{
281
- return FIELD_EX32(simd_data(desc), VDATA, VMA);
282
-}
283
-
284
-static inline uint32_t vext_vta_all_1s(uint32_t desc)
285
-{
286
- return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
287
-}
288
-
289
/*
290
* Get the maximum number of elements can be operated.
291
*
292
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
293
return scale < 0 ? vlenb >> -scale : vlenb << scale;
294
}
295
296
-/*
297
- * Get number of total elements, including prestart, body and tail elements.
298
- * Note that when LMUL < 1, the tail includes the elements past VLMAX that
299
- * are held in the same vector register.
300
- */
301
-static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
302
- uint32_t esz)
303
-{
304
- uint32_t vlenb = simd_maxsz(desc);
305
- uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
306
- int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
307
- ctzl(esz) - ctzl(sew) + vext_lmul(desc);
308
- return (vlenb << emul) / esz;
309
-}
310
-
311
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
312
{
313
return (addr & ~env->cur_pmmask) | env->cur_pmbase;
314
@@ -XXX,XX +XXX,XX @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
315
}
316
}
317
318
-/* set agnostic elements to 1s */
319
-static void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
320
- uint32_t tot)
321
-{
322
- if (is_agnostic == 0) {
323
- /* policy undisturbed */
324
- return;
325
- }
326
- if (tot - cnt == 0) {
327
- return;
328
- }
329
- memset(base + cnt, -1, tot - cnt);
330
-}
331
-
332
static inline void vext_set_elem_mask(void *v0, int index,
333
uint8_t value)
334
{
335
@@ -XXX,XX +XXX,XX @@ static inline void vext_set_elem_mask(void *v0, int index,
336
((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
337
}
338
339
-/*
340
- * Earlier designs (pre-0.9) had a varying number of bits
341
- * per mask value (MLEN). In the 0.9 design, MLEN=1.
342
- * (Section 4.5)
343
- */
344
-static inline int vext_elem_mask(void *v0, int index)
345
-{
346
- int idx = index / 64;
347
- int pos = index % 64;
348
- return (((uint64_t *)v0)[idx] >> pos) & 1;
349
-}
350
-
351
/* elements operations for load and store */
352
typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
353
uint32_t idx, void *vd, uintptr_t retaddr);
354
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
355
* Vector Integer Arithmetic Instructions
356
*/
357
358
-/* expand macro args before macro */
359
-#define RVVCALL(macro, ...) macro(__VA_ARGS__)
360
-
361
/* (TD, T1, T2, TX1, TX2) */
362
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
363
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
364
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
365
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
366
-#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
367
-#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
368
-#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
369
-#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
370
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
371
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
372
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
373
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
374
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
375
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
376
377
-/* operation of two vector elements */
378
-typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
379
-
380
-#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
381
-static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
382
-{ \
383
- TX1 s1 = *((T1 *)vs1 + HS1(i)); \
384
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
385
- *((TD *)vd + HD(i)) = OP(s2, s1); \
386
-}
387
#define DO_SUB(N, M) (N - M)
388
#define DO_RSUB(N, M) (M - N)
389
390
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
391
RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
392
RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
393
394
-static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
395
- CPURISCVState *env, uint32_t desc,
396
- opivv2_fn *fn, uint32_t esz)
397
-{
398
- uint32_t vm = vext_vm(desc);
399
- uint32_t vl = env->vl;
400
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
401
- uint32_t vta = vext_vta(desc);
402
- uint32_t vma = vext_vma(desc);
403
- uint32_t i;
404
-
405
- for (i = env->vstart; i < vl; i++) {
406
- if (!vm && !vext_elem_mask(v0, i)) {
407
- /* set masked-off elements to 1s */
408
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
409
- continue;
410
- }
411
- fn(vd, vs1, vs2, i);
412
- }
413
- env->vstart = 0;
414
- /* set tail elements to 1s */
415
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
416
-}
417
-
418
-/* generate the helpers for OPIVV */
419
-#define GEN_VEXT_VV(NAME, ESZ) \
420
-void HELPER(NAME)(void *vd, void *v0, void *vs1, \
421
- void *vs2, CPURISCVState *env, \
422
- uint32_t desc) \
423
-{ \
424
- do_vext_vv(vd, v0, vs1, vs2, env, desc, \
425
- do_##NAME, ESZ); \
426
-}
427
-
428
GEN_VEXT_VV(vadd_vv_b, 1)
429
GEN_VEXT_VV(vadd_vv_h, 2)
430
GEN_VEXT_VV(vadd_vv_w, 4)
431
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VV(vsub_vv_h, 2)
432
GEN_VEXT_VV(vsub_vv_w, 4)
433
GEN_VEXT_VV(vsub_vv_d, 8)
434
435
-typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
436
-
437
-/*
438
- * (T1)s1 gives the real operator type.
439
- * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
440
- */
441
-#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
442
-static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
443
-{ \
444
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
445
- *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
446
-}
447
448
RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
449
RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
450
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
451
RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
452
RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
453
454
-static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
455
- CPURISCVState *env, uint32_t desc,
456
- opivx2_fn fn, uint32_t esz)
457
-{
458
- uint32_t vm = vext_vm(desc);
459
- uint32_t vl = env->vl;
460
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
461
- uint32_t vta = vext_vta(desc);
462
- uint32_t vma = vext_vma(desc);
463
- uint32_t i;
464
-
465
- for (i = env->vstart; i < vl; i++) {
466
- if (!vm && !vext_elem_mask(v0, i)) {
467
- /* set masked-off elements to 1s */
468
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
469
- continue;
470
- }
471
- fn(vd, s1, vs2, i);
472
- }
473
- env->vstart = 0;
474
- /* set tail elements to 1s */
475
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
476
-}
477
-
478
-/* generate the helpers for OPIVX */
479
-#define GEN_VEXT_VX(NAME, ESZ) \
480
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
481
- void *vs2, CPURISCVState *env, \
482
- uint32_t desc) \
483
-{ \
484
- do_vext_vx(vd, v0, s1, vs2, env, desc, \
485
- do_##NAME, ESZ); \
486
-}
487
-
488
GEN_VEXT_VX(vadd_vx_b, 1)
489
GEN_VEXT_VX(vadd_vx_h, 2)
490
GEN_VEXT_VX(vadd_vx_w, 4)
491
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
492
new file mode 100644
493
index XXXXXXX..XXXXXXX
494
--- /dev/null
495
+++ b/target/riscv/vector_internals.c
496
@@ -XXX,XX +XXX,XX @@
497
+/*
498
+ * RISC-V Vector Extension Internals
499
+ *
500
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
501
+ *
502
+ * This program is free software; you can redistribute it and/or modify it
503
+ * under the terms and conditions of the GNU General Public License,
504
+ * version 2 or later, as published by the Free Software Foundation.
505
+ *
506
+ * This program is distributed in the hope it will be useful, but WITHOUT
507
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
508
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
509
+ * more details.
510
+ *
511
+ * You should have received a copy of the GNU General Public License along with
512
+ * this program. If not, see <http://www.gnu.org/licenses/>.
513
+ */
514
+
515
+#include "vector_internals.h"
516
+
517
+/* set agnostic elements to 1s */
518
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
519
+ uint32_t tot)
520
+{
521
+ if (is_agnostic == 0) {
522
+ /* policy undisturbed */
523
+ return;
524
+ }
525
+ if (tot - cnt == 0) {
526
+ return;
527
+ }
528
+ memset(base + cnt, -1, tot - cnt);
529
+}
530
+
531
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
532
+ CPURISCVState *env, uint32_t desc,
533
+ opivv2_fn *fn, uint32_t esz)
534
+{
535
+ uint32_t vm = vext_vm(desc);
536
+ uint32_t vl = env->vl;
537
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
538
+ uint32_t vta = vext_vta(desc);
539
+ uint32_t vma = vext_vma(desc);
540
+ uint32_t i;
541
+
542
+ for (i = env->vstart; i < vl; i++) {
543
+ if (!vm && !vext_elem_mask(v0, i)) {
544
+ /* set masked-off elements to 1s */
545
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
546
+ continue;
547
+ }
548
+ fn(vd, vs1, vs2, i);
549
+ }
550
+ env->vstart = 0;
551
+ /* set tail elements to 1s */
552
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
553
+}
554
+
555
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
556
+ CPURISCVState *env, uint32_t desc,
557
+ opivx2_fn fn, uint32_t esz)
558
+{
559
+ uint32_t vm = vext_vm(desc);
560
+ uint32_t vl = env->vl;
561
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
562
+ uint32_t vta = vext_vta(desc);
563
+ uint32_t vma = vext_vma(desc);
564
+ uint32_t i;
565
+
566
+ for (i = env->vstart; i < vl; i++) {
567
+ if (!vm && !vext_elem_mask(v0, i)) {
568
+ /* set masked-off elements to 1s */
569
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
570
+ continue;
571
+ }
572
+ fn(vd, s1, vs2, i);
573
+ }
574
+ env->vstart = 0;
575
+ /* set tail elements to 1s */
576
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
156
+}
577
+}
157
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
578
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
158
index XXXXXXX..XXXXXXX 100644
579
index XXXXXXX..XXXXXXX 100644
159
--- a/target/riscv/meson.build
580
--- a/target/riscv/meson.build
160
+++ b/target/riscv/meson.build
581
+++ b/target/riscv/meson.build
161
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
582
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
583
'gdbstub.c',
584
'op_helper.c',
585
'vector_helper.c',
586
+ 'vector_internals.c',
587
'bitmanip_helper.c',
162
'translate.c',
588
'translate.c',
163
'm128_helper.c'
589
'm128_helper.c',
164
))
165
+riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
166
167
riscv_softmmu_ss = ss.source_set()
168
riscv_softmmu_ss.add(files(
169
--
590
--
170
2.31.1
591
2.41.0
171
172
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
All Zve* extensions support all vector load and store instructions,
3
Refactor the non-SEW-specific code out of `GEN_OPIVV_TRANS` into a
4
except that Zve64* extensions do not support EEW=64 for index values when
4
function `opivv_trans` (similar to `opivi_trans`). `opivv_trans` will be
5
XLEN=32.
5
used in subsequent vector-crypto commits.
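As a sketch of how the factored-out helper can be reused, a later single-SEW translation pattern could be as small as the following. The macro name and shape are hypothetical, shown only to illustrate the point of the refactor, not taken from the later patches:

#define GEN_OPIVV_CRYPTO_TRANS(NAME, CHECK)                              \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        /* all the VDATA setup and gvec plumbing lives in opivv_trans */ \
        return opivv_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           gen_helper_##NAME, s);                        \
    }                                                                    \
    return false;                                                        \
}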
6
6
7
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20220118014522.13613-4-frank.chang@sifive.com
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
Message-ID: <20230711165917.2629866-3-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
14
---
12
target/riscv/insn_trans/trans_rvv.c.inc | 19 +++++++++++++++----
15
target/riscv/insn_trans/trans_rvv.c.inc | 62 +++++++++++++------------
13
1 file changed, 15 insertions(+), 4 deletions(-)
16
1 file changed, 32 insertions(+), 30 deletions(-)
14
17
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
18
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
22
@@ -XXX,XX +XXX,XX @@ GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
20
uint8_t eew)
23
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
21
{
24
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
22
int8_t emul = eew - s->sew + s->lmul;
25
23
- return (emul >= -3 && emul <= 3) &&
26
+static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
24
- require_align(vs2, emul) &&
27
+ gen_helper_gvec_4_ptr *fn, DisasContext *s)
25
- require_align(vd, s->lmul) &&
28
+{
26
- require_nf(vd, nf, s->lmul);
29
+ uint32_t data = 0;
27
+ bool ret = (emul >= -3 && emul <= 3) &&
30
+ TCGLabel *over = gen_new_label();
28
+ require_align(vs2, emul) &&
31
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
29
+ require_align(vd, s->lmul) &&
32
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
30
+ require_nf(vd, nf, s->lmul);
31
+
33
+
32
+ /*
34
+ data = FIELD_DP32(data, VDATA, VM, vm);
33
+ * All Zve* extensions support all vector load and store instructions,
35
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
34
+ * except Zve64* extensions do not support EEW=64 for index values
36
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
35
+ * when XLEN=32. (Section 18.2)
37
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
36
+ */
38
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
37
+ if (get_xl(s) == MXL_RV32) {
39
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
38
+ ret &= (!has_ext(s, RVV) && s->ext_zve64f ? eew != MO_64 : true);
40
+ vreg_ofs(s, vs2), cpu_env, s->cfg_ptr->vlen / 8,
39
+ }
41
+ s->cfg_ptr->vlen / 8, data, fn);
42
+ mark_vs_dirty(s);
43
+ gen_set_label(over);
44
+ return true;
45
+}
40
+
46
+
41
+ return ret;
47
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
48
/* OPIVV without GVEC IR */
49
-#define GEN_OPIVV_TRANS(NAME, CHECK) \
50
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
51
-{ \
52
- if (CHECK(s, a)) { \
53
- uint32_t data = 0; \
54
- static gen_helper_gvec_4_ptr * const fns[4] = { \
55
- gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
56
- gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
57
- }; \
58
- TCGLabel *over = gen_new_label(); \
59
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
60
- tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
61
- \
62
- data = FIELD_DP32(data, VDATA, VM, a->vm); \
63
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
64
- data = FIELD_DP32(data, VDATA, VTA, s->vta); \
65
- data = \
66
- FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
67
- data = FIELD_DP32(data, VDATA, VMA, s->vma); \
68
- tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
69
- vreg_ofs(s, a->rs1), \
70
- vreg_ofs(s, a->rs2), cpu_env, \
71
- s->cfg_ptr->vlen / 8, \
72
- s->cfg_ptr->vlen / 8, data, \
73
- fns[s->sew]); \
74
- mark_vs_dirty(s); \
75
- gen_set_label(over); \
76
- return true; \
77
- } \
78
- return false; \
79
+#define GEN_OPIVV_TRANS(NAME, CHECK) \
80
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
81
+{ \
82
+ if (CHECK(s, a)) { \
83
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
84
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
85
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
86
+ }; \
87
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
88
+ } \
89
+ return false; \
42
}
90
}
43
91
44
/*
92
/*
45
--
93
--
46
2.31.1
94
2.41.0
47
48
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
All Zve* extensions support the vector configuration instructions.
3
Remove the redundant "vl == 0" check: when vl == 0, it is already covered by the "vstart >= vl" check.
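A minimal standalone restatement of that argument, assuming the unsigned comparison the translator uses (TCG_COND_GEU): when vl == 0, vstart >= vl holds for every possible vstart, so the dedicated vl == 0 branch can never be the one that skips the vector register write-back.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t vl = 0;

    for (uint32_t vstart = 0; vstart < 4; vstart++) {
        /* TCG_COND_GEU on (vstart, vl): always taken when vl == 0 */
        assert(vstart >= vl);
    }
    return 0;
}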
4
4
5
Signed-off-by: Frank Chang <frank.chang@sifive.com>
5
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
7
Message-id: 20220118014522.13613-3-frank.chang@sifive.com
7
Signed-off-by: Max Chou <max.chou@sifive.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20230711165917.2629866-4-max.chou@sifive.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
11
---
10
target/riscv/insn_trans/trans_rvv.c.inc | 6 ++++--
12
target/riscv/insn_trans/trans_rvv.c.inc | 31 +------------------------
11
1 file changed, 4 insertions(+), 2 deletions(-)
13
1 file changed, 1 insertion(+), 30 deletions(-)
12
14
13
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
16
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
17
@@ -XXX,XX +XXX,XX @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
19
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
18
{
20
TCGv_i32 desc;
19
TCGv s1, dst;
21
20
22
TCGLabel *over = gen_new_label();
21
- if (!require_rvv(s) || !has_ext(s, RVV)) {
23
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
22
+ if (!require_rvv(s) ||
24
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
23
+ !(has_ext(s, RVV) || s->ext_zve64f)) {
25
26
dest = tcg_temp_new_ptr();
27
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
28
TCGv_i32 desc;
29
30
TCGLabel *over = gen_new_label();
31
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
33
34
dest = tcg_temp_new_ptr();
35
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
36
TCGv_i32 desc;
37
38
TCGLabel *over = gen_new_label();
39
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
40
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
41
42
dest = tcg_temp_new_ptr();
43
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
44
TCGv_i32 desc;
45
46
TCGLabel *over = gen_new_label();
47
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
48
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
49
50
dest = tcg_temp_new_ptr();
51
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
24
return false;
52
return false;
25
}
53
}
26
54
27
@@ -XXX,XX +XXX,XX @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
55
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
56
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
57
58
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
59
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
60
uint32_t data = 0;
61
62
TCGLabel *over = gen_new_label();
63
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
64
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
65
66
dest = tcg_temp_new_ptr();
67
@@ -XXX,XX +XXX,XX @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
68
uint32_t data = 0;
69
70
TCGLabel *over = gen_new_label();
71
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
72
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
73
74
dest = tcg_temp_new_ptr();
75
@@ -XXX,XX +XXX,XX @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
76
if (checkfn(s, a)) {
77
uint32_t data = 0;
78
TCGLabel *over = gen_new_label();
79
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
80
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
81
82
data = FIELD_DP32(data, VDATA, VM, a->vm);
83
@@ -XXX,XX +XXX,XX @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
84
if (opiwv_widen_check(s, a)) {
85
uint32_t data = 0;
86
TCGLabel *over = gen_new_label();
87
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
88
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
89
90
data = FIELD_DP32(data, VDATA, VM, a->vm);
91
@@ -XXX,XX +XXX,XX @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
28
{
92
{
29
TCGv dst;
93
uint32_t data = 0;
30
94
TCGLabel *over = gen_new_label();
31
- if (!require_rvv(s) || !has_ext(s, RVV)) {
95
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
+ if (!require_rvv(s) ||
96
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
33
+ !(has_ext(s, RVV) || s->ext_zve64f)) {
97
34
return false;
98
data = FIELD_DP32(data, VDATA, VM, vm);
35
}
99
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
36
100
gen_helper_##NAME##_w, \
101
}; \
102
TCGLabel *over = gen_new_label(); \
103
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
104
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
105
\
106
data = FIELD_DP32(data, VDATA, VM, a->vm); \
107
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
108
gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
109
};
110
TCGLabel *over = gen_new_label();
111
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
112
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
113
114
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
115
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
116
vext_check_ss(s, a->rd, 0, 1)) {
117
TCGv s1;
118
TCGLabel *over = gen_new_label();
119
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
120
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
121
122
s1 = get_gpr(s, a->rs1, EXT_SIGN);
123
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
124
gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
125
};
126
TCGLabel *over = gen_new_label();
127
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
128
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
129
130
s1 = tcg_constant_i64(simm);
131
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
132
}; \
133
TCGLabel *over = gen_new_label(); \
134
gen_set_rm(s, RISCV_FRM_DYN); \
135
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
136
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
137
\
138
data = FIELD_DP32(data, VDATA, VM, a->vm); \
139
@@ -XXX,XX +XXX,XX @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
140
TCGv_i64 t1;
141
142
TCGLabel *over = gen_new_label();
143
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
144
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
145
146
dest = tcg_temp_new_ptr();
147
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
148
}; \
149
TCGLabel *over = gen_new_label(); \
150
gen_set_rm(s, RISCV_FRM_DYN); \
151
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
152
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\
153
\
154
data = FIELD_DP32(data, VDATA, VM, a->vm); \
155
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
156
}; \
157
TCGLabel *over = gen_new_label(); \
158
gen_set_rm(s, RISCV_FRM_DYN); \
159
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
160
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
161
\
162
data = FIELD_DP32(data, VDATA, VM, a->vm); \
163
@@ -XXX,XX +XXX,XX @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
164
uint32_t data = 0;
165
TCGLabel *over = gen_new_label();
166
gen_set_rm_chkfrm(s, rm);
167
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
168
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
169
170
data = FIELD_DP32(data, VDATA, VM, a->vm);
171
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
172
gen_helper_vmv_v_x_d,
173
};
174
TCGLabel *over = gen_new_label();
175
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
176
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
177
178
t1 = tcg_temp_new_i64();
179
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
180
}; \
181
TCGLabel *over = gen_new_label(); \
182
gen_set_rm_chkfrm(s, FRM); \
183
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
184
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
185
\
186
data = FIELD_DP32(data, VDATA, VM, a->vm); \
187
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
188
}; \
189
TCGLabel *over = gen_new_label(); \
190
gen_set_rm(s, RISCV_FRM_DYN); \
191
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
192
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
193
\
194
data = FIELD_DP32(data, VDATA, VM, a->vm); \
195
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
196
}; \
197
TCGLabel *over = gen_new_label(); \
198
gen_set_rm_chkfrm(s, FRM); \
199
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
200
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
201
\
202
data = FIELD_DP32(data, VDATA, VM, a->vm); \
203
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
204
}; \
205
TCGLabel *over = gen_new_label(); \
206
gen_set_rm_chkfrm(s, FRM); \
207
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
208
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
209
\
210
data = FIELD_DP32(data, VDATA, VM, a->vm); \
211
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
212
uint32_t data = 0; \
213
gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
214
TCGLabel *over = gen_new_label(); \
215
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
216
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
217
\
218
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
219
@@ -XXX,XX +XXX,XX @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
220
require_vm(a->vm, a->rd)) {
221
uint32_t data = 0;
222
TCGLabel *over = gen_new_label();
223
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
224
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
225
226
data = FIELD_DP32(data, VDATA, VM, a->vm);
227
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
228
TCGv s1;
229
TCGLabel *over = gen_new_label();
230
231
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
232
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
233
234
t1 = tcg_temp_new_i64();
235
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
236
TCGv_i64 t1;
237
TCGLabel *over = gen_new_label();
238
239
- /* if vl == 0 or vstart >= vl, skip vector register write back */
240
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
241
+ /* if vstart >= vl, skip vector register write back */
242
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
243
244
/* NaN-box f[rs1] */
245
@@ -XXX,XX +XXX,XX @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
246
uint32_t data = 0;
247
gen_helper_gvec_3_ptr *fn;
248
TCGLabel *over = gen_new_label();
249
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
250
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
251
252
static gen_helper_gvec_3_ptr * const fns[6][4] = {
37
--
253
--
38
2.31.1
254
2.41.0
39
40
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
2
2
3
Get the kernel and fdt start addresses in virt.c, and pass them to KVM
3
This commit adds support for the Zvbc vector-crypto extension, which
4
when the CPU is reset. Add kvm_riscv.h to hold the RISC-V specific interface.
4
consists of the following instructions:
5
5
6
In addition, the PLIC is created without M-mode PLIC contexts when KVM
6
* vclmulh.[vx,vv]
7
is enabled.
7
* vclmul.[vx,vv]
8
8
9
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
9
Translation functions are defined in
10
Signed-off-by: Mingwang Li <limingwang@huawei.com>
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
`target/riscv/vcrypto_helper.c`.
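For reference, the clmul64()/clmulh64() helpers added below compute the low and high 64 bits of the 128-bit carry-less (GF(2)[x]) product. A standalone sketch mirroring those loops, with a couple of sanity checks; the names are local stand-ins, not the QEMU helpers themselves:

#include <assert.h>
#include <stdint.h>

/* low 64 bits of the carry-less product, as vclmul returns */
static uint64_t clmul_lo(uint64_t y, uint64_t x)
{
    uint64_t r = 0;
    for (int j = 63; j >= 0; j--) {
        if ((y >> j) & 1) {
            r ^= x << j;
        }
    }
    return r;
}

/* high 64 bits of the carry-less product, as vclmulh returns */
static uint64_t clmul_hi(uint64_t y, uint64_t x)
{
    uint64_t r = 0;
    for (int j = 63; j >= 1; j--) {
        if ((y >> j) & 1) {
            r ^= x >> (64 - j);
        }
    }
    return r;
}

int main(void)
{
    /* (x + 1) * (x + 1) = x^2 + 1 in GF(2)[x]: 0x3 clmul 0x3 = 0x5 */
    assert(clmul_lo(0x3, 0x3) == 0x5);
    assert(clmul_hi(0x3, 0x3) == 0);
    /* the high half only fills in once the product crosses bit 63 */
    assert(clmul_hi(1ULL << 63, 1ULL << 63) == 1ULL << 62);
    return 0;
}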
12
Reviewed-by: Anup Patel <anup@brainfault.org>
12
13
Message-id: 20220112081329.1835-7-jiangyifei@huawei.com
13
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
14
Co-authored-by: Max Chou <max.chou@sifive.com>
15
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
16
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
[max.chou@sifive.com: Exposed x-zvbc property]
19
Message-ID: <20230711165917.2629866-5-max.chou@sifive.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
21
---
16
include/hw/riscv/boot.h | 1 +
22
target/riscv/cpu_cfg.h | 1 +
17
target/riscv/cpu.h | 3 ++
23
target/riscv/helper.h | 6 +++
18
target/riscv/kvm_riscv.h | 24 ++++++++++++
24
target/riscv/insn32.decode | 6 +++
19
hw/intc/sifive_plic.c | 20 +++++++---
25
target/riscv/cpu.c | 9 ++++
20
hw/riscv/boot.c | 16 +++++++-
26
target/riscv/translate.c | 1 +
21
hw/riscv/virt.c | 83 ++++++++++++++++++++++++++++------------
27
target/riscv/vcrypto_helper.c | 59 ++++++++++++++++++++++
22
target/riscv/cpu.c | 8 ++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 62 ++++++++++++++++++++++++
23
target/riscv/kvm-stub.c | 25 ++++++++++++
29
target/riscv/meson.build | 3 +-
24
target/riscv/kvm.c | 14 +++++++
30
8 files changed, 146 insertions(+), 1 deletion(-)
25
target/riscv/meson.build | 2 +-
31
create mode 100644 target/riscv/vcrypto_helper.c
26
10 files changed, 164 insertions(+), 32 deletions(-)
32
create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc
27
create mode 100644 target/riscv/kvm_riscv.h
33
28
create mode 100644 target/riscv/kvm-stub.c
34
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
29
35
index XXXXXXX..XXXXXXX 100644
30
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
36
--- a/target/riscv/cpu_cfg.h
31
index XXXXXXX..XXXXXXX 100644
37
+++ b/target/riscv/cpu_cfg.h
32
--- a/include/hw/riscv/boot.h
38
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
33
+++ b/include/hw/riscv/boot.h
39
bool ext_zve32f;
34
@@ -XXX,XX +XXX,XX @@ void riscv_rom_copy_firmware_info(MachineState *machine, hwaddr rom_base,
40
bool ext_zve64f;
35
hwaddr rom_size,
41
bool ext_zve64d;
36
uint32_t reset_vec_size,
42
+ bool ext_zvbc;
37
uint64_t kernel_entry);
43
bool ext_zmmul;
38
+void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr);
44
bool ext_zvfbfmin;
39
45
bool ext_zvfbfwma;
40
#endif /* RISCV_BOOT_H */
46
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
41
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
47
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
48
--- a/target/riscv/helper.h
43
--- a/target/riscv/cpu.h
49
+++ b/target/riscv/helper.h
44
+++ b/target/riscv/cpu.h
50
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vfwcvtbf16_f_f_v, void, ptr, ptr, ptr, env, i32)
45
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
51
46
52
DEF_HELPER_6(vfwmaccbf16_vv, void, ptr, ptr, ptr, ptr, env, i32)
47
/* Fields from here on are preserved across CPU reset. */
53
DEF_HELPER_6(vfwmaccbf16_vf, void, ptr, ptr, i64, ptr, env, i32)
48
QEMUTimer *timer; /* Internal timer */
54
+
49
+
55
+/* Vector crypto functions */
50
+ hwaddr kernel_addr;
56
+DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
51
+ hwaddr fdt_addr;
57
+DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
58
+DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
59
+DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
60
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/riscv/insn32.decode
63
+++ b/target/riscv/insn32.decode
64
@@ -XXX,XX +XXX,XX @@ vfwcvtbf16_f_f_v 010010 . ..... 01101 001 ..... 1010111 @r2_vm
65
# *** Zvfbfwma Standard Extension ***
66
vfwmaccbf16_vv 111011 . ..... ..... 001 ..... 1010111 @r_vm
67
vfwmaccbf16_vf 111011 . ..... ..... 101 ..... 1010111 @r_vm
68
+
69
+# *** Zvbc vector crypto extension ***
70
+vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
71
+vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
72
+vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
73
+vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
74
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/riscv/cpu.c
77
+++ b/target/riscv/cpu.c
78
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
79
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
80
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
81
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
82
+ ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
83
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
84
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
85
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
86
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
87
return;
88
}
89
90
+ if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
91
+ error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
92
+ return;
93
+ }
94
+
95
if (cpu->cfg.ext_zk) {
96
cpu->cfg.ext_zkn = true;
97
cpu->cfg.ext_zkr = true;
98
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
99
DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
100
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
101
102
+ /* Vector cryptography extensions */
103
+ DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
104
+
105
DEFINE_PROP_END_OF_LIST(),
52
};
106
};
53
107
54
OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass,
108
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
55
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/riscv/translate.c
111
+++ b/target/riscv/translate.c
112
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
113
#include "insn_trans/trans_rvzfa.c.inc"
114
#include "insn_trans/trans_rvzfh.c.inc"
115
#include "insn_trans/trans_rvk.c.inc"
116
+#include "insn_trans/trans_rvvk.c.inc"
117
#include "insn_trans/trans_privileged.c.inc"
118
#include "insn_trans/trans_svinval.c.inc"
119
#include "insn_trans/trans_rvbf16.c.inc"
120
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
56
new file mode 100644
121
new file mode 100644
57
index XXXXXXX..XXXXXXX
122
index XXXXXXX..XXXXXXX
58
--- /dev/null
123
--- /dev/null
59
+++ b/target/riscv/kvm_riscv.h
124
+++ b/target/riscv/vcrypto_helper.c
60
@@ -XXX,XX +XXX,XX @@
125
@@ -XXX,XX +XXX,XX @@
61
+/*
126
+/*
62
+ * QEMU KVM support -- RISC-V specific functions.
127
+ * RISC-V Vector Crypto Extension Helpers for QEMU.
63
+ *
128
+ *
64
+ * Copyright (c) 2020 Huawei Technologies Co., Ltd
129
+ * Copyright (C) 2023 SiFive, Inc.
130
+ * Written by Codethink Ltd and SiFive.
65
+ *
131
+ *
66
+ * This program is free software; you can redistribute it and/or modify it
132
+ * This program is free software; you can redistribute it and/or modify it
67
+ * under the terms and conditions of the GNU General Public License,
133
+ * under the terms and conditions of the GNU General Public License,
68
+ * version 2 or later, as published by the Free Software Foundation.
134
+ * version 2 or later, as published by the Free Software Foundation.
69
+ *
135
+ *
...
...
74
+ *
140
+ *
75
+ * You should have received a copy of the GNU General Public License along with
141
+ * You should have received a copy of the GNU General Public License along with
76
+ * this program. If not, see <http://www.gnu.org/licenses/>.
142
+ * this program. If not, see <http://www.gnu.org/licenses/>.
77
+ */
143
+ */
78
+
144
+
79
+#ifndef QEMU_KVM_RISCV_H
145
+#include "qemu/osdep.h"
80
+#define QEMU_KVM_RISCV_H
146
+#include "qemu/host-utils.h"
81
+
147
+#include "qemu/bitops.h"
82
+void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
148
+#include "cpu.h"
83
+
149
+#include "exec/memop.h"
84
+#endif
150
+#include "exec/exec-all.h"
85
diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
151
+#include "exec/helper-proto.h"
86
index XXXXXXX..XXXXXXX 100644
152
+#include "internals.h"
87
--- a/hw/intc/sifive_plic.c
153
+#include "vector_internals.h"
88
+++ b/hw/intc/sifive_plic.c
154
+
89
@@ -XXX,XX +XXX,XX @@
155
+static uint64_t clmul64(uint64_t y, uint64_t x)
90
#include "target/riscv/cpu.h"
156
+{
91
#include "migration/vmstate.h"
157
+ uint64_t result = 0;
92
#include "hw/irq.h"
158
+ for (int j = 63; j >= 0; j--) {
93
+#include "sysemu/kvm.h"
159
+ if ((y >> j) & 1) {
94
160
+ result ^= (x << j);
95
static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
96
{
97
@@ -XXX,XX +XXX,XX @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
98
uint32_t context_stride, uint32_t aperture_size)
99
{
100
DeviceState *dev = qdev_new(TYPE_SIFIVE_PLIC);
101
- int i;
102
+ int i, j = 0;
103
+ SiFivePLICState *plic;
104
105
assert(enable_stride == (enable_stride & -enable_stride));
106
assert(context_stride == (context_stride & -context_stride));
107
@@ -XXX,XX +XXX,XX @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
108
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
109
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
110
111
+ plic = SIFIVE_PLIC(dev);
112
for (i = 0; i < num_harts; i++) {
113
CPUState *cpu = qemu_get_cpu(hartid_base + i);
114
115
- qdev_connect_gpio_out(dev, i,
116
- qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT));
117
- qdev_connect_gpio_out(dev, num_harts + i,
118
- qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT));
119
+ if (plic->addr_config[j].mode == PLICMode_M) {
120
+ j++;
121
+ qdev_connect_gpio_out(dev, num_harts + i,
122
+ qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT));
123
+ }
161
+ }
124
+
162
+ }
125
+ if (plic->addr_config[j].mode == PLICMode_S) {
163
+ return result;
126
+ j++;
164
+}
127
+ qdev_connect_gpio_out(dev, i,
165
+
128
+ qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT));
166
+static uint64_t clmulh64(uint64_t y, uint64_t x)
167
+{
168
+ uint64_t result = 0;
169
+ for (int j = 63; j >= 1; j--) {
170
+ if ((y >> j) & 1) {
171
+ result ^= (x >> (64 - j));
129
+ }
172
+ }
130
}
173
+ }
131
174
+ return result;
132
return dev;
175
+}
133
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
176
+
134
index XXXXXXX..XXXXXXX 100644
177
+RVVCALL(OPIVV2, vclmul_vv, OP_UUU_D, H8, H8, H8, clmul64)
135
--- a/hw/riscv/boot.c
178
+GEN_VEXT_VV(vclmul_vv, 8)
136
+++ b/hw/riscv/boot.c
179
+RVVCALL(OPIVX2, vclmul_vx, OP_UUU_D, H8, H8, clmul64)
137
@@ -XXX,XX +XXX,XX @@
180
+GEN_VEXT_VX(vclmul_vx, 8)
138
#include "elf.h"
181
+RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
139
#include "sysemu/device_tree.h"
182
+GEN_VEXT_VV(vclmulh_vv, 8)
140
#include "sysemu/qtest.h"
183
+RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
141
+#include "sysemu/kvm.h"
184
+GEN_VEXT_VX(vclmulh_vx, 8)
142
185
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
143
#include <libfdt.h>
144
145
@@ -XXX,XX +XXX,XX @@ char *riscv_plic_hart_config_string(int hart_count)
146
CPUState *cs = qemu_get_cpu(i);
147
CPURISCVState *env = &RISCV_CPU(cs)->env;
148
149
- if (riscv_has_ext(env, RVS)) {
150
+ if (kvm_enabled()) {
151
+ vals[i] = "S";
152
+ } else if (riscv_has_ext(env, RVS)) {
153
vals[i] = "MS";
154
} else {
155
vals[i] = "M";
156
@@ -XXX,XX +XXX,XX @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
157
158
return;
159
}
160
+
161
+void riscv_setup_direct_kernel(hwaddr kernel_addr, hwaddr fdt_addr)
162
+{
163
+ CPUState *cs;
164
+
165
+ for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
166
+ RISCVCPU *riscv_cpu = RISCV_CPU(cs);
167
+ riscv_cpu->env.kernel_addr = kernel_addr;
168
+ riscv_cpu->env.fdt_addr = fdt_addr;
169
+ }
170
+}
171
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/hw/riscv/virt.c
174
+++ b/hw/riscv/virt.c
175
@@ -XXX,XX +XXX,XX @@
176
#include "chardev/char.h"
177
#include "sysemu/device_tree.h"
178
#include "sysemu/sysemu.h"
179
+#include "sysemu/kvm.h"
180
#include "hw/pci/pci.h"
181
#include "hw/pci-host/gpex.h"
182
#include "hw/display/ramfb.h"
183
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
184
"sifive,plic-1.0.0", "riscv,plic0"
185
};
186
187
- plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
188
+ if (kvm_enabled()) {
189
+ plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
190
+ } else {
191
+ plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4);
192
+ }
193
194
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
195
- plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
196
- plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
197
- plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
198
- plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
199
+ if (kvm_enabled()) {
200
+ plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
201
+ plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
202
+ } else {
203
+ plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]);
204
+ plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT);
205
+ plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]);
206
+ plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT);
207
+ }
208
}
209
210
plic_phandles[socket] = (*phandle)++;
211
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
212
213
create_fdt_socket_memory(s, memmap, socket);
214
215
- if (s->have_aclint) {
216
- create_fdt_socket_aclint(s, memmap, socket, intc_phandles);
217
- } else {
218
- create_fdt_socket_clint(s, memmap, socket, intc_phandles);
219
+ if (!kvm_enabled()) {
220
+ if (s->have_aclint) {
221
+ create_fdt_socket_aclint(s, memmap, socket, intc_phandles);
222
+ } else {
223
+ create_fdt_socket_clint(s, memmap, socket, intc_phandles);
224
+ }
225
}
226
227
create_fdt_socket_plic(s, memmap, socket, phandle,
228
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
229
hart_count, &error_abort);
230
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort);
231
232
- /* Per-socket CLINT */
233
- riscv_aclint_swi_create(
234
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
235
- base_hartid, hart_count, false);
236
- riscv_aclint_mtimer_create(
237
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size +
238
- RISCV_ACLINT_SWI_SIZE,
239
- RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
240
- RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
241
- RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
242
-
243
- /* Per-socket ACLINT SSWI */
244
- if (s->have_aclint) {
245
+ if (!kvm_enabled()) {
246
+ /* Per-socket CLINT */
247
riscv_aclint_swi_create(
248
- memmap[VIRT_ACLINT_SSWI].base +
249
- i * memmap[VIRT_ACLINT_SSWI].size,
250
- base_hartid, hart_count, true);
251
+ memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
252
+ base_hartid, hart_count, false);
253
+ riscv_aclint_mtimer_create(
254
+ memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size +
255
+ RISCV_ACLINT_SWI_SIZE,
256
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
257
+ RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
258
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
259
+
260
+ /* Per-socket ACLINT SSWI */
261
+ if (s->have_aclint) {
262
+ riscv_aclint_swi_create(
263
+ memmap[VIRT_ACLINT_SSWI].base +
264
+ i * memmap[VIRT_ACLINT_SSWI].size,
265
+ base_hartid, hart_count, true);
266
+ }
267
}
268
269
/* Per-socket PLIC hart topology configuration string */
270
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
271
memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
272
mask_rom);
273
274
+ /*
275
+ * Only direct boot kernel is currently supported for KVM VM,
276
+ * so the "-bios" parameter is ignored and treated like "-bios none"
277
+ * when KVM is enabled.
278
+ */
279
+ if (kvm_enabled()) {
280
+ g_free(machine->firmware);
281
+ machine->firmware = g_strdup("none");
282
+ }
283
+
284
if (riscv_is_32bit(&s->soc[0])) {
285
firmware_end_addr = riscv_find_and_load_firmware(machine,
286
RISCV32_BIOS_BIN, start_addr, NULL);
287
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
288
virt_memmap[VIRT_MROM].size, kernel_entry,
289
fdt_load_addr, machine->fdt);
290
291
+ /*
292
+ * Only direct boot kernel is currently supported for KVM VM,
293
+ * so the kernel start address and fdt address are set up here.
294
+ * TODO: support firmware loading and integrate with the TCG start path.
295
+ */
296
+ if (kvm_enabled()) {
297
+ riscv_setup_direct_kernel(kernel_entry, fdt_load_addr);
298
+ }
299
+
300
/* SiFive Test MMIO device */
301
sifive_test_create(memmap[VIRT_TEST].base);
302
303
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/riscv/cpu.c
306
+++ b/target/riscv/cpu.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "hw/qdev-properties.h"
309
#include "migration/vmstate.h"
310
#include "fpu/softfloat-helpers.h"
311
+#include "sysemu/kvm.h"
312
+#include "kvm_riscv.h"
313
314
/* RISC-V CPU definitions */
315
316
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset(DeviceState *dev)
317
cs->exception_index = RISCV_EXCP_NONE;
318
env->load_res = -1;
319
set_default_nan_mode(1, &env->fp_status);
320
+
321
+#ifndef CONFIG_USER_ONLY
322
+ if (kvm_enabled()) {
323
+ kvm_riscv_reset_vcpu(cpu);
324
+ }
325
+#endif
326
}
327
328
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
329
diff --git a/target/riscv/kvm-stub.c b/target/riscv/kvm-stub.c
330
new file mode 100644
186
new file mode 100644
331
index XXXXXXX..XXXXXXX
187
index XXXXXXX..XXXXXXX
332
--- /dev/null
188
--- /dev/null
333
+++ b/target/riscv/kvm-stub.c
189
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
334
@@ -XXX,XX +XXX,XX @@
190
@@ -XXX,XX +XXX,XX @@
335
+/*
191
+/*
336
+ * QEMU KVM RISC-V specific function stubs
192
+ * RISC-V translation routines for the vector crypto extension.
337
+ *
193
+ *
338
+ * Copyright (c) 2020 Huawei Technologies Co., Ltd
194
+ * Copyright (C) 2023 SiFive, Inc.
195
+ * Written by Codethink Ltd and SiFive.
339
+ *
196
+ *
340
+ * This program is free software; you can redistribute it and/or modify it
197
+ * This program is free software; you can redistribute it and/or modify it
341
+ * under the terms and conditions of the GNU General Public License,
198
+ * under the terms and conditions of the GNU General Public License,
342
+ * version 2 or later, as published by the Free Software Foundation.
199
+ * version 2 or later, as published by the Free Software Foundation.
343
+ *
200
+ *
...
...
347
+ * more details.
204
+ * more details.
348
+ *
205
+ *
349
+ * You should have received a copy of the GNU General Public License along with
206
+ * You should have received a copy of the GNU General Public License along with
350
+ * this program. If not, see <http://www.gnu.org/licenses/>.
207
+ * this program. If not, see <http://www.gnu.org/licenses/>.
351
+ */
208
+ */
352
+#include "qemu/osdep.h"
209
+
353
+#include "cpu.h"
210
+/*
354
+#include "kvm_riscv.h"
211
+ * Zvbc
355
+
212
+ */
356
+void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
213
+
357
+{
214
+#define GEN_VV_MASKED_TRANS(NAME, CHECK) \
358
+ abort();
215
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
359
+}
216
+ { \
360
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
217
+ if (CHECK(s, a)) { \
361
index XXXXXXX..XXXXXXX 100644
218
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, \
362
--- a/target/riscv/kvm.c
219
+ gen_helper_##NAME, s); \
363
+++ b/target/riscv/kvm.c
220
+ } \
364
@@ -XXX,XX +XXX,XX @@
221
+ return false; \
365
#include "hw/irq.h"
222
+ }
366
#include "qemu/log.h"
223
+
367
#include "hw/loader.h"
224
+static bool vclmul_vv_check(DisasContext *s, arg_rmrr *a)
368
+#include "kvm_riscv.h"
225
+{
369
226
+ return opivv_check(s, a) &&
370
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
227
+ s->cfg_ptr->ext_zvbc == true &&
371
uint64_t idx)
228
+ s->sew == MO_64;
372
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
229
+}
373
return 0;
230
+
374
}
231
+GEN_VV_MASKED_TRANS(vclmul_vv, vclmul_vv_check)
375
232
+GEN_VV_MASKED_TRANS(vclmulh_vv, vclmul_vv_check)
376
+void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
233
+
377
+{
234
+#define GEN_VX_MASKED_TRANS(NAME, CHECK) \
378
+ CPURISCVState *env = &cpu->env;
235
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
379
+
236
+ { \
380
+ if (!kvm_enabled()) {
237
+ if (CHECK(s, a)) { \
381
+ return;
238
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, \
382
+ }
239
+ gen_helper_##NAME, s); \
383
+ env->pc = cpu->env.kernel_addr;
240
+ } \
384
+ env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
241
+ return false; \
385
+ env->gpr[11] = cpu->env.fdt_addr; /* a1 */
242
+ }
386
+ env->satp = 0;
243
+
387
+}
244
+static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
388
+
245
+{
389
bool kvm_arch_cpu_check_are_resettable(void)
246
+ return opivx_check(s, a) &&
390
{
247
+ s->cfg_ptr->ext_zvbc == true &&
391
return true;
248
+ s->sew == MO_64;
249
+}
250
+
251
+GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
252
+GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
392
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
253
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
393
index XXXXXXX..XXXXXXX 100644
254
index XXXXXXX..XXXXXXX 100644
394
--- a/target/riscv/meson.build
255
--- a/target/riscv/meson.build
395
+++ b/target/riscv/meson.build
256
+++ b/target/riscv/meson.build
396
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
257
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
397
'translate.c',
258
'translate.c',
398
'm128_helper.c'
259
'm128_helper.c',
260
'crypto_helper.c',
261
- 'zce_helper.c'
262
+ 'zce_helper.c',
263
+ 'vcrypto_helper.c'
399
))
264
))
400
-riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
265
riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
401
+riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
266
402
403
riscv_softmmu_ss = ss.source_set()
404
riscv_softmmu_ss.add(files(
405
--
2.41.0
From: Frank Chang <frank.chang@sifive.com>

Vector narrowing conversion instructions are provided to and from all
supported integer EEWs for the Zve32f extension.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20220118014522.13613-17-frank.chang@sifive.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/insn_trans/trans_rvv.c.inc | 3 +++
1 file changed, 3 insertions(+)

From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>

Move the checks out of `do_opiv{v,x,i}_gvec{,_shift}` functions
and into the corresponding macros. This enables the functions to be
reused in subsequent commits without check duplication.

Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-ID: <20230711165917.2629866-6-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/insn_trans/trans_rvv.c.inc | 28 +++++++++++--------------
1 file changed, 12 insertions(+), 16 deletions(-)
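A condensed sketch of the resulting shape (illustrative only; the real hunks below carry the full macro bodies, and opivv_check()/do_opivv_gvec() are the existing helpers in trans_rvv.c.inc):

#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                                  \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    static gen_helper_gvec_4_ptr * const fns[4] = {                      \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                    \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                    \
    };                                                                   \
    /* the legality check now lives in the trans_* macro ... */          \
    if (!opivv_check(s, a)) {                                            \
        return false;                                                    \
    }                                                                    \
    /* ... so do_opivv_gvec() can be reused elsewhere without it */      \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);         \
}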
14
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
17
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
19
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
20
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
21
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
19
return opfv_narrow_check(s, a) &&
22
gen_helper_gvec_4_ptr *fn)
20
require_rvf(s) &&
23
{
21
(s->sew != MO_64) &&
24
TCGLabel *over = gen_new_label();
22
+ require_zve32f(s) &&
25
- if (!opivv_check(s, a)) {
23
require_zve64f(s);
26
- return false;
27
- }
28
29
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
30
31
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
32
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
33
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
34
}; \
35
+ if (!opivv_check(s, a)) { \
36
+ return false; \
37
+ } \
38
return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
24
}
39
}
25
40
26
@@ -XXX,XX +XXX,XX @@ static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
41
@@ -XXX,XX +XXX,XX @@ static inline bool
27
return opfv_narrow_check(s, a) &&
42
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
28
require_scale_rvf(s) &&
43
gen_helper_opivx *fn)
29
(s->sew != MO_8) &&
44
{
30
+ require_scale_zve32f(s) &&
45
- if (!opivx_check(s, a)) {
31
require_scale_zve64f(s);
46
- return false;
47
- }
48
-
49
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
50
TCGv_i64 src1 = tcg_temp_new_i64();
51
52
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
53
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
54
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
55
}; \
56
+ if (!opivx_check(s, a)) { \
57
+ return false; \
58
+ } \
59
return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
32
}
60
}
33
61
34
@@ -XXX,XX +XXX,XX @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
62
@@ -XXX,XX +XXX,XX @@ static inline bool
35
vext_check_isa_ill(s) &&
63
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
36
/* OPFV narrowing instructions ignore vs1 check */
64
gen_helper_opivx *fn, imm_mode_t imm_mode)
37
vext_check_sd(s, a->rd, a->rs2, a->vm) &&
65
{
38
+ require_scale_zve32f(s) &&
66
- if (!opivx_check(s, a)) {
39
require_scale_zve64f(s);
67
- return false;
68
- }
69
-
70
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
71
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
72
extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
73
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
74
gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
75
gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
76
}; \
77
+ if (!opivx_check(s, a)) { \
78
+ return false; \
79
+ } \
80
return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
81
fns[s->sew], IMM_MODE); \
40
}
82
}
83
@@ -XXX,XX +XXX,XX @@ static inline bool
84
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
85
gen_helper_opivx *fn)
86
{
87
- if (!opivx_check(s, a)) {
88
- return false;
89
- }
90
-
91
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
92
TCGv_i32 src1 = tcg_temp_new_i32();
93
94
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
95
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
96
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
97
}; \
98
- \
99
+ if (!opivx_check(s, a)) { \
100
+ return false; \
101
+ } \
102
return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
103
}
41
104
42
--
2.41.0
From: Frank Chang <frank.chang@sifive.com>

Vector widening conversion instructions are provided to and from all
supported integer EEWs for Zve32f extension.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20220118014522.13613-16-frank.chang@sifive.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/insn_trans/trans_rvv.c.inc | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)

From: Dickon Hood <dickon.hood@codethink.co.uk>

Zvbb (implemented in later commit) has a widening instruction, which
requires an extra check on the enabled extensions. Refactor
GEN_OPIVX_WIDEN_TRANS() to take a check function to avoid reimplementing
it.

Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-ID: <20230711165917.2629866-7-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/insn_trans/trans_rvv.c.inc | 52 +++++++++++--------------
1 file changed, 23 insertions(+), 29 deletions(-)
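A condensed sketch of the refactored macro (illustrative only; opivx_trans() and opivx_widen_check() already exist in trans_rvv.c.inc, and the full hunks follow below):

#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK)                                 \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                     \
{                                                                          \
    /* the caller now supplies the legality check */                       \
    if (CHECK(s, a)) {                                                     \
        static gen_helper_opivx * const fns[3] = {                         \
            gen_helper_##NAME##_b,                                         \
            gen_helper_##NAME##_h,                                         \
            gen_helper_##NAME##_w                                          \
        };                                                                 \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);  \
    }                                                                      \
    return false;                                                          \
}

/* existing users keep the standard check ... */
GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
/* ... while the Zvbb patch later in this series passes its own */
GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)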
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
18
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool require_zve32f(DisasContext *s)
22
@@ -XXX,XX +XXX,XX @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
19
return s->ext_zve32f ? s->sew <= MO_32 : true;
23
vext_check_ds(s, a->rd, a->rs2, a->vm);
20
}
24
}
21
25
22
+static bool require_scale_zve32f(DisasContext *s)
26
-static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
23
+{
27
- gen_helper_opivx *fn)
24
+ /* RVV + Zve32f = RVV. */
28
-{
25
+ if (has_ext(s, RVV)) {
29
- if (opivx_widen_check(s, a)) {
26
+ return true;
30
- return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
27
+ }
31
- }
28
+
32
- return false;
29
+ /* Zve32f doesn't support FP64. (Section 18.2) */
33
-}
30
+ return s->ext_zve64f ? s->sew <= MO_16 : true;
34
-
31
+}
35
-#define GEN_OPIVX_WIDEN_TRANS(NAME) \
32
+
36
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
33
static bool require_zve64f(DisasContext *s)
37
-{ \
34
{
38
- static gen_helper_opivx * const fns[3] = { \
35
/* RVV + Zve64f = RVV. */
39
- gen_helper_##NAME##_b, \
36
@@ -XXX,XX +XXX,XX @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
40
- gen_helper_##NAME##_h, \
37
(s->sew != MO_8) &&
41
- gen_helper_##NAME##_w \
38
vext_check_isa_ill(s) &&
42
- }; \
39
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
43
- return do_opivx_widen(s, a, fns[s->sew]); \
40
+ require_scale_zve32f(s) &&
44
+#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
41
require_scale_zve64f(s);
45
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
46
+{ \
47
+ if (CHECK(s, a)) { \
48
+ static gen_helper_opivx * const fns[3] = { \
49
+ gen_helper_##NAME##_b, \
50
+ gen_helper_##NAME##_h, \
51
+ gen_helper_##NAME##_w \
52
+ }; \
53
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
54
+ } \
55
+ return false; \
42
}
56
}
43
57
44
@@ -XXX,XX +XXX,XX @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
58
-GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
45
(s->sew != MO_8) &&
59
-GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
46
vext_check_isa_ill(s) &&
60
-GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
47
vext_check_ds(s, a->rd, a->rs2, a->vm) &&
61
-GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
48
+ require_scale_zve32f(s) &&
62
+GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
49
require_scale_zve64f(s);
63
+GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
50
}
64
+GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
51
65
+GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)
52
@@ -XXX,XX +XXX,XX @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
66
53
(s->sew != MO_8) &&
67
/* WIDEN OPIVV with WIDEN */
54
vext_check_isa_ill(s) &&
68
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
55
vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
69
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vrem_vx, opivx_check)
56
+ require_scale_zve32f(s) &&
70
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
57
require_scale_zve64f(s);
71
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
58
}
72
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
59
73
-GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
60
@@ -XXX,XX +XXX,XX @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
74
-GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
61
(s->sew != MO_8) &&
75
-GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
62
vext_check_isa_ill(s) &&
76
+GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
63
vext_check_dd(s, a->rd, a->rs2, a->vm) &&
77
+GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
64
+ require_scale_zve32f(s) &&
78
+GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)
65
require_scale_zve64f(s);
79
66
}
80
/* Vector Single-Width Integer Multiply-Add Instructions */
67
81
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
68
@@ -XXX,XX +XXX,XX @@ static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
82
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
69
{
83
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
70
return opfv_widen_check(s, a) &&
84
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
71
require_rvf(s) &&
85
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
72
+ require_zve32f(s) &&
86
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
73
require_zve64f(s);
87
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
74
}
88
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
75
89
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
76
@@ -XXX,XX +XXX,XX @@ static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
90
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
77
return opfv_widen_check(s, a) &&
91
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
78
require_scale_rvf(s) &&
92
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
79
(s->sew != MO_8) &&
93
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
80
+ require_scale_zve32f(s) &&
94
81
require_scale_zve64f(s);
95
/* Vector Integer Merge and Move Instructions */
82
}
96
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
83
84
@@ -XXX,XX +XXX,XX @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
85
vext_check_isa_ill(s) &&
86
/* OPFV widening instructions ignore vs1 check */
87
vext_check_ds(s, a->rd, a->rs2, a->vm) &&
88
+ require_scale_zve32f(s) &&
89
require_scale_zve64f(s);
90
}
91
92
--
97
--
93
2.31.1
98
2.41.0
94
95
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-id: 20220120122050.41546-17-zhiwei_liu@c-sky.com
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/cpu.h | 5 +++++
target/riscv/vector_helper.c | 7 +++++--
2 files changed, 10 insertions(+), 2 deletions(-)

From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>

Move some macros out of `vector_helper` and into `vector_internals`.
This ensures they can be used by both vector and vector-crypto helpers
(the latter implemented in subsequent commits).

Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-ID: <20230711165917.2629866-8-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/vector_internals.h | 46 +++++++++++++++++++++++++++++++++
target/riscv/vector_helper.c | 42 ------------------------------
2 files changed, 46 insertions(+), 42 deletions(-)
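For orientation, this is roughly how the relocated macros get picked up by a vector-crypto helper later in the series (condensed from the vcrypto_helper.c hunks; brev8() is the per-element operation defined there):

/* per-element op and full-vector loop, both built from vector_internals.h */
RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
GEN_VEXT_V(vbrev8_v_w, 4)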
12
16
13
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
17
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/cpu.h
19
--- a/target/riscv/vector_internals.h
16
+++ b/target/riscv/cpu.h
20
+++ b/target/riscv/vector_internals.h
17
@@ -XXX,XX +XXX,XX @@ static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
21
@@ -XXX,XX +XXX,XX @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
18
}
22
/* expand macro args before macro */
19
#endif
23
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
20
24
21
+static inline int riscv_cpu_xlen(CPURISCVState *env)
25
+/* (TD, T2, TX2) */
22
+{
26
+#define OP_UU_B uint8_t, uint8_t, uint8_t
23
+ return 16 << env->xl;
27
+#define OP_UU_H uint16_t, uint16_t, uint16_t
28
+#define OP_UU_W uint32_t, uint32_t, uint32_t
29
+#define OP_UU_D uint64_t, uint64_t, uint64_t
30
+
31
/* (TD, T1, T2, TX1, TX2) */
32
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
33
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
34
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
35
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
36
37
+#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
38
+static void do_##NAME(void *vd, void *vs2, int i) \
39
+{ \
40
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
41
+ *((TD *)vd + HD(i)) = OP(s2); \
24
+}
42
+}
25
+
43
+
26
/*
44
+#define GEN_VEXT_V(NAME, ESZ) \
27
* Encode LMUL to lmul as follows:
45
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
28
* LMUL vlmul lmul
46
+ CPURISCVState *env, uint32_t desc) \
47
+{ \
48
+ uint32_t vm = vext_vm(desc); \
49
+ uint32_t vl = env->vl; \
50
+ uint32_t total_elems = \
51
+ vext_get_total_elems(env, desc, ESZ); \
52
+ uint32_t vta = vext_vta(desc); \
53
+ uint32_t vma = vext_vma(desc); \
54
+ uint32_t i; \
55
+ \
56
+ for (i = env->vstart; i < vl; i++) { \
57
+ if (!vm && !vext_elem_mask(v0, i)) { \
58
+ /* set masked-off elements to 1s */ \
59
+ vext_set_elems_1s(vd, vma, i * ESZ, \
60
+ (i + 1) * ESZ); \
61
+ continue; \
62
+ } \
63
+ do_##NAME(vd, vs2, i); \
64
+ } \
65
+ env->vstart = 0; \
66
+ /* set tail elements to 1s */ \
67
+ vext_set_elems_1s(vd, vta, vl * ESZ, \
68
+ total_elems * ESZ); \
69
+}
70
+
71
/* operation of two vector elements */
72
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
73
74
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
75
do_##NAME, ESZ); \
76
}
77
78
+/* Three of the widening shortening macros: */
79
+/* (TD, T1, T2, TX1, TX2) */
80
+#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
81
+#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
82
+#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
83
+
84
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
29
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
85
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
30
index XXXXXXX..XXXXXXX 100644
86
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/vector_helper.c
87
--- a/target/riscv/vector_helper.c
32
+++ b/target/riscv/vector_helper.c
88
+++ b/target/riscv/vector_helper.c
33
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
89
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
34
uint64_t lmul = FIELD_EX64(s2, VTYPE, VLMUL);
90
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
35
uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW);
91
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
36
uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV);
92
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
37
- bool vill = FIELD_EX64(s2, VTYPE, VILL);
93
-#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
38
- target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED);
94
-#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
39
+ int xlen = riscv_cpu_xlen(env);
95
-#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
40
+ bool vill = (s2 >> (xlen - 1)) & 0x1;
96
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
41
+ target_ulong reserved = s2 &
97
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
42
+ MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT,
98
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
43
+ xlen - 1 - R_VTYPE_RESERVED_SHIFT);
99
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VF(vfwnmsac_vf_h, 4)
44
100
GEN_VEXT_VF(vfwnmsac_vf_w, 8)
45
if (lmul & 4) {
101
46
/* Fractional LMUL. */
102
/* Vector Floating-Point Square-Root Instruction */
103
-/* (TD, T2, TX2) */
104
-#define OP_UU_H uint16_t, uint16_t, uint16_t
105
-#define OP_UU_W uint32_t, uint32_t, uint32_t
106
-#define OP_UU_D uint64_t, uint64_t, uint64_t
107
-
108
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
109
static void do_##NAME(void *vd, void *vs2, int i, \
110
CPURISCVState *env) \
111
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
112
GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
113
114
/* Vector Floating-Point Classify Instruction */
115
-#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
116
-static void do_##NAME(void *vd, void *vs2, int i) \
117
-{ \
118
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
119
- *((TD *)vd + HD(i)) = OP(s2); \
120
-}
121
-
122
-#define GEN_VEXT_V(NAME, ESZ) \
123
-void HELPER(NAME)(void *vd, void *v0, void *vs2, \
124
- CPURISCVState *env, uint32_t desc) \
125
-{ \
126
- uint32_t vm = vext_vm(desc); \
127
- uint32_t vl = env->vl; \
128
- uint32_t total_elems = \
129
- vext_get_total_elems(env, desc, ESZ); \
130
- uint32_t vta = vext_vta(desc); \
131
- uint32_t vma = vext_vma(desc); \
132
- uint32_t i; \
133
- \
134
- for (i = env->vstart; i < vl; i++) { \
135
- if (!vm && !vext_elem_mask(v0, i)) { \
136
- /* set masked-off elements to 1s */ \
137
- vext_set_elems_1s(vd, vma, i * ESZ, \
138
- (i + 1) * ESZ); \
139
- continue; \
140
- } \
141
- do_##NAME(vd, vs2, i); \
142
- } \
143
- env->vstart = 0; \
144
- /* set tail elements to 1s */ \
145
- vext_set_elems_1s(vd, vta, vl * ESZ, \
146
- total_elems * ESZ); \
147
-}
148
-
149
target_ulong fclass_h(uint64_t frs1)
150
{
151
float16 f = frs1;
47
--
152
--
48
2.31.1
153
2.41.0
49
50
diff view generated by jsdifflib
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Dickon Hood <dickon.hood@codethink.co.uk>
2
2
3
Get GPR CSR and FP registers from kvm by KVM_GET_ONE_REG ioctl.
3
This commit adds support for the Zvbb vector-crypto extension, which
4
consists of the following instructions:
4
5
5
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
6
* vrol.[vv,vx]
6
Signed-off-by: Mingwang Li <limingwang@huawei.com>
7
* vror.[vv,vx,vi]
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
* vbrev8.v
8
Reviewed-by: Anup Patel <anup.patel@wdc.com>
9
* vrev8.v
9
Message-id: 20220112081329.1835-5-jiangyifei@huawei.com
10
* vandn.[vv,vx]
11
* vbrev.v
12
* vclz.v
13
* vctz.v
14
* vcpop.v
15
* vwsll.[vv,vx,vi]
16
17
Translation functions are defined in
18
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
19
`target/riscv/vcrypto_helper.c`.
20
21
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
22
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
23
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
24
[max.chou@sifive.com: Fix imm mode of vror.vi]
25
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
26
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
27
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
28
Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
29
Signed-off-by: Max Chou <max.chou@sifive.com>
30
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
[max.chou@sifive.com: Exposed x-zvbb property]
32
Message-ID: <20230711165917.2629866-9-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
34
---
12
target/riscv/kvm.c | 112 ++++++++++++++++++++++++++++++++++++++++++++-
35
target/riscv/cpu_cfg.h | 1 +
13
1 file changed, 111 insertions(+), 1 deletion(-)
36
target/riscv/helper.h | 62 +++++++++
37
target/riscv/insn32.decode | 20 +++
38
target/riscv/cpu.c | 12 ++
39
target/riscv/vcrypto_helper.c | 138 +++++++++++++++++++
40
target/riscv/insn_trans/trans_rvvk.c.inc | 164 +++++++++++++++++++++++
41
6 files changed, 397 insertions(+)
14
42
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
43
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
16
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/kvm.c
45
--- a/target/riscv/cpu_cfg.h
18
+++ b/target/riscv/kvm.c
46
+++ b/target/riscv/cpu_cfg.h
19
@@ -XXX,XX +XXX,XX @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
47
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
20
return id;
48
bool ext_zve32f;
21
}
49
bool ext_zve64f;
22
50
bool ext_zve64d;
23
+#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
51
+ bool ext_zvbb;
24
+ KVM_REG_RISCV_CORE_REG(name))
52
bool ext_zvbc;
25
+
53
bool ext_zmmul;
26
+#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
54
bool ext_zvfbfmin;
27
+ KVM_REG_RISCV_CSR_REG(name))
55
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
28
+
56
index XXXXXXX..XXXXXXX 100644
29
+#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
57
--- a/target/riscv/helper.h
30
+
58
+++ b/target/riscv/helper.h
31
+#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
59
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
32
+
60
DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
33
+#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
61
DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
34
+ do { \
62
DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
35
+ int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
63
+
36
+ if (ret) { \
64
+DEF_HELPER_6(vror_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
37
+ return ret; \
65
+DEF_HELPER_6(vror_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
38
+ } \
66
+DEF_HELPER_6(vror_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
39
+ } while (0)
67
+DEF_HELPER_6(vror_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
40
+
68
+
41
+static int kvm_riscv_get_regs_core(CPUState *cs)
69
+DEF_HELPER_6(vror_vx_b, void, ptr, ptr, tl, ptr, env, i32)
70
+DEF_HELPER_6(vror_vx_h, void, ptr, ptr, tl, ptr, env, i32)
71
+DEF_HELPER_6(vror_vx_w, void, ptr, ptr, tl, ptr, env, i32)
72
+DEF_HELPER_6(vror_vx_d, void, ptr, ptr, tl, ptr, env, i32)
73
+
74
+DEF_HELPER_6(vrol_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
75
+DEF_HELPER_6(vrol_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
76
+DEF_HELPER_6(vrol_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
77
+DEF_HELPER_6(vrol_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
78
+
79
+DEF_HELPER_6(vrol_vx_b, void, ptr, ptr, tl, ptr, env, i32)
80
+DEF_HELPER_6(vrol_vx_h, void, ptr, ptr, tl, ptr, env, i32)
81
+DEF_HELPER_6(vrol_vx_w, void, ptr, ptr, tl, ptr, env, i32)
82
+DEF_HELPER_6(vrol_vx_d, void, ptr, ptr, tl, ptr, env, i32)
83
+
84
+DEF_HELPER_5(vrev8_v_b, void, ptr, ptr, ptr, env, i32)
85
+DEF_HELPER_5(vrev8_v_h, void, ptr, ptr, ptr, env, i32)
86
+DEF_HELPER_5(vrev8_v_w, void, ptr, ptr, ptr, env, i32)
87
+DEF_HELPER_5(vrev8_v_d, void, ptr, ptr, ptr, env, i32)
88
+DEF_HELPER_5(vbrev8_v_b, void, ptr, ptr, ptr, env, i32)
89
+DEF_HELPER_5(vbrev8_v_h, void, ptr, ptr, ptr, env, i32)
90
+DEF_HELPER_5(vbrev8_v_w, void, ptr, ptr, ptr, env, i32)
91
+DEF_HELPER_5(vbrev8_v_d, void, ptr, ptr, ptr, env, i32)
92
+DEF_HELPER_5(vbrev_v_b, void, ptr, ptr, ptr, env, i32)
93
+DEF_HELPER_5(vbrev_v_h, void, ptr, ptr, ptr, env, i32)
94
+DEF_HELPER_5(vbrev_v_w, void, ptr, ptr, ptr, env, i32)
95
+DEF_HELPER_5(vbrev_v_d, void, ptr, ptr, ptr, env, i32)
96
+
97
+DEF_HELPER_5(vclz_v_b, void, ptr, ptr, ptr, env, i32)
98
+DEF_HELPER_5(vclz_v_h, void, ptr, ptr, ptr, env, i32)
99
+DEF_HELPER_5(vclz_v_w, void, ptr, ptr, ptr, env, i32)
100
+DEF_HELPER_5(vclz_v_d, void, ptr, ptr, ptr, env, i32)
101
+DEF_HELPER_5(vctz_v_b, void, ptr, ptr, ptr, env, i32)
102
+DEF_HELPER_5(vctz_v_h, void, ptr, ptr, ptr, env, i32)
103
+DEF_HELPER_5(vctz_v_w, void, ptr, ptr, ptr, env, i32)
104
+DEF_HELPER_5(vctz_v_d, void, ptr, ptr, ptr, env, i32)
105
+DEF_HELPER_5(vcpop_v_b, void, ptr, ptr, ptr, env, i32)
106
+DEF_HELPER_5(vcpop_v_h, void, ptr, ptr, ptr, env, i32)
107
+DEF_HELPER_5(vcpop_v_w, void, ptr, ptr, ptr, env, i32)
108
+DEF_HELPER_5(vcpop_v_d, void, ptr, ptr, ptr, env, i32)
109
+
110
+DEF_HELPER_6(vwsll_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
111
+DEF_HELPER_6(vwsll_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
112
+DEF_HELPER_6(vwsll_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
113
+DEF_HELPER_6(vwsll_vx_b, void, ptr, ptr, tl, ptr, env, i32)
114
+DEF_HELPER_6(vwsll_vx_h, void, ptr, ptr, tl, ptr, env, i32)
115
+DEF_HELPER_6(vwsll_vx_w, void, ptr, ptr, tl, ptr, env, i32)
116
+
117
+DEF_HELPER_6(vandn_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
118
+DEF_HELPER_6(vandn_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
119
+DEF_HELPER_6(vandn_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
120
+DEF_HELPER_6(vandn_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
121
+DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
122
+DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
123
+DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
124
+DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
125
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/riscv/insn32.decode
128
+++ b/target/riscv/insn32.decode
129
@@ -XXX,XX +XXX,XX @@
130
%imm_u 12:s20 !function=ex_shift_12
131
%imm_bs 30:2 !function=ex_shift_3
132
%imm_rnum 20:4
133
+%imm_z6 26:1 15:5
134
135
# Argument sets:
136
&empty
137
@@ -XXX,XX +XXX,XX @@
138
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
139
@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
140
@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
141
+@r2_zimm6 ..... . vm:1 ..... ..... ... ..... ....... &rmrr %rs2 rs1=%imm_z6 %rd
142
@r2_zimm11 . zimm:11 ..... ... ..... ....... %rs1 %rd
143
@r2_zimm10 .. zimm:10 ..... ... ..... ....... %rs1 %rd
144
@r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
145
@@ -XXX,XX +XXX,XX @@ vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
146
vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
147
vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
148
vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
149
+
150
+# *** Zvbb vector crypto extension ***
151
+vrol_vv 010101 . ..... ..... 000 ..... 1010111 @r_vm
152
+vrol_vx 010101 . ..... ..... 100 ..... 1010111 @r_vm
153
+vror_vv 010100 . ..... ..... 000 ..... 1010111 @r_vm
154
+vror_vx 010100 . ..... ..... 100 ..... 1010111 @r_vm
155
+vror_vi 01010. . ..... ..... 011 ..... 1010111 @r2_zimm6
156
+vbrev8_v 010010 . ..... 01000 010 ..... 1010111 @r2_vm
157
+vrev8_v 010010 . ..... 01001 010 ..... 1010111 @r2_vm
158
+vandn_vv 000001 . ..... ..... 000 ..... 1010111 @r_vm
159
+vandn_vx 000001 . ..... ..... 100 ..... 1010111 @r_vm
160
+vbrev_v 010010 . ..... 01010 010 ..... 1010111 @r2_vm
161
+vclz_v 010010 . ..... 01100 010 ..... 1010111 @r2_vm
162
+vctz_v 010010 . ..... 01101 010 ..... 1010111 @r2_vm
163
+vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
164
+vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
165
+vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
166
+vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
167
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/target/riscv/cpu.c
170
+++ b/target/riscv/cpu.c
171
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
172
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
173
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
174
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
175
+ ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
176
ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
177
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
178
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
179
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
180
return;
181
}
182
183
+ /*
184
+ * In principle Zve*x would also suffice here, were they supported
185
+ * in qemu
186
+ */
187
+ if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
188
+ error_setg(errp,
189
+ "Vector crypto extensions require V or Zve* extensions");
190
+ return;
191
+ }
192
+
193
if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
194
error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
195
return;
196
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
197
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
198
199
/* Vector cryptography extensions */
200
+ DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
201
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
202
203
DEFINE_PROP_END_OF_LIST(),
204
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/target/riscv/vcrypto_helper.c
207
+++ b/target/riscv/vcrypto_helper.c
208
@@ -XXX,XX +XXX,XX @@
209
#include "qemu/osdep.h"
210
#include "qemu/host-utils.h"
211
#include "qemu/bitops.h"
212
+#include "qemu/bswap.h"
213
#include "cpu.h"
214
#include "exec/memop.h"
215
#include "exec/exec-all.h"
216
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
217
GEN_VEXT_VV(vclmulh_vv, 8)
218
RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
219
GEN_VEXT_VX(vclmulh_vx, 8)
220
+
221
+RVVCALL(OPIVV2, vror_vv_b, OP_UUU_B, H1, H1, H1, ror8)
222
+RVVCALL(OPIVV2, vror_vv_h, OP_UUU_H, H2, H2, H2, ror16)
223
+RVVCALL(OPIVV2, vror_vv_w, OP_UUU_W, H4, H4, H4, ror32)
224
+RVVCALL(OPIVV2, vror_vv_d, OP_UUU_D, H8, H8, H8, ror64)
225
+GEN_VEXT_VV(vror_vv_b, 1)
226
+GEN_VEXT_VV(vror_vv_h, 2)
227
+GEN_VEXT_VV(vror_vv_w, 4)
228
+GEN_VEXT_VV(vror_vv_d, 8)
229
+
230
+RVVCALL(OPIVX2, vror_vx_b, OP_UUU_B, H1, H1, ror8)
231
+RVVCALL(OPIVX2, vror_vx_h, OP_UUU_H, H2, H2, ror16)
232
+RVVCALL(OPIVX2, vror_vx_w, OP_UUU_W, H4, H4, ror32)
233
+RVVCALL(OPIVX2, vror_vx_d, OP_UUU_D, H8, H8, ror64)
234
+GEN_VEXT_VX(vror_vx_b, 1)
235
+GEN_VEXT_VX(vror_vx_h, 2)
236
+GEN_VEXT_VX(vror_vx_w, 4)
237
+GEN_VEXT_VX(vror_vx_d, 8)
238
+
239
+RVVCALL(OPIVV2, vrol_vv_b, OP_UUU_B, H1, H1, H1, rol8)
240
+RVVCALL(OPIVV2, vrol_vv_h, OP_UUU_H, H2, H2, H2, rol16)
241
+RVVCALL(OPIVV2, vrol_vv_w, OP_UUU_W, H4, H4, H4, rol32)
242
+RVVCALL(OPIVV2, vrol_vv_d, OP_UUU_D, H8, H8, H8, rol64)
243
+GEN_VEXT_VV(vrol_vv_b, 1)
244
+GEN_VEXT_VV(vrol_vv_h, 2)
245
+GEN_VEXT_VV(vrol_vv_w, 4)
246
+GEN_VEXT_VV(vrol_vv_d, 8)
247
+
248
+RVVCALL(OPIVX2, vrol_vx_b, OP_UUU_B, H1, H1, rol8)
249
+RVVCALL(OPIVX2, vrol_vx_h, OP_UUU_H, H2, H2, rol16)
250
+RVVCALL(OPIVX2, vrol_vx_w, OP_UUU_W, H4, H4, rol32)
251
+RVVCALL(OPIVX2, vrol_vx_d, OP_UUU_D, H8, H8, rol64)
252
+GEN_VEXT_VX(vrol_vx_b, 1)
253
+GEN_VEXT_VX(vrol_vx_h, 2)
254
+GEN_VEXT_VX(vrol_vx_w, 4)
255
+GEN_VEXT_VX(vrol_vx_d, 8)
256
+
257
+static uint64_t brev8(uint64_t val)
42
+{
258
+{
43
+ int ret = 0;
259
+ val = ((val & 0x5555555555555555ull) << 1) |
44
+ int i;
260
+ ((val & 0xAAAAAAAAAAAAAAAAull) >> 1);
45
+ target_ulong reg;
261
+ val = ((val & 0x3333333333333333ull) << 2) |
46
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
262
+ ((val & 0xCCCCCCCCCCCCCCCCull) >> 2);
47
+
263
+ val = ((val & 0x0F0F0F0F0F0F0F0Full) << 4) |
48
+ ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
264
+ ((val & 0xF0F0F0F0F0F0F0F0ull) >> 4);
49
+ if (ret) {
265
+
50
+ return ret;
266
+ return val;
51
+ }
52
+ env->pc = reg;
53
+
54
+ for (i = 1; i < 32; i++) {
55
+ uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
56
+ ret = kvm_get_one_reg(cs, id, &reg);
57
+ if (ret) {
58
+ return ret;
59
+ }
60
+ env->gpr[i] = reg;
61
+ }
62
+
63
+ return ret;
64
+}
267
+}
65
+
268
+
66
+static int kvm_riscv_get_regs_csr(CPUState *cs)
269
+RVVCALL(OPIVV1, vbrev8_v_b, OP_UU_B, H1, H1, brev8)
270
+RVVCALL(OPIVV1, vbrev8_v_h, OP_UU_H, H2, H2, brev8)
271
+RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
272
+RVVCALL(OPIVV1, vbrev8_v_d, OP_UU_D, H8, H8, brev8)
273
+GEN_VEXT_V(vbrev8_v_b, 1)
274
+GEN_VEXT_V(vbrev8_v_h, 2)
275
+GEN_VEXT_V(vbrev8_v_w, 4)
276
+GEN_VEXT_V(vbrev8_v_d, 8)
277
+
278
+#define DO_IDENTITY(a) (a)
279
+RVVCALL(OPIVV1, vrev8_v_b, OP_UU_B, H1, H1, DO_IDENTITY)
280
+RVVCALL(OPIVV1, vrev8_v_h, OP_UU_H, H2, H2, bswap16)
281
+RVVCALL(OPIVV1, vrev8_v_w, OP_UU_W, H4, H4, bswap32)
282
+RVVCALL(OPIVV1, vrev8_v_d, OP_UU_D, H8, H8, bswap64)
283
+GEN_VEXT_V(vrev8_v_b, 1)
284
+GEN_VEXT_V(vrev8_v_h, 2)
285
+GEN_VEXT_V(vrev8_v_w, 4)
286
+GEN_VEXT_V(vrev8_v_d, 8)
287
+
288
+#define DO_ANDN(a, b) ((a) & ~(b))
289
+RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
290
+RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
291
+RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
292
+RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
293
+GEN_VEXT_VV(vandn_vv_b, 1)
294
+GEN_VEXT_VV(vandn_vv_h, 2)
295
+GEN_VEXT_VV(vandn_vv_w, 4)
296
+GEN_VEXT_VV(vandn_vv_d, 8)
297
+
298
+RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
299
+RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
300
+RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
301
+RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
302
+GEN_VEXT_VX(vandn_vx_b, 1)
303
+GEN_VEXT_VX(vandn_vx_h, 2)
304
+GEN_VEXT_VX(vandn_vx_w, 4)
305
+GEN_VEXT_VX(vandn_vx_d, 8)
306
+
307
+RVVCALL(OPIVV1, vbrev_v_b, OP_UU_B, H1, H1, revbit8)
308
+RVVCALL(OPIVV1, vbrev_v_h, OP_UU_H, H2, H2, revbit16)
309
+RVVCALL(OPIVV1, vbrev_v_w, OP_UU_W, H4, H4, revbit32)
310
+RVVCALL(OPIVV1, vbrev_v_d, OP_UU_D, H8, H8, revbit64)
311
+GEN_VEXT_V(vbrev_v_b, 1)
312
+GEN_VEXT_V(vbrev_v_h, 2)
313
+GEN_VEXT_V(vbrev_v_w, 4)
314
+GEN_VEXT_V(vbrev_v_d, 8)
315
+
316
+RVVCALL(OPIVV1, vclz_v_b, OP_UU_B, H1, H1, clz8)
317
+RVVCALL(OPIVV1, vclz_v_h, OP_UU_H, H2, H2, clz16)
318
+RVVCALL(OPIVV1, vclz_v_w, OP_UU_W, H4, H4, clz32)
319
+RVVCALL(OPIVV1, vclz_v_d, OP_UU_D, H8, H8, clz64)
320
+GEN_VEXT_V(vclz_v_b, 1)
321
+GEN_VEXT_V(vclz_v_h, 2)
322
+GEN_VEXT_V(vclz_v_w, 4)
323
+GEN_VEXT_V(vclz_v_d, 8)
324
+
325
+RVVCALL(OPIVV1, vctz_v_b, OP_UU_B, H1, H1, ctz8)
326
+RVVCALL(OPIVV1, vctz_v_h, OP_UU_H, H2, H2, ctz16)
327
+RVVCALL(OPIVV1, vctz_v_w, OP_UU_W, H4, H4, ctz32)
328
+RVVCALL(OPIVV1, vctz_v_d, OP_UU_D, H8, H8, ctz64)
329
+GEN_VEXT_V(vctz_v_b, 1)
330
+GEN_VEXT_V(vctz_v_h, 2)
331
+GEN_VEXT_V(vctz_v_w, 4)
332
+GEN_VEXT_V(vctz_v_d, 8)
333
+
334
+RVVCALL(OPIVV1, vcpop_v_b, OP_UU_B, H1, H1, ctpop8)
335
+RVVCALL(OPIVV1, vcpop_v_h, OP_UU_H, H2, H2, ctpop16)
336
+RVVCALL(OPIVV1, vcpop_v_w, OP_UU_W, H4, H4, ctpop32)
337
+RVVCALL(OPIVV1, vcpop_v_d, OP_UU_D, H8, H8, ctpop64)
338
+GEN_VEXT_V(vcpop_v_b, 1)
339
+GEN_VEXT_V(vcpop_v_h, 2)
340
+GEN_VEXT_V(vcpop_v_w, 4)
341
+GEN_VEXT_V(vcpop_v_d, 8)
342
+
343
+#define DO_SLL(N, M) (N << (M & (sizeof(N) * 8 - 1)))
344
+RVVCALL(OPIVV2, vwsll_vv_b, WOP_UUU_B, H2, H1, H1, DO_SLL)
345
+RVVCALL(OPIVV2, vwsll_vv_h, WOP_UUU_H, H4, H2, H2, DO_SLL)
346
+RVVCALL(OPIVV2, vwsll_vv_w, WOP_UUU_W, H8, H4, H4, DO_SLL)
347
+GEN_VEXT_VV(vwsll_vv_b, 2)
348
+GEN_VEXT_VV(vwsll_vv_h, 4)
349
+GEN_VEXT_VV(vwsll_vv_w, 8)
350
+
351
+RVVCALL(OPIVX2, vwsll_vx_b, WOP_UUU_B, H2, H1, DO_SLL)
352
+RVVCALL(OPIVX2, vwsll_vx_h, WOP_UUU_H, H4, H2, DO_SLL)
353
+RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
354
+GEN_VEXT_VX(vwsll_vx_b, 2)
355
+GEN_VEXT_VX(vwsll_vx_h, 4)
356
+GEN_VEXT_VX(vwsll_vx_w, 8)
357
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
360
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
361
@@ -XXX,XX +XXX,XX @@ static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
362
363
GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
364
GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
365
+
366
+/*
367
+ * Zvbb
368
+ */
369
+
370
+#define GEN_OPIVI_GVEC_TRANS_CHECK(NAME, IMM_MODE, OPIVX, SUF, CHECK) \
371
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
372
+ { \
373
+ if (CHECK(s, a)) { \
374
+ static gen_helper_opivx *const fns[4] = { \
375
+ gen_helper_##OPIVX##_b, \
376
+ gen_helper_##OPIVX##_h, \
377
+ gen_helper_##OPIVX##_w, \
378
+ gen_helper_##OPIVX##_d, \
379
+ }; \
380
+ return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew], \
381
+ IMM_MODE); \
382
+ } \
383
+ return false; \
384
+ }
385
+
386
+#define GEN_OPIVV_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
387
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
388
+ { \
389
+ if (CHECK(s, a)) { \
390
+ static gen_helper_gvec_4_ptr *const fns[4] = { \
391
+ gen_helper_##NAME##_b, \
392
+ gen_helper_##NAME##_h, \
393
+ gen_helper_##NAME##_w, \
394
+ gen_helper_##NAME##_d, \
395
+ }; \
396
+ return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
397
+ } \
398
+ return false; \
399
+ }
400
+
401
+#define GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(NAME, SUF, CHECK) \
402
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
403
+ { \
404
+ if (CHECK(s, a)) { \
405
+ static gen_helper_opivx *const fns[4] = { \
406
+ gen_helper_##NAME##_b, \
407
+ gen_helper_##NAME##_h, \
408
+ gen_helper_##NAME##_w, \
409
+ gen_helper_##NAME##_d, \
410
+ }; \
411
+ return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, \
412
+ fns[s->sew]); \
413
+ } \
414
+ return false; \
415
+ }
416
+
417
+static bool zvbb_vv_check(DisasContext *s, arg_rmrr *a)
67
+{
418
+{
68
+ int ret = 0;
419
+ return opivv_check(s, a) && s->cfg_ptr->ext_zvbb == true;
69
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
70
+
71
+ KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
72
+ KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
73
+ KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
74
+ KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
75
+ KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
76
+ KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
77
+ KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
78
+ KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
79
+ KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
80
+ return ret;
81
+}
420
+}
82
+
421
+
83
+static int kvm_riscv_get_regs_fp(CPUState *cs)
422
+static bool zvbb_vx_check(DisasContext *s, arg_rmrr *a)
84
+{
423
+{
85
+ int ret = 0;
424
+ return opivx_check(s, a) && s->cfg_ptr->ext_zvbb == true;
86
+ int i;
87
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
88
+
89
+ if (riscv_has_ext(env, RVD)) {
90
+ uint64_t reg;
91
+ for (i = 0; i < 32; i++) {
92
+ ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
93
+ if (ret) {
94
+ return ret;
95
+ }
96
+ env->fpr[i] = reg;
97
+ }
98
+ return ret;
99
+ }
100
+
101
+ if (riscv_has_ext(env, RVF)) {
102
+ uint32_t reg;
103
+ for (i = 0; i < 32; i++) {
104
+ ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
105
+ if (ret) {
106
+ return ret;
107
+ }
108
+ env->fpr[i] = reg;
109
+ }
110
+ return ret;
111
+ }
112
+
113
+ return ret;
114
+}
425
+}
115
+
426
+
116
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
427
+/* vrol.v[vx] */
117
KVM_CAP_LAST_INFO
428
+GEN_OPIVV_GVEC_TRANS_CHECK(vrol_vv, rotlv, zvbb_vv_check)
118
};
429
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vrol_vx, rotls, zvbb_vx_check)
119
430
+
120
int kvm_arch_get_registers(CPUState *cs)
431
+/* vror.v[vxi] */
121
{
432
+GEN_OPIVV_GVEC_TRANS_CHECK(vror_vv, rotrv, zvbb_vv_check)
122
- return 0;
433
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vror_vx, rotrs, zvbb_vx_check)
123
+ int ret = 0;
434
+GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri, zvbb_vx_check)
124
+
435
+
125
+ ret = kvm_riscv_get_regs_core(cs);
436
+#define GEN_OPIVX_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
126
+ if (ret) {
437
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
127
+ return ret;
438
+ { \
128
+ }
439
+ if (CHECK(s, a)) { \
129
+
440
+ static gen_helper_opivx *const fns[4] = { \
130
+ ret = kvm_riscv_get_regs_csr(cs);
441
+ gen_helper_##NAME##_b, \
131
+ if (ret) {
442
+ gen_helper_##NAME##_h, \
132
+ return ret;
443
+ gen_helper_##NAME##_w, \
133
+ }
444
+ gen_helper_##NAME##_d, \
134
+
445
+ }; \
135
+ ret = kvm_riscv_get_regs_fp(cs);
446
+ return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
136
+ if (ret) {
447
+ } \
137
+ return ret;
448
+ return false; \
138
+ }
449
+ }
139
+
450
+
140
+ return ret;
451
+/* vandn.v[vx] */
141
}
452
+GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andc, zvbb_vv_check)
142
453
+GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvbb_vx_check)
143
int kvm_arch_put_registers(CPUState *cs, int level)
454
+
455
+#define GEN_OPIV_TRANS(NAME, CHECK) \
456
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
457
+ { \
458
+ if (CHECK(s, a)) { \
459
+ uint32_t data = 0; \
460
+ static gen_helper_gvec_3_ptr *const fns[4] = { \
461
+ gen_helper_##NAME##_b, \
462
+ gen_helper_##NAME##_h, \
463
+ gen_helper_##NAME##_w, \
464
+ gen_helper_##NAME##_d, \
465
+ }; \
466
+ TCGLabel *over = gen_new_label(); \
467
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
468
+ \
469
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
470
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
471
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
472
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
473
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
474
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
475
+ vreg_ofs(s, a->rs2), cpu_env, \
476
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
477
+ data, fns[s->sew]); \
478
+ mark_vs_dirty(s); \
479
+ gen_set_label(over); \
480
+ return true; \
481
+ } \
482
+ return false; \
483
+ }
484
+
485
+static bool zvbb_opiv_check(DisasContext *s, arg_rmr *a)
486
+{
487
+ return s->cfg_ptr->ext_zvbb == true &&
488
+ require_rvv(s) &&
489
+ vext_check_isa_ill(s) &&
490
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
491
+}
492
+
493
+GEN_OPIV_TRANS(vbrev8_v, zvbb_opiv_check)
494
+GEN_OPIV_TRANS(vrev8_v, zvbb_opiv_check)
495
+GEN_OPIV_TRANS(vbrev_v, zvbb_opiv_check)
496
+GEN_OPIV_TRANS(vclz_v, zvbb_opiv_check)
497
+GEN_OPIV_TRANS(vctz_v, zvbb_opiv_check)
498
+GEN_OPIV_TRANS(vcpop_v, zvbb_opiv_check)
499
+
500
+static bool vwsll_vv_check(DisasContext *s, arg_rmrr *a)
501
+{
502
+ return s->cfg_ptr->ext_zvbb && opivv_widen_check(s, a);
503
+}
504
+
505
+static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
506
+{
507
+ return s->cfg_ptr->ext_zvbb && opivx_widen_check(s, a);
508
+}
509
+
510
+/* OPIVI without GVEC IR */
511
+#define GEN_OPIVI_WIDEN_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
512
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
513
+ { \
514
+ if (CHECK(s, a)) { \
515
+ static gen_helper_opivx *const fns[3] = { \
516
+ gen_helper_##OPIVX##_b, \
517
+ gen_helper_##OPIVX##_h, \
518
+ gen_helper_##OPIVX##_w, \
519
+ }; \
520
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, \
521
+ IMM_MODE); \
522
+ } \
523
+ return false; \
524
+ }
525
+
526
+GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
527
+GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
528
+GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
144
--
529
--
145
2.31.1
530
2.41.0
146
147
diff view generated by jsdifflib
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
'host' type cpu is set isa to RV32 or RV64 simply, more isa info
3
This commit adds support for the Zvkned vector-crypto extension, which
4
will obtain from KVM in kvm_arch_init_vcpu()
4
consists of the following instructions:
5
5
6
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
6
* vaesef.[vv,vs]
7
Signed-off-by: Mingwang Li <limingwang@huawei.com>
7
* vaesdf.[vv,vs]
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
* vaesdm.[vv,vs]
9
Reviewed-by: Anup Patel <anup.patel@wdc.com>
9
* vaesz.vs
10
Message-id: 20220112081329.1835-10-jiangyifei@huawei.com
10
* vaesem.[vv,vs]
11
* vaeskf1.vi
12
* vaeskf2.vi
13
14
Translation functions are defined in
15
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
16
`target/riscv/vcrypto_helper.c`.
17
18
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
19
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
20
[max.chou@sifive.com: Replaced vstart checking by TCG op]
21
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
22
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
23
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
24
Signed-off-by: Max Chou <max.chou@sifive.com>
25
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
26
[max.chou@sifive.com: Imported aes-round.h and exposed x-zvkned
27
property]
28
[max.chou@sifive.com: Fixed endian issues and replaced the vstart & vl
29
egs checking by helper function]
30
[max.chou@sifive.com: Replaced bswap32 calls in aes key expanding]
31
Message-ID: <20230711165917.2629866-10-max.chou@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
33
---
13
target/riscv/cpu.h | 1 +
34
target/riscv/cpu_cfg.h | 1 +
14
target/riscv/cpu.c | 15 +++++++++++++++
35
target/riscv/helper.h | 14 ++
15
2 files changed, 16 insertions(+)
36
target/riscv/insn32.decode | 14 ++
37
target/riscv/cpu.c | 4 +-
38
target/riscv/vcrypto_helper.c | 202 +++++++++++++++++++++++
39
target/riscv/insn_trans/trans_rvvk.c.inc | 147 +++++++++++++++++
40
6 files changed, 381 insertions(+), 1 deletion(-)
16
41
17
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
42
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
18
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu.h
44
--- a/target/riscv/cpu_cfg.h
20
+++ b/target/riscv/cpu.h
45
+++ b/target/riscv/cpu_cfg.h
46
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
47
bool ext_zve64d;
48
bool ext_zvbb;
49
bool ext_zvbc;
50
+ bool ext_zvkned;
51
bool ext_zmmul;
52
bool ext_zvfbfmin;
53
bool ext_zvfbfwma;
54
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/riscv/helper.h
57
+++ b/target/riscv/helper.h
58
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
59
DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
60
DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
61
DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
62
+
63
+DEF_HELPER_2(egs_check, void, i32, env)
64
+
65
+DEF_HELPER_4(vaesef_vv, void, ptr, ptr, env, i32)
66
+DEF_HELPER_4(vaesef_vs, void, ptr, ptr, env, i32)
67
+DEF_HELPER_4(vaesdf_vv, void, ptr, ptr, env, i32)
68
+DEF_HELPER_4(vaesdf_vs, void, ptr, ptr, env, i32)
69
+DEF_HELPER_4(vaesem_vv, void, ptr, ptr, env, i32)
70
+DEF_HELPER_4(vaesem_vs, void, ptr, ptr, env, i32)
71
+DEF_HELPER_4(vaesdm_vv, void, ptr, ptr, env, i32)
72
+DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
73
+DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
74
+DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
75
+DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
76
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/riscv/insn32.decode
79
+++ b/target/riscv/insn32.decode
21
@@ -XXX,XX +XXX,XX @@
80
@@ -XXX,XX +XXX,XX @@
22
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
81
@r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
23
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
82
@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
24
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
83
@r2 ....... ..... ..... ... ..... ....... &r2 %rs1 %rd
25
+#define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host")
84
+@r2_vm_1 ...... . ..... ..... ... ..... ....... &rmr vm=1 %rs2 %rd
26
85
@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
27
#if defined(TARGET_RISCV32)
86
@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
28
# define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE32
87
@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
88
@@ -XXX,XX +XXX,XX @@ vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
89
vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
90
vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
91
vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
92
+
93
+# *** Zvkned vector crypto extension ***
94
+vaesef_vv 101000 1 ..... 00011 010 ..... 1110111 @r2_vm_1
95
+vaesef_vs 101001 1 ..... 00011 010 ..... 1110111 @r2_vm_1
96
+vaesdf_vv 101000 1 ..... 00001 010 ..... 1110111 @r2_vm_1
97
+vaesdf_vs 101001 1 ..... 00001 010 ..... 1110111 @r2_vm_1
98
+vaesem_vv 101000 1 ..... 00010 010 ..... 1110111 @r2_vm_1
99
+vaesem_vs 101001 1 ..... 00010 010 ..... 1110111 @r2_vm_1
100
+vaesdm_vv 101000 1 ..... 00000 010 ..... 1110111 @r2_vm_1
101
+vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
102
+vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
103
+vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
104
+vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
29
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
105
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
30
index XXXXXXX..XXXXXXX 100644
106
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu.c
107
--- a/target/riscv/cpu.c
32
+++ b/target/riscv/cpu.c
108
+++ b/target/riscv/cpu.c
33
@@ -XXX,XX +XXX,XX @@ static void rv32_imafcu_nommu_cpu_init(Object *obj)
109
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
34
}
110
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
35
#endif
111
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
36
112
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
37
+#if defined(CONFIG_KVM)
113
+ ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
38
+static void riscv_host_cpu_init(Object *obj)
114
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
39
+{
115
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
40
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
116
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
41
+#if defined(TARGET_RISCV32)
117
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
42
+ set_misa(env, MXL_RV32, 0);
118
* In principle Zve*x would also suffice here, were they supported
43
+#elif defined(TARGET_RISCV64)
119
* in qemu
44
+ set_misa(env, MXL_RV64, 0);
120
*/
45
+#endif
121
- if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
46
+}
122
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
47
+#endif
123
error_setg(errp,
48
+
124
"Vector crypto extensions require V or Zve* extensions");
49
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
125
return;
50
{
126
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
51
ObjectClass *oc;
127
/* Vector cryptography extensions */
52
@@ -XXX,XX +XXX,XX @@ static const TypeInfo riscv_cpu_type_infos[] = {
128
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
53
.class_init = riscv_cpu_class_init,
129
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
54
},
130
+ DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
55
DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
131
56
+#if defined(CONFIG_KVM)
132
DEFINE_PROP_END_OF_LIST(),
57
+ DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
133
};
58
+#endif
134
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
59
#if defined(TARGET_RISCV32)
135
index XXXXXXX..XXXXXXX 100644
60
DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
136
--- a/target/riscv/vcrypto_helper.c
61
DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
137
+++ b/target/riscv/vcrypto_helper.c
138
@@ -XXX,XX +XXX,XX @@
139
#include "qemu/bitops.h"
140
#include "qemu/bswap.h"
141
#include "cpu.h"
142
+#include "crypto/aes.h"
143
+#include "crypto/aes-round.h"
144
#include "exec/memop.h"
145
#include "exec/exec-all.h"
146
#include "exec/helper-proto.h"
147
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
148
GEN_VEXT_VX(vwsll_vx_b, 2)
149
GEN_VEXT_VX(vwsll_vx_h, 4)
150
GEN_VEXT_VX(vwsll_vx_w, 8)
151
+
152
+void HELPER(egs_check)(uint32_t egs, CPURISCVState *env)
153
+{
154
+ uint32_t vl = env->vl;
155
+ uint32_t vstart = env->vstart;
156
+
157
+ if (vl % egs != 0 || vstart % egs != 0) {
158
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
159
+ }
160
+}
161
+
162
+static inline void xor_round_key(AESState *round_state, AESState *round_key)
163
+{
164
+ round_state->v = round_state->v ^ round_key->v;
165
+}
166
+
167
+#define GEN_ZVKNED_HELPER_VV(NAME, ...) \
168
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
169
+ uint32_t desc) \
170
+ { \
171
+ uint32_t vl = env->vl; \
172
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
173
+ uint32_t vta = vext_vta(desc); \
174
+ \
175
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
176
+ AESState round_key; \
177
+ round_key.d[0] = *((uint64_t *)vs2 + H8(i * 2 + 0)); \
178
+ round_key.d[1] = *((uint64_t *)vs2 + H8(i * 2 + 1)); \
179
+ AESState round_state; \
180
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
181
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
182
+ __VA_ARGS__; \
183
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
184
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
185
+ } \
186
+ env->vstart = 0; \
187
+ /* set tail elements to 1s */ \
188
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
189
+ }
190
+
191
+#define GEN_ZVKNED_HELPER_VS(NAME, ...) \
192
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
193
+ uint32_t desc) \
194
+ { \
195
+ uint32_t vl = env->vl; \
196
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
197
+ uint32_t vta = vext_vta(desc); \
198
+ \
199
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
200
+ AESState round_key; \
201
+ round_key.d[0] = *((uint64_t *)vs2 + H8(0)); \
202
+ round_key.d[1] = *((uint64_t *)vs2 + H8(1)); \
203
+ AESState round_state; \
204
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
205
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
206
+ __VA_ARGS__; \
207
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
208
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
209
+ } \
210
+ env->vstart = 0; \
211
+ /* set tail elements to 1s */ \
212
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
213
+ }
214
+
215
+GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
216
+ &round_state,
217
+ &round_key,
218
+ false);)
219
+GEN_ZVKNED_HELPER_VS(vaesef_vs, aesenc_SB_SR_AK(&round_state,
220
+ &round_state,
221
+ &round_key,
222
+ false);)
223
+GEN_ZVKNED_HELPER_VV(vaesdf_vv, aesdec_ISB_ISR_AK(&round_state,
224
+ &round_state,
225
+ &round_key,
226
+ false);)
227
+GEN_ZVKNED_HELPER_VS(vaesdf_vs, aesdec_ISB_ISR_AK(&round_state,
228
+ &round_state,
229
+ &round_key,
230
+ false);)
231
+GEN_ZVKNED_HELPER_VV(vaesem_vv, aesenc_SB_SR_MC_AK(&round_state,
232
+ &round_state,
233
+ &round_key,
234
+ false);)
235
+GEN_ZVKNED_HELPER_VS(vaesem_vs, aesenc_SB_SR_MC_AK(&round_state,
236
+ &round_state,
237
+ &round_key,
238
+ false);)
239
+GEN_ZVKNED_HELPER_VV(vaesdm_vv, aesdec_ISB_ISR_AK_IMC(&round_state,
240
+ &round_state,
241
+ &round_key,
242
+ false);)
243
+GEN_ZVKNED_HELPER_VS(vaesdm_vs, aesdec_ISB_ISR_AK_IMC(&round_state,
244
+ &round_state,
245
+ &round_key,
246
+ false);)
247
+GEN_ZVKNED_HELPER_VS(vaesz_vs, xor_round_key(&round_state, &round_key);)
248
+
249
+void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
250
+ CPURISCVState *env, uint32_t desc)
251
+{
252
+ uint32_t *vd = vd_vptr;
253
+ uint32_t *vs2 = vs2_vptr;
254
+ uint32_t vl = env->vl;
255
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
256
+ uint32_t vta = vext_vta(desc);
257
+
258
+ uimm &= 0b1111;
259
+ if (uimm > 10 || uimm == 0) {
260
+ uimm ^= 0b1000;
261
+ }
262
+
263
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
264
+ uint32_t rk[8], tmp;
265
+ static const uint32_t rcon[] = {
266
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
267
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
268
+ };
269
+
270
+ rk[0] = vs2[i * 4 + H4(0)];
271
+ rk[1] = vs2[i * 4 + H4(1)];
272
+ rk[2] = vs2[i * 4 + H4(2)];
273
+ rk[3] = vs2[i * 4 + H4(3)];
274
+ tmp = ror32(rk[3], 8);
275
+
276
+ rk[4] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
277
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
278
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
279
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
280
+ ^ rcon[uimm - 1];
281
+ rk[5] = rk[1] ^ rk[4];
282
+ rk[6] = rk[2] ^ rk[5];
283
+ rk[7] = rk[3] ^ rk[6];
284
+
285
+ vd[i * 4 + H4(0)] = rk[4];
286
+ vd[i * 4 + H4(1)] = rk[5];
287
+ vd[i * 4 + H4(2)] = rk[6];
288
+ vd[i * 4 + H4(3)] = rk[7];
289
+ }
290
+ env->vstart = 0;
291
+ /* set tail elements to 1s */
292
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
293
+}
294
+
295
+void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
296
+ CPURISCVState *env, uint32_t desc)
297
+{
298
+ uint32_t *vd = vd_vptr;
299
+ uint32_t *vs2 = vs2_vptr;
300
+ uint32_t vl = env->vl;
301
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
302
+ uint32_t vta = vext_vta(desc);
303
+
304
+ uimm &= 0b1111;
305
+ if (uimm > 14 || uimm < 2) {
306
+ uimm ^= 0b1000;
307
+ }
308
+
309
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
310
+ uint32_t rk[12], tmp;
311
+ static const uint32_t rcon[] = {
312
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
313
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
314
+ };
315
+
316
+ rk[0] = vd[i * 4 + H4(0)];
317
+ rk[1] = vd[i * 4 + H4(1)];
318
+ rk[2] = vd[i * 4 + H4(2)];
319
+ rk[3] = vd[i * 4 + H4(3)];
320
+ rk[4] = vs2[i * 4 + H4(0)];
321
+ rk[5] = vs2[i * 4 + H4(1)];
322
+ rk[6] = vs2[i * 4 + H4(2)];
323
+ rk[7] = vs2[i * 4 + H4(3)];
324
+
325
+ if (uimm % 2 == 0) {
326
+ tmp = ror32(rk[7], 8);
327
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
328
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
329
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
330
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
331
+ ^ rcon[(uimm - 1) / 2];
332
+ } else {
333
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(rk[7] >> 24) & 0xff] << 24) |
334
+ ((uint32_t)AES_sbox[(rk[7] >> 16) & 0xff] << 16) |
335
+ ((uint32_t)AES_sbox[(rk[7] >> 8) & 0xff] << 8) |
336
+ ((uint32_t)AES_sbox[(rk[7] >> 0) & 0xff] << 0));
337
+ }
338
+ rk[9] = rk[1] ^ rk[8];
339
+ rk[10] = rk[2] ^ rk[9];
340
+ rk[11] = rk[3] ^ rk[10];
341
+
342
+ vd[i * 4 + H4(0)] = rk[8];
343
+ vd[i * 4 + H4(1)] = rk[9];
344
+ vd[i * 4 + H4(2)] = rk[10];
345
+ vd[i * 4 + H4(3)] = rk[11];
346
+ }
347
+ env->vstart = 0;
348
+ /* set tail elements to 1s */
349
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
350
+}
351
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
352
index XXXXXXX..XXXXXXX 100644
353
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
354
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
355
@@ -XXX,XX +XXX,XX @@ static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
356
GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
357
GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
358
GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
359
+
360
+/*
361
+ * Zvkned
362
+ */
363
+
364
+#define ZVKNED_EGS 4
365
+
366
+#define GEN_V_UNMASKED_TRANS(NAME, CHECK, EGS) \
367
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
368
+ { \
369
+ if (CHECK(s, a)) { \
370
+ TCGv_ptr rd_v, rs2_v; \
371
+ TCGv_i32 desc, egs; \
372
+ uint32_t data = 0; \
373
+ TCGLabel *over = gen_new_label(); \
374
+ \
375
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
376
+ /* save opcode for unwinding in case we throw an exception */ \
377
+ decode_save_opc(s); \
378
+ egs = tcg_constant_i32(EGS); \
379
+ gen_helper_egs_check(egs, cpu_env); \
380
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
381
+ } \
382
+ \
383
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
384
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
385
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
386
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
387
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
388
+ rd_v = tcg_temp_new_ptr(); \
389
+ rs2_v = tcg_temp_new_ptr(); \
390
+ desc = tcg_constant_i32( \
391
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
392
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
393
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
394
+ gen_helper_##NAME(rd_v, rs2_v, cpu_env, desc); \
395
+ mark_vs_dirty(s); \
396
+ gen_set_label(over); \
397
+ return true; \
398
+ } \
399
+ return false; \
400
+ }
401
+
402
+static bool vaes_check_vv(DisasContext *s, arg_rmr *a)
403
+{
404
+ int egw_bytes = ZVKNED_EGS << s->sew;
405
+ return s->cfg_ptr->ext_zvkned == true &&
406
+ require_rvv(s) &&
407
+ vext_check_isa_ill(s) &&
408
+ MAXSZ(s) >= egw_bytes &&
409
+ require_align(a->rd, s->lmul) &&
410
+ require_align(a->rs2, s->lmul) &&
411
+ s->sew == MO_32;
412
+}
413
+
414
+static bool vaes_check_overlap(DisasContext *s, int vd, int vs2)
415
+{
416
+ int8_t op_size = s->lmul <= 0 ? 1 : 1 << s->lmul;
417
+ return !is_overlapped(vd, op_size, vs2, 1);
418
+}
419
+
420
+static bool vaes_check_vs(DisasContext *s, arg_rmr *a)
421
+{
422
+ int egw_bytes = ZVKNED_EGS << s->sew;
423
+ return vaes_check_overlap(s, a->rd, a->rs2) &&
424
+ MAXSZ(s) >= egw_bytes &&
425
+ s->cfg_ptr->ext_zvkned == true &&
426
+ require_rvv(s) &&
427
+ vext_check_isa_ill(s) &&
428
+ require_align(a->rd, s->lmul) &&
429
+ s->sew == MO_32;
430
+}
431
+
432
+GEN_V_UNMASKED_TRANS(vaesef_vv, vaes_check_vv, ZVKNED_EGS)
433
+GEN_V_UNMASKED_TRANS(vaesef_vs, vaes_check_vs, ZVKNED_EGS)
434
+GEN_V_UNMASKED_TRANS(vaesdf_vv, vaes_check_vv, ZVKNED_EGS)
435
+GEN_V_UNMASKED_TRANS(vaesdf_vs, vaes_check_vs, ZVKNED_EGS)
436
+GEN_V_UNMASKED_TRANS(vaesdm_vv, vaes_check_vv, ZVKNED_EGS)
437
+GEN_V_UNMASKED_TRANS(vaesdm_vs, vaes_check_vs, ZVKNED_EGS)
438
+GEN_V_UNMASKED_TRANS(vaesz_vs, vaes_check_vs, ZVKNED_EGS)
439
+GEN_V_UNMASKED_TRANS(vaesem_vv, vaes_check_vv, ZVKNED_EGS)
440
+GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
441
+
442
+#define GEN_VI_UNMASKED_TRANS(NAME, CHECK, EGS) \
443
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
444
+ { \
445
+ if (CHECK(s, a)) { \
446
+ TCGv_ptr rd_v, rs2_v; \
447
+ TCGv_i32 uimm_v, desc, egs; \
448
+ uint32_t data = 0; \
449
+ TCGLabel *over = gen_new_label(); \
450
+ \
451
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
452
+ /* save opcode for unwinding in case we throw an exception */ \
453
+ decode_save_opc(s); \
454
+ egs = tcg_constant_i32(EGS); \
455
+ gen_helper_egs_check(egs, cpu_env); \
456
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
457
+ } \
458
+ \
459
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
460
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
461
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
462
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
463
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
464
+ \
465
+ rd_v = tcg_temp_new_ptr(); \
466
+ rs2_v = tcg_temp_new_ptr(); \
467
+ uimm_v = tcg_constant_i32(a->rs1); \
468
+ desc = tcg_constant_i32( \
469
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
470
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
471
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
472
+ gen_helper_##NAME(rd_v, rs2_v, uimm_v, cpu_env, desc); \
473
+ mark_vs_dirty(s); \
474
+ gen_set_label(over); \
475
+ return true; \
476
+ } \
477
+ return false; \
478
+ }
479
+
480
+static bool vaeskf1_check(DisasContext *s, arg_vaeskf1_vi *a)
481
+{
482
+ int egw_bytes = ZVKNED_EGS << s->sew;
483
+ return s->cfg_ptr->ext_zvkned == true &&
484
+ require_rvv(s) &&
485
+ vext_check_isa_ill(s) &&
486
+ MAXSZ(s) >= egw_bytes &&
487
+ s->sew == MO_32 &&
488
+ require_align(a->rd, s->lmul) &&
489
+ require_align(a->rs2, s->lmul);
490
+}
491
+
492
+static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
493
+{
494
+ int egw_bytes = ZVKNED_EGS << s->sew;
495
+ return s->cfg_ptr->ext_zvkned == true &&
496
+ require_rvv(s) &&
497
+ vext_check_isa_ill(s) &&
498
+ MAXSZ(s) >= egw_bytes &&
499
+ s->sew == MO_32 &&
500
+ require_align(a->rd, s->lmul) &&
501
+ require_align(a->rs2, s->lmul);
502
+}
503
+
504
+GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
505
+GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
--
2.41.0

From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>

This commit adds support for the Zvknh vector-crypto extension, which
consists of the following instructions:

* vsha2ms.vv
* vsha2c[hl].vv

Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.

Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
[max.chou@sifive.com: Replaced vstart checking by TCG op]
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
[max.chou@sifive.com: Exposed x-zvknha & x-zvknhb properties]
[max.chou@sifive.com: Replaced SEW selection to happen during
translation]
Message-ID: <20230711165917.2629866-11-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/cpu_cfg.h | 2 +
target/riscv/helper.h | 6 +
target/riscv/insn32.decode | 5 +
target/riscv/cpu.c | 13 +-
target/riscv/vcrypto_helper.c | 238 +++++++++++++++++++++++
target/riscv/insn_trans/trans_rvvk.c.inc | 129 ++++++++++++
6 files changed, 390 insertions(+), 3 deletions(-)

index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu.h
37
--- a/target/riscv/cpu_cfg.h
19
+++ b/target/riscv/cpu.h
38
+++ b/target/riscv/cpu_cfg.h
20
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
39
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
21
target_ulong upmmask;
40
bool ext_zvbb;
22
target_ulong upmbase;
41
bool ext_zvbc;
23
#endif
42
bool ext_zvkned;
24
+ target_ulong cur_pmmask;
43
+ bool ext_zvknha;
25
+ target_ulong cur_pmbase;
44
+ bool ext_zvknhb;
26
45
bool ext_zmmul;
27
float_status fp_status;
46
bool ext_zvfbfmin;
28
47
bool ext_zvfbfwma;
29
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
48
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
30
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
49
index XXXXXXX..XXXXXXX 100644
31
target_ulong *cs_base, uint32_t *pflags);
50
--- a/target/riscv/helper.h
32
51
+++ b/target/riscv/helper.h
33
+void riscv_cpu_update_mask(CPURISCVState *env);
52
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
34
+
53
DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
35
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
54
DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
36
target_ulong *ret_value,
55
DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
37
target_ulong new_value, target_ulong write_mask);
56
+
57
+DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
58
+DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
59
+DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
60
+DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
61
+DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
62
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/riscv/insn32.decode
65
+++ b/target/riscv/insn32.decode
66
@@ -XXX,XX +XXX,XX @@ vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
67
vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
68
vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
69
vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
70
+
71
+# *** Zvknh vector crypto extension ***
72
+vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
73
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
74
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
38
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
75
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
39
index XXXXXXX..XXXXXXX 100644
76
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/cpu.c
77
--- a/target/riscv/cpu.c
41
+++ b/target/riscv/cpu.c
78
+++ b/target/riscv/cpu.c
42
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset(DeviceState *dev)
79
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
43
env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
80
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
44
#endif
81
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
45
env->xl = riscv_cpu_mxl(env);
82
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
46
+ riscv_cpu_update_mask(env);
83
+ ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
47
cs->exception_index = RISCV_EXCP_NONE;
84
+ ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
48
env->load_res = -1;
85
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
49
set_default_nan_mode(1, &env->fp_status);
86
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
50
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
87
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
88
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
89
* In principle Zve*x would also suffice here, were they supported
90
* in qemu
91
*/
92
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
93
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
94
+ !cpu->cfg.ext_zve32f) {
95
error_setg(errp,
96
"Vector crypto extensions require V or Zve* extensions");
97
return;
98
}
99
100
- if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
101
- error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
102
+ if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
103
+ error_setg(
104
+ errp,
105
+ "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
106
return;
107
}
108
109
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
110
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
111
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
112
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
113
+ DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
114
+ DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
115
116
DEFINE_PROP_END_OF_LIST(),
117
};
118
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
51
index XXXXXXX..XXXXXXX 100644
119
index XXXXXXX..XXXXXXX 100644
52
--- a/target/riscv/cpu_helper.c
120
--- a/target/riscv/vcrypto_helper.c
53
+++ b/target/riscv/cpu_helper.c
121
+++ b/target/riscv/vcrypto_helper.c
54
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
122
@@ -XXX,XX +XXX,XX @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
55
*pflags = flags;
123
/* set tail elements to 1s */
124
vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
56
}
125
}
57
126
+
58
+void riscv_cpu_update_mask(CPURISCVState *env)
127
+static inline uint32_t sig0_sha256(uint32_t x)
59
+{
128
+{
60
+ target_ulong mask = -1, base = 0;
129
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
61
+ /*
130
+}
62
+ * TODO: Current RVJ spec does not specify
131
+
63
+ * how the extension interacts with XLEN.
132
+static inline uint32_t sig1_sha256(uint32_t x)
64
+ */
133
+{
65
+#ifndef CONFIG_USER_ONLY
134
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
66
+ if (riscv_has_ext(env, RVJ)) {
135
+}
67
+ switch (env->priv) {
136
+
68
+ case PRV_M:
137
+static inline uint64_t sig0_sha512(uint64_t x)
69
+ if (env->mmte & M_PM_ENABLE) {
138
+{
70
+ mask = env->mpmmask;
139
+ return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
71
+ base = env->mpmbase;
140
+}
72
+ }
141
+
73
+ break;
142
+static inline uint64_t sig1_sha512(uint64_t x)
74
+ case PRV_S:
143
+{
75
+ if (env->mmte & S_PM_ENABLE) {
144
+ return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
76
+ mask = env->spmmask;
145
+}
77
+ base = env->spmbase;
146
+
78
+ }
147
+static inline void vsha2ms_e32(uint32_t *vd, uint32_t *vs1, uint32_t *vs2)
79
+ break;
148
+{
80
+ case PRV_U:
149
+ uint32_t res[4];
81
+ if (env->mmte & U_PM_ENABLE) {
150
+ res[0] = sig1_sha256(vs1[H4(2)]) + vs2[H4(1)] + sig0_sha256(vd[H4(1)]) +
82
+ mask = env->upmmask;
151
+ vd[H4(0)];
83
+ base = env->upmbase;
152
+ res[1] = sig1_sha256(vs1[H4(3)]) + vs2[H4(2)] + sig0_sha256(vd[H4(2)]) +
84
+ }
153
+ vd[H4(1)];
85
+ break;
154
+ res[2] =
86
+ default:
155
+ sig1_sha256(res[0]) + vs2[H4(3)] + sig0_sha256(vd[H4(3)]) + vd[H4(2)];
87
+ g_assert_not_reached();
156
+ res[3] =
157
+ sig1_sha256(res[1]) + vs1[H4(0)] + sig0_sha256(vs2[H4(0)]) + vd[H4(3)];
158
+ vd[H4(3)] = res[3];
159
+ vd[H4(2)] = res[2];
160
+ vd[H4(1)] = res[1];
161
+ vd[H4(0)] = res[0];
162
+}
163
+
164
+static inline void vsha2ms_e64(uint64_t *vd, uint64_t *vs1, uint64_t *vs2)
165
+{
166
+ uint64_t res[4];
167
+ res[0] = sig1_sha512(vs1[2]) + vs2[1] + sig0_sha512(vd[1]) + vd[0];
168
+ res[1] = sig1_sha512(vs1[3]) + vs2[2] + sig0_sha512(vd[2]) + vd[1];
169
+ res[2] = sig1_sha512(res[0]) + vs2[3] + sig0_sha512(vd[3]) + vd[2];
170
+ res[3] = sig1_sha512(res[1]) + vs1[0] + sig0_sha512(vs2[0]) + vd[3];
171
+ vd[3] = res[3];
172
+ vd[2] = res[2];
173
+ vd[1] = res[1];
174
+ vd[0] = res[0];
175
+}
176
+
177
+void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
178
+ uint32_t desc)
179
+{
180
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
181
+ uint32_t esz = sew == MO_32 ? 4 : 8;
182
+ uint32_t total_elems;
183
+ uint32_t vta = vext_vta(desc);
184
+
185
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
186
+ if (sew == MO_32) {
187
+ vsha2ms_e32(((uint32_t *)vd) + i * 4, ((uint32_t *)vs1) + i * 4,
188
+ ((uint32_t *)vs2) + i * 4);
189
+ } else {
190
+ /* If not 32 then SEW should be 64 */
191
+ vsha2ms_e64(((uint64_t *)vd) + i * 4, ((uint64_t *)vs1) + i * 4,
192
+ ((uint64_t *)vs2) + i * 4);
88
+ }
193
+ }
89
+ }
194
+ }
90
+#endif
195
+ /* set tail elements to 1s */
91
+ if (env->xl == MXL_RV32) {
196
+ total_elems = vext_get_total_elems(env, desc, esz);
92
+ env->cur_pmmask = mask & UINT32_MAX;
197
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
93
+ env->cur_pmbase = base & UINT32_MAX;
198
+ env->vstart = 0;
94
+ } else {
199
+}
95
+ env->cur_pmmask = mask;
200
+
96
+ env->cur_pmbase = base;
201
+static inline uint64_t sum0_64(uint64_t x)
97
+ }
202
+{
98
+}
203
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
99
+
204
+}
100
#ifndef CONFIG_USER_ONLY
205
+
101
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
206
+static inline uint32_t sum0_32(uint32_t x)
102
{
207
+{
103
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
208
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
104
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
209
+}
105
env->priv = newpriv;
210
+
106
env->xl = cpu_recompute_xl(env);
211
+static inline uint64_t sum1_64(uint64_t x)
107
+ riscv_cpu_update_mask(env);
212
+{
108
213
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
109
/*
214
+}
110
* Clear the load reservation - otherwise a reservation placed in one
215
+
111
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
216
+static inline uint32_t sum1_32(uint32_t x)
217
+{
218
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
219
+}
220
+
221
+#define ch(x, y, z) ((x & y) ^ ((~x) & z))
222
+
223
+#define maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
224
+
225
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
226
+{
227
+ uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
228
+ uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
229
+ uint64_t W0 = vs1[0], W1 = vs1[1];
230
+ uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
231
+ uint64_t T2 = sum0_64(a) + maj(a, b, c);
232
+
233
+ h = g;
234
+ g = f;
235
+ f = e;
236
+ e = d + T1;
237
+ d = c;
238
+ c = b;
239
+ b = a;
240
+ a = T1 + T2;
241
+
242
+ T1 = h + sum1_64(e) + ch(e, f, g) + W1;
243
+ T2 = sum0_64(a) + maj(a, b, c);
244
+ h = g;
245
+ g = f;
246
+ f = e;
247
+ e = d + T1;
248
+ d = c;
249
+ c = b;
250
+ b = a;
251
+ a = T1 + T2;
252
+
253
+ vd[0] = f;
254
+ vd[1] = e;
255
+ vd[2] = b;
256
+ vd[3] = a;
257
+}
258
+
259
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
260
+{
261
+ uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
262
+ uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
263
+ uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
264
+ uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
265
+ uint32_t T2 = sum0_32(a) + maj(a, b, c);
266
+
267
+ h = g;
268
+ g = f;
269
+ f = e;
270
+ e = d + T1;
271
+ d = c;
272
+ c = b;
273
+ b = a;
274
+ a = T1 + T2;
275
+
276
+ T1 = h + sum1_32(e) + ch(e, f, g) + W1;
277
+ T2 = sum0_32(a) + maj(a, b, c);
278
+ h = g;
279
+ g = f;
280
+ f = e;
281
+ e = d + T1;
282
+ d = c;
283
+ c = b;
284
+ b = a;
285
+ a = T1 + T2;
286
+
287
+ vd[H4(0)] = f;
288
+ vd[H4(1)] = e;
289
+ vd[H4(2)] = b;
290
+ vd[H4(3)] = a;
291
+}
292
+
293
+void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
294
+ uint32_t desc)
295
+{
296
+ const uint32_t esz = 4;
297
+ uint32_t total_elems;
298
+ uint32_t vta = vext_vta(desc);
299
+
300
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
301
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
302
+ ((uint32_t *)vs1) + 4 * i + 2);
303
+ }
304
+
305
+ /* set tail elements to 1s */
306
+ total_elems = vext_get_total_elems(env, desc, esz);
307
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
308
+ env->vstart = 0;
309
+}
310
+
311
+void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
312
+ uint32_t desc)
313
+{
314
+ const uint32_t esz = 8;
315
+ uint32_t total_elems;
316
+ uint32_t vta = vext_vta(desc);
317
+
318
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
319
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
320
+ ((uint64_t *)vs1) + 4 * i + 2);
321
+ }
322
+
323
+ /* set tail elements to 1s */
324
+ total_elems = vext_get_total_elems(env, desc, esz);
325
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
326
+ env->vstart = 0;
327
+}
328
+
329
+void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
330
+ uint32_t desc)
331
+{
332
+ const uint32_t esz = 4;
333
+ uint32_t total_elems;
334
+ uint32_t vta = vext_vta(desc);
335
+
336
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
337
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
338
+ (((uint32_t *)vs1) + 4 * i));
339
+ }
340
+
341
+ /* set tail elements to 1s */
342
+ total_elems = vext_get_total_elems(env, desc, esz);
343
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
344
+ env->vstart = 0;
345
+}
346
+
347
+void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
348
+ uint32_t desc)
349
+{
350
+ uint32_t esz = 8;
351
+ uint32_t total_elems;
352
+ uint32_t vta = vext_vta(desc);
353
+
354
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
355
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
356
+ (((uint64_t *)vs1) + 4 * i));
357
+ }
358
+
359
+ /* set tail elements to 1s */
360
+ total_elems = vext_get_total_elems(env, desc, esz);
361
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
362
+ env->vstart = 0;
363
+}
364
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
112
index XXXXXXX..XXXXXXX 100644
365
index XXXXXXX..XXXXXXX 100644
113
--- a/target/riscv/csr.c
366
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
114
+++ b/target/riscv/csr.c
367
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
115
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mmte(CPURISCVState *env, int csrno,
368
@@ -XXX,XX +XXX,XX @@ static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
116
/* hardwiring pm.instruction bit to 0, since it's not supported yet */
369
117
wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
370
GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
118
env->mmte = wpri_val | PM_EXT_DIRTY;
371
GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
119
+ riscv_cpu_update_mask(env);
372
+
120
373
+/*
121
/* Set XS and SD bits, since PM CSRs are dirty */
374
+ * Zvknh
122
mstatus = env->mstatus | MSTATUS_XS;
375
+ */
123
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
376
+
124
uint64_t mstatus;
377
+#define ZVKNH_EGS 4
125
378
+
126
env->mpmmask = val;
379
+#define GEN_VV_UNMASKED_TRANS(NAME, CHECK, EGS) \
127
+ if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
380
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
128
+ env->cur_pmmask = val;
381
+ { \
129
+ }
382
+ if (CHECK(s, a)) { \
130
env->mmte |= PM_EXT_DIRTY;
383
+ uint32_t data = 0; \
131
384
+ TCGLabel *over = gen_new_label(); \
132
/* Set XS and SD bits, since PM CSRs are dirty */
385
+ TCGv_i32 egs; \
133
@@ -XXX,XX +XXX,XX @@ static RISCVException write_spmmask(CPURISCVState *env, int csrno,
386
+ \
134
return RISCV_EXCP_NONE;
387
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
135
}
388
+ /* save opcode for unwinding in case we throw an exception */ \
136
env->spmmask = val;
389
+ decode_save_opc(s); \
137
+ if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
390
+ egs = tcg_constant_i32(EGS); \
138
+ env->cur_pmmask = val;
391
+ gen_helper_egs_check(egs, cpu_env); \
139
+ }
392
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
140
env->mmte |= PM_EXT_DIRTY;
393
+ } \
141
394
+ \
142
/* Set XS and SD bits, since PM CSRs are dirty */
395
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
143
@@ -XXX,XX +XXX,XX @@ static RISCVException write_upmmask(CPURISCVState *env, int csrno,
396
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
144
return RISCV_EXCP_NONE;
397
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
145
}
398
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
146
env->upmmask = val;
399
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
147
+ if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
400
+ \
148
+ env->cur_pmmask = val;
401
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
149
+ }
402
+ vreg_ofs(s, a->rs2), cpu_env, \
150
env->mmte |= PM_EXT_DIRTY;
403
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
151
404
+ data, gen_helper_##NAME); \
152
/* Set XS and SD bits, since PM CSRs are dirty */
405
+ \
153
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
406
+ mark_vs_dirty(s); \
154
uint64_t mstatus;
407
+ gen_set_label(over); \
155
408
+ return true; \
156
env->mpmbase = val;
409
+ } \
157
+ if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
410
+ return false; \
158
+ env->cur_pmbase = val;
411
+ }
159
+ }
412
+
160
env->mmte |= PM_EXT_DIRTY;
413
+static bool vsha_check_sew(DisasContext *s)
161
414
+{
162
/* Set XS and SD bits, since PM CSRs are dirty */
415
+ return (s->cfg_ptr->ext_zvknha == true && s->sew == MO_32) ||
163
@@ -XXX,XX +XXX,XX @@ static RISCVException write_spmbase(CPURISCVState *env, int csrno,
416
+ (s->cfg_ptr->ext_zvknhb == true &&
164
return RISCV_EXCP_NONE;
417
+ (s->sew == MO_32 || s->sew == MO_64));
165
}
418
+}
166
env->spmbase = val;
419
+
167
+ if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
420
+static bool vsha_check(DisasContext *s, arg_rmrr *a)
168
+ env->cur_pmbase = val;
421
+{
169
+ }
422
+ int egw_bytes = ZVKNH_EGS << s->sew;
170
env->mmte |= PM_EXT_DIRTY;
423
+ int mult = 1 << MAX(s->lmul, 0);
171
424
+ return opivv_check(s, a) &&
172
/* Set XS and SD bits, since PM CSRs are dirty */
425
+ vsha_check_sew(s) &&
173
@@ -XXX,XX +XXX,XX @@ static RISCVException write_upmbase(CPURISCVState *env, int csrno,
426
+ MAXSZ(s) >= egw_bytes &&
174
return RISCV_EXCP_NONE;
427
+ !is_overlapped(a->rd, mult, a->rs1, mult) &&
175
}
428
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
176
env->upmbase = val;
429
+ s->lmul >= 0;
177
+ if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
430
+}
178
+ env->cur_pmbase = val;
431
+
179
+ }
432
+GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, ZVKNH_EGS)
180
env->mmte |= PM_EXT_DIRTY;
433
+
181
434
+static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
182
/* Set XS and SD bits, since PM CSRs are dirty */
435
+{
183
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
436
+ if (vsha_check(s, a)) {
184
index XXXXXXX..XXXXXXX 100644
437
+ uint32_t data = 0;
185
--- a/target/riscv/machine.c
438
+ TCGLabel *over = gen_new_label();
186
+++ b/target/riscv/machine.c
439
+ TCGv_i32 egs;
187
@@ -XXX,XX +XXX,XX @@ static int riscv_cpu_post_load(void *opaque, int version_id)
440
+
188
CPURISCVState *env = &cpu->env;
441
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
189
442
+ /* save opcode for unwinding in case we throw an exception */
190
env->xl = cpu_recompute_xl(env);
443
+ decode_save_opc(s);
191
+ riscv_cpu_update_mask(env);
444
+ egs = tcg_constant_i32(ZVKNH_EGS);
192
return 0;
445
+ gen_helper_egs_check(egs, cpu_env);
193
}
446
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
194
447
+ }
448
+
449
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
450
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
451
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
452
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
453
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
454
+
455
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
456
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
457
+ s->cfg_ptr->vlen / 8, data,
458
+ s->sew == MO_32 ?
459
+ gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv);
460
+
461
+ mark_vs_dirty(s);
462
+ gen_set_label(over);
463
+ return true;
464
+ }
465
+ return false;
466
+}
467
+
468
+static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
469
+{
470
+ if (vsha_check(s, a)) {
471
+ uint32_t data = 0;
472
+ TCGLabel *over = gen_new_label();
473
+ TCGv_i32 egs;
474
+
475
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
476
+ /* save opcode for unwinding in case we throw an exception */
477
+ decode_save_opc(s);
478
+ egs = tcg_constant_i32(ZVKNH_EGS);
479
+ gen_helper_egs_check(egs, cpu_env);
480
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
481
+ }
482
+
483
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
484
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
485
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
486
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
487
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
488
+
489
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
490
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
491
+ s->cfg_ptr->vlen / 8, data,
492
+ s->sew == MO_32 ?
493
+ gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv);
494
+
495
+ mark_vs_dirty(s);
496
+ gen_set_label(over);
497
+ return true;
498
+ }
499
+ return false;
500
+}
195
--
501
--
196
2.31.1
502
2.41.0
197
198
diff view generated by jsdifflib
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
2
2
3
Signed-off-by: Frank Chang <frank.chang@sifive.com>
3
This commit adds support for the Zvksh vector-crypto extension, which
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
consists of the following instructions:
5
Message-id: 20220118014522.13613-2-frank.chang@sifive.com
5
6
* vsm3me.vv
7
* vsm3c.vi
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
14
[max.chou@sifive.com: Replaced vstart checking by TCG op]
15
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
16
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
[max.chou@sifive.com: Exposed x-zvksh property]
20
Message-ID: <20230711165917.2629866-12-max.chou@sifive.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
22
---
8
target/riscv/cpu.h | 1 +
23
target/riscv/cpu_cfg.h | 1 +
9
target/riscv/cpu.c | 4 ++++
24
target/riscv/helper.h | 3 +
10
target/riscv/cpu_helper.c | 5 ++++-
25
target/riscv/insn32.decode | 4 +
11
target/riscv/csr.c | 6 +++++-
26
target/riscv/cpu.c | 6 +-
12
target/riscv/translate.c | 2 ++
27
target/riscv/vcrypto_helper.c | 134 +++++++++++++++++++++++
13
5 files changed, 16 insertions(+), 2 deletions(-)
28
target/riscv/insn_trans/trans_rvvk.c.inc | 31 ++++++
14
29
6 files changed, 177 insertions(+), 2 deletions(-)
15
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
30
16
index XXXXXXX..XXXXXXX 100644
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
17
--- a/target/riscv/cpu.h
32
index XXXXXXX..XXXXXXX 100644
18
+++ b/target/riscv/cpu.h
33
--- a/target/riscv/cpu_cfg.h
19
@@ -XXX,XX +XXX,XX @@ struct RISCVCPU {
34
+++ b/target/riscv/cpu_cfg.h
20
bool ext_icsr;
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
21
bool ext_zfh;
36
bool ext_zvkned;
22
bool ext_zfhmin;
37
bool ext_zvknha;
23
+ bool ext_zve64f;
38
bool ext_zvknhb;
24
39
+ bool ext_zvksh;
25
char *priv_spec;
40
bool ext_zmmul;
26
char *user_spec;
41
bool ext_zvfbfmin;
42
bool ext_zvfbfwma;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
48
DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
49
DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
53
+DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
54
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/riscv/insn32.decode
57
+++ b/target/riscv/insn32.decode
58
@@ -XXX,XX +XXX,XX @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
59
vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
61
vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
+
63
+# *** Zvksh vector crypto extension ***
64
+vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
65
+vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
27
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
66
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
28
index XXXXXXX..XXXXXXX 100644
67
index XXXXXXX..XXXXXXX 100644
29
--- a/target/riscv/cpu.c
68
--- a/target/riscv/cpu.c
30
+++ b/target/riscv/cpu.c
69
+++ b/target/riscv/cpu.c
31
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
70
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
32
}
71
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
33
set_vext_version(env, vext_version);
72
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
34
}
73
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
35
+ if (cpu->cfg.ext_zve64f && !cpu->cfg.ext_f) {
74
+ ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
36
+ error_setg(errp, "Zve64f extension depends upon RVF.");
75
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
37
+ return;
76
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
38
+ }
77
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
39
if (cpu->cfg.ext_j) {
78
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
40
ext |= RVJ;
79
* In principle Zve*x would also suffice here, were they supported
41
}
80
* in qemu
42
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
81
*/
43
index XXXXXXX..XXXXXXX 100644
82
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
44
--- a/target/riscv/cpu_helper.c
83
- !cpu->cfg.ext_zve32f) {
45
+++ b/target/riscv/cpu_helper.c
84
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
46
@@ -XXX,XX +XXX,XX @@ static RISCVMXL cpu_get_xl(CPURISCVState *env)
85
+ cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
47
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
86
error_setg(errp,
48
target_ulong *cs_base, uint32_t *pflags)
87
"Vector crypto extensions require V or Zve* extensions");
49
{
88
return;
50
+ CPUState *cs = env_cpu(env);
89
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
51
+ RISCVCPU *cpu = RISCV_CPU(cs);
90
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
52
+
91
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
53
uint32_t flags = 0;
92
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
54
93
+ DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
55
*pc = env->pc;
94
56
*cs_base = 0;
95
DEFINE_PROP_END_OF_LIST(),
57
96
};
58
- if (riscv_has_ext(env, RVV)) {
97
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
59
+ if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve64f) {
98
index XXXXXXX..XXXXXXX 100644
60
/*
99
--- a/target/riscv/vcrypto_helper.c
61
* If env->vl equals to VLMAX, we can use generic vector operation
100
+++ b/target/riscv/vcrypto_helper.c
62
* expanders (GVEC) to accerlate the vector operations.
101
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
63
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
102
vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
64
index XXXXXXX..XXXXXXX 100644
103
env->vstart = 0;
65
--- a/target/riscv/csr.c
104
}
66
+++ b/target/riscv/csr.c
105
+
67
@@ -XXX,XX +XXX,XX @@ static RISCVException fs(CPURISCVState *env, int csrno)
106
+static inline uint32_t p1(uint32_t x)
68
107
+{
69
static RISCVException vs(CPURISCVState *env, int csrno)
108
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
70
{
109
+}
71
- if (env->misa_ext & RVV) {
110
+
72
+ CPUState *cs = env_cpu(env);
111
+static inline uint32_t zvksh_w(uint32_t m16, uint32_t m9, uint32_t m3,
73
+ RISCVCPU *cpu = RISCV_CPU(cs);
112
+ uint32_t m13, uint32_t m6)
74
+
113
+{
75
+ if (env->misa_ext & RVV ||
114
+ return p1(m16 ^ m9 ^ rol32(m3, 15)) ^ rol32(m13, 7) ^ m6;
76
+ cpu->cfg.ext_zve64f) {
115
+}
77
#if !defined(CONFIG_USER_ONLY)
116
+
78
if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
117
+void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
79
return RISCV_EXCP_ILLEGAL_INST;
118
+ CPURISCVState *env, uint32_t desc)
80
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
119
+{
81
index XXXXXXX..XXXXXXX 100644
120
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
82
--- a/target/riscv/translate.c
121
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
83
+++ b/target/riscv/translate.c
122
+ uint32_t vta = vext_vta(desc);
84
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
123
+ uint32_t *vd = vd_vptr;
85
bool ext_ifencei;
124
+ uint32_t *vs1 = vs1_vptr;
86
bool ext_zfh;
125
+ uint32_t *vs2 = vs2_vptr;
87
bool ext_zfhmin;
126
+
88
+ bool ext_zve64f;
127
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
89
bool hlsx;
128
+ uint32_t w[24];
90
/* vector extension */
129
+ for (int j = 0; j < 8; j++) {
91
bool vill;
130
+ w[j] = bswap32(vs1[H4((i * 8) + j)]);
92
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
131
+ w[j + 8] = bswap32(vs2[H4((i * 8) + j)]);
93
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
132
+ }
94
ctx->ext_zfh = cpu->cfg.ext_zfh;
133
+ for (int j = 0; j < 8; j++) {
95
ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
134
+ w[j + 16] =
96
+ ctx->ext_zve64f = cpu->cfg.ext_zve64f;
135
+ zvksh_w(w[j], w[j + 7], w[j + 13], w[j + 3], w[j + 10]);
97
ctx->vlen = cpu->cfg.vlen;
136
+ }
98
ctx->elen = cpu->cfg.elen;
137
+ for (int j = 0; j < 8; j++) {
99
ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS);
138
+ vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
139
+ }
140
+ }
141
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
142
+ env->vstart = 0;
143
+}
144
+
145
+static inline uint32_t ff1(uint32_t x, uint32_t y, uint32_t z)
146
+{
147
+ return x ^ y ^ z;
148
+}
149
+
150
+static inline uint32_t ff2(uint32_t x, uint32_t y, uint32_t z)
151
+{
152
+ return (x & y) | (x & z) | (y & z);
153
+}
154
+
155
+static inline uint32_t ff_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
156
+{
157
+ return (j <= 15) ? ff1(x, y, z) : ff2(x, y, z);
158
+}
159
+
160
+static inline uint32_t gg1(uint32_t x, uint32_t y, uint32_t z)
161
+{
162
+ return x ^ y ^ z;
163
+}
164
+
165
+static inline uint32_t gg2(uint32_t x, uint32_t y, uint32_t z)
166
+{
167
+ return (x & y) | (~x & z);
168
+}
169
+
170
+static inline uint32_t gg_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
171
+{
172
+ return (j <= 15) ? gg1(x, y, z) : gg2(x, y, z);
173
+}
174
+
175
+static inline uint32_t t_j(uint32_t j)
176
+{
177
+ return (j <= 15) ? 0x79cc4519 : 0x7a879d8a;
178
+}
179
+
180
+static inline uint32_t p_0(uint32_t x)
181
+{
182
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
183
+}
184
+
185
+static void sm3c(uint32_t *vd, uint32_t *vs1, uint32_t *vs2, uint32_t uimm)
186
+{
187
+ uint32_t x0, x1;
188
+ uint32_t j;
189
+ uint32_t ss1, ss2, tt1, tt2;
190
+ x0 = vs2[0] ^ vs2[4];
191
+ x1 = vs2[1] ^ vs2[5];
192
+ j = 2 * uimm;
193
+ ss1 = rol32(rol32(vs1[0], 12) + vs1[4] + rol32(t_j(j), j % 32), 7);
194
+ ss2 = ss1 ^ rol32(vs1[0], 12);
195
+ tt1 = ff_j(vs1[0], vs1[1], vs1[2], j) + vs1[3] + ss2 + x0;
196
+ tt2 = gg_j(vs1[4], vs1[5], vs1[6], j) + vs1[7] + ss1 + vs2[0];
197
+ vs1[3] = vs1[2];
198
+ vd[3] = rol32(vs1[1], 9);
199
+ vs1[1] = vs1[0];
200
+ vd[1] = tt1;
201
+ vs1[7] = vs1[6];
202
+ vd[7] = rol32(vs1[5], 19);
203
+ vs1[5] = vs1[4];
204
+ vd[5] = p_0(tt2);
205
+ j = 2 * uimm + 1;
206
+ ss1 = rol32(rol32(vd[1], 12) + vd[5] + rol32(t_j(j), j % 32), 7);
207
+ ss2 = ss1 ^ rol32(vd[1], 12);
208
+ tt1 = ff_j(vd[1], vs1[1], vd[3], j) + vs1[3] + ss2 + x1;
209
+ tt2 = gg_j(vd[5], vs1[5], vd[7], j) + vs1[7] + ss1 + vs2[1];
210
+ vd[2] = rol32(vs1[1], 9);
211
+ vd[0] = tt1;
212
+ vd[6] = rol32(vs1[5], 19);
213
+ vd[4] = p_0(tt2);
214
+}
215
+
216
+void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
217
+ CPURISCVState *env, uint32_t desc)
218
+{
219
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
220
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
221
+ uint32_t vta = vext_vta(desc);
222
+ uint32_t *vd = vd_vptr;
223
+ uint32_t *vs2 = vs2_vptr;
224
+ uint32_t v1[8], v2[8], v3[8];
225
+
226
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
227
+ for (int k = 0; k < 8; k++) {
228
+ v2[k] = bswap32(vd[H4(i * 8 + k)]);
229
+ v3[k] = bswap32(vs2[H4(i * 8 + k)]);
230
+ }
231
+ sm3c(v1, v2, v3, uimm);
232
+ for (int k = 0; k < 8; k++) {
233
+ vd[i * 8 + k] = bswap32(v1[H4(k)]);
234
+ }
235
+ }
236
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
237
+ env->vstart = 0;
238
+}
239
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
240
index XXXXXXX..XXXXXXX 100644
241
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
242
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
243
@@ -XXX,XX +XXX,XX @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
244
}
245
return false;
246
}
247
+
248
+/*
249
+ * Zvksh
250
+ */
251
+
252
+#define ZVKSH_EGS 8
253
+
254
+static inline bool vsm3_check(DisasContext *s, arg_rmrr *a)
255
+{
256
+ int egw_bytes = ZVKSH_EGS << s->sew;
257
+ int mult = 1 << MAX(s->lmul, 0);
258
+ return s->cfg_ptr->ext_zvksh == true &&
259
+ require_rvv(s) &&
260
+ vext_check_isa_ill(s) &&
261
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
262
+ MAXSZ(s) >= egw_bytes &&
263
+ s->sew == MO_32;
264
+}
265
+
266
+static inline bool vsm3me_check(DisasContext *s, arg_rmrr *a)
267
+{
268
+ return vsm3_check(s, a) && vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
269
+}
270
+
271
+static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
272
+{
273
+ return vsm3_check(s, a) && vext_check_ss(s, a->rd, a->rs2, a->vm);
274
+}
275
+
276
+GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
277
+GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
100
--
2.41.0

From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>

This commit adds support for the Zvkg vector-crypto extension, which
consists of the following instructions:

* vgmul.vv
* vghsh.vv

Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.

Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
[max.chou@sifive.com: Replaced vstart checking by TCG op]
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
[max.chou@sifive.com: Exposed x-zvkg property]
[max.chou@sifive.com: Replaced uint by int for cross win32 build]
Message-ID: <20230711165917.2629866-13-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
24
target/riscv/cpu_cfg.h | 1 +
13
target/riscv/op_helper.c | 4 ++--
25
target/riscv/helper.h | 3 +
14
target/riscv/insn_trans/trans_privileged.c.inc | 7 ++-----
26
target/riscv/insn32.decode | 4 ++
15
3 files changed, 6 insertions(+), 9 deletions(-)
27
target/riscv/cpu.c | 6 +-
16
28
target/riscv/vcrypto_helper.c | 72 ++++++++++++++++++++++++
29
target/riscv/insn_trans/trans_rvvk.c.inc | 30 ++++++++++
30
6 files changed, 114 insertions(+), 2 deletions(-)
31
32
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_cfg.h
35
+++ b/target/riscv/cpu_cfg.h
36
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
37
bool ext_zve64d;
38
bool ext_zvbb;
39
bool ext_zvbc;
40
+ bool ext_zvkg;
41
bool ext_zvkned;
42
bool ext_zvknha;
43
bool ext_zvknhb;
17
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
18
index XXXXXXX..XXXXXXX 100644
45
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/helper.h
46
--- a/target/riscv/helper.h
20
+++ b/target/riscv/helper.h
47
+++ b/target/riscv/helper.h
21
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(csrr_i128, tl, env, int)
48
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
22
DEF_HELPER_4(csrw_i128, void, env, int, tl, tl)
49
23
DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
50
DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
24
#ifndef CONFIG_USER_ONLY
51
DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
25
-DEF_HELPER_2(sret, tl, env, tl)
52
+
26
-DEF_HELPER_2(mret, tl, env, tl)
53
+DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
27
+DEF_HELPER_1(sret, tl, env)
54
+DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
28
+DEF_HELPER_1(mret, tl, env)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
29
DEF_HELPER_1(wfi, void, env)
56
index XXXXXXX..XXXXXXX 100644
30
DEF_HELPER_1(tlb_flush, void, env)
57
--- a/target/riscv/insn32.decode
31
#endif
58
+++ b/target/riscv/insn32.decode
32
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
59
@@ -XXX,XX +XXX,XX @@ vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
33
index XXXXXXX..XXXXXXX 100644
60
# *** Zvksh vector crypto extension ***
34
--- a/target/riscv/op_helper.c
61
vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
35
+++ b/target/riscv/op_helper.c
62
vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
36
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
63
+
37
64
+# *** Zvkg vector crypto extension ***
38
#ifndef CONFIG_USER_ONLY
65
+vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
39
66
+vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
40
-target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
67
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
41
+target_ulong helper_sret(CPURISCVState *env)
68
index XXXXXXX..XXXXXXX 100644
42
{
69
--- a/target/riscv/cpu.c
43
uint64_t mstatus;
70
+++ b/target/riscv/cpu.c
44
target_ulong prev_priv, prev_virt;
71
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
45
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
72
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
46
return retpc;
73
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
74
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
75
+ ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
76
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
77
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
78
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
79
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
80
* In principle Zve*x would also suffice here, were they supported
81
* in qemu
82
*/
83
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
84
- cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
85
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
86
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
87
error_setg(errp,
88
"Vector crypto extensions require V or Zve* extensions");
89
return;
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
91
/* Vector cryptography extensions */
92
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
93
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
94
+ DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false),
95
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
96
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
97
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/vcrypto_helper.c
101
+++ b/target/riscv/vcrypto_helper.c
102
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
103
vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
104
env->vstart = 0;
47
}
105
}
48
106
+
49
-target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
107
+void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
50
+target_ulong helper_mret(CPURISCVState *env)
108
+ CPURISCVState *env, uint32_t desc)
51
{
109
+{
52
if (!(env->priv >= PRV_M)) {
110
+ uint64_t *vd = vd_vptr;
53
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
111
+ uint64_t *vs1 = vs1_vptr;
54
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
112
+ uint64_t *vs2 = vs2_vptr;
55
index XXXXXXX..XXXXXXX 100644
113
+ uint32_t vta = vext_vta(desc);
56
--- a/target/riscv/insn_trans/trans_privileged.c.inc
114
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
57
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
115
+
58
@@ -XXX,XX +XXX,XX @@ static bool trans_uret(DisasContext *ctx, arg_uret *a)
116
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
59
static bool trans_sret(DisasContext *ctx, arg_sret *a)
117
+ uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
60
{
118
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
61
#ifndef CONFIG_USER_ONLY
119
+ uint64_t X[2] = {vs1[i * 2 + 0], vs1[i * 2 + 1]};
62
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
120
+ uint64_t Z[2] = {0, 0};
63
-
121
+
64
if (has_ext(ctx, RVS)) {
122
+ uint64_t S[2] = {brev8(Y[0] ^ X[0]), brev8(Y[1] ^ X[1])};
65
- gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
123
+
66
+ gen_helper_sret(cpu_pc, cpu_env);
124
+ for (int j = 0; j < 128; j++) {
67
tcg_gen_exit_tb(NULL, 0); /* no chaining */
125
+ if ((S[j / 64] >> (j % 64)) & 1) {
68
ctx->base.is_jmp = DISAS_NORETURN;
126
+ Z[0] ^= H[0];
69
} else {
127
+ Z[1] ^= H[1];
70
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
128
+ }
71
static bool trans_mret(DisasContext *ctx, arg_mret *a)
129
+ bool reduce = ((H[1] >> 63) & 1);
72
{
130
+ H[1] = H[1] << 1 | H[0] >> 63;
73
#ifndef CONFIG_USER_ONLY
131
+ H[0] = H[0] << 1;
74
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
132
+ if (reduce) {
75
- gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
133
+ H[0] ^= 0x87;
76
+ gen_helper_mret(cpu_pc, cpu_env);
134
+ }
77
tcg_gen_exit_tb(NULL, 0); /* no chaining */
135
+ }
78
ctx->base.is_jmp = DISAS_NORETURN;
136
+
79
return true;
137
+ vd[i * 2 + 0] = brev8(Z[0]);
138
+ vd[i * 2 + 1] = brev8(Z[1]);
139
+ }
140
+ /* set tail elements to 1s */
141
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
142
+ env->vstart = 0;
143
+}
144
+
145
+void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
146
+ uint32_t desc)
147
+{
148
+ uint64_t *vd = vd_vptr;
149
+ uint64_t *vs2 = vs2_vptr;
150
+ uint32_t vta = vext_vta(desc);
151
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
152
+
153
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
154
+ uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
155
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
156
+ uint64_t Z[2] = {0, 0};
157
+
158
+ for (int j = 0; j < 128; j++) {
159
+ if ((Y[j / 64] >> (j % 64)) & 1) {
160
+ Z[0] ^= H[0];
161
+ Z[1] ^= H[1];
162
+ }
163
+ bool reduce = ((H[1] >> 63) & 1);
164
+ H[1] = H[1] << 1 | H[0] >> 63;
165
+ H[0] = H[0] << 1;
166
+ if (reduce) {
167
+ H[0] ^= 0x87;
168
+ }
169
+ }
170
+
171
+ vd[i * 2 + 0] = brev8(Z[0]);
172
+ vd[i * 2 + 1] = brev8(Z[1]);
173
+ }
174
+ /* set tail elements to 1s */
175
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
176
+ env->vstart = 0;
177
+}
178
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
181
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
182
@@ -XXX,XX +XXX,XX @@ static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
183
184
GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
185
GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
186
+
187
+/*
188
+ * Zvkg
189
+ */
190
+
191
+#define ZVKG_EGS 4
192
+
193
+static bool vgmul_check(DisasContext *s, arg_rmr *a)
194
+{
195
+ int egw_bytes = ZVKG_EGS << s->sew;
196
+ return s->cfg_ptr->ext_zvkg == true &&
197
+ vext_check_isa_ill(s) &&
198
+ require_rvv(s) &&
199
+ MAXSZ(s) >= egw_bytes &&
200
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
201
+ s->sew == MO_32;
202
+}
203
+
204
+GEN_V_UNMASKED_TRANS(vgmul_vv, vgmul_check, ZVKG_EGS)
205
+
206
+static bool vghsh_check(DisasContext *s, arg_rmrr *a)
207
+{
208
+ int egw_bytes = ZVKG_EGS << s->sew;
209
+ return s->cfg_ptr->ext_zvkg == true &&
210
+ opivv_check(s, a) &&
211
+ MAXSZ(s) >= egw_bytes &&
212
+ s->sew == MO_32;
213
+}
214
+
215
+GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
80
--
216
--
81
2.31.1
217
2.41.0
82
83
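
For reference, the arithmetic behind the vghsh.vv/vgmul.vv helpers above is an ordinary bit-serial multiply in GF(2^128). The following is a minimal standalone sketch of that inner loop, using the same two-limb little-endian layout and the same 0x87 reduction constant as the helpers; the brev8() bit-reversal the helpers apply on input and output is omitted, and this is an illustration rather than QEMU code.

#include <stdint.h>
#include <stdbool.h>

/* Bit-serial multiply Z = S * H in GF(2^128), operands held as two 64-bit
 * limbs (limb 0 = low 64 bits), reducing with 0x87 whenever H overflows on
 * doubling -- the same loop structure as the vector-crypto helpers above. */
static void gf128_mul_sketch(uint64_t Z[2], const uint64_t S[2],
                             const uint64_t Hin[2])
{
    uint64_t H[2] = { Hin[0], Hin[1] };

    Z[0] = Z[1] = 0;
    for (int j = 0; j < 128; j++) {
        if ((S[j / 64] >> (j % 64)) & 1) {
            Z[0] ^= H[0];                      /* accumulate H * x^j */
            Z[1] ^= H[1];
        }
        bool reduce = (H[1] >> 63) & 1;
        H[1] = (H[1] << 1) | (H[0] >> 63);     /* H *= x */
        H[0] <<= 1;
        if (reduce) {
            H[0] ^= 0x87;                      /* fold the dropped bit back in */
        }
    }
}
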
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
Vector widening conversion instructions are provided to and from all
3
Allows sharing of sm4_subword between different targets.
4
supported integer EEWs for the Zve64f extension.
5
4
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
5
Signed-off-by: Max Chou <max.chou@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
8
Message-id: 20220118014522.13613-9-frank.chang@sifive.com
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Max Chou <max.chou@sifive.com>
9
Message-ID: <20230711165917.2629866-14-max.chou@sifive.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
---
11
target/riscv/insn_trans/trans_rvv.c.inc | 32 +++++++++++++++++++------
12
include/crypto/sm4.h | 8 ++++++++
12
1 file changed, 25 insertions(+), 7 deletions(-)
13
target/arm/tcg/crypto_helper.c | 10 ++--------
14
2 files changed, 10 insertions(+), 8 deletions(-)
13
15
14
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
15
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
--- a/include/crypto/sm4.h
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/include/crypto/sm4.h
18
@@ -XXX,XX +XXX,XX @@ static bool require_zve64f(DisasContext *s)
20
@@ -XXX,XX +XXX,XX @@
19
return s->ext_zve64f ? s->sew <= MO_32 : true;
21
20
}
22
extern const uint8_t sm4_sbox[256];
21
23
22
+static bool require_scale_zve64f(DisasContext *s)
24
+static inline uint32_t sm4_subword(uint32_t word)
23
+{
25
+{
24
+ /* RVV + Zve64f = RVV. */
26
+ return sm4_sbox[word & 0xff] |
25
+ if (has_ext(s, RVV)) {
27
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
26
+ return true;
28
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
27
+ }
29
+ sm4_sbox[(word >> 24) & 0xff] << 24;
28
+
29
+ /* Zve64f doesn't support FP64. (Section 18.2) */
30
+ return s->ext_zve64f ? s->sew <= MO_16 : true;
31
+}
30
+}
32
+
31
+
33
/* Destination vector register group cannot overlap source mask register. */
32
#endif
34
static bool require_vm(int vm, int vd)
33
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
35
{
34
index XXXXXXX..XXXXXXX 100644
36
@@ -XXX,XX +XXX,XX @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
35
--- a/target/arm/tcg/crypto_helper.c
37
require_scale_rvf(s) &&
36
+++ b/target/arm/tcg/crypto_helper.c
38
(s->sew != MO_8) &&
37
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm)
39
vext_check_isa_ill(s) &&
38
CR_ST_WORD(d, (i + 3) % 4) ^
40
- vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
39
CR_ST_WORD(n, i);
41
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
40
42
+ require_scale_zve64f(s);
41
- t = sm4_sbox[t & 0xff] |
43
}
42
- sm4_sbox[(t >> 8) & 0xff] << 8 |
44
43
- sm4_sbox[(t >> 16) & 0xff] << 16 |
45
/* OPFVV with WIDEN */
44
- sm4_sbox[(t >> 24) & 0xff] << 24;
46
@@ -XXX,XX +XXX,XX @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
45
+ t = sm4_subword(t);
47
require_scale_rvf(s) &&
46
48
(s->sew != MO_8) &&
47
CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
49
vext_check_isa_ill(s) &&
48
rol32(t, 24);
50
- vext_check_ds(s, a->rd, a->rs2, a->vm);
49
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4ekey(uint64_t *rd, uint64_t *rn, uint64_t *rm)
51
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
50
CR_ST_WORD(d, (i + 3) % 4) ^
52
+ require_scale_zve64f(s);
51
CR_ST_WORD(m, i);
53
}
52
54
53
- t = sm4_sbox[t & 0xff] |
55
/* OPFVF with WIDEN */
54
- sm4_sbox[(t >> 8) & 0xff] << 8 |
56
@@ -XXX,XX +XXX,XX @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
55
- sm4_sbox[(t >> 16) & 0xff] << 16 |
57
require_scale_rvf(s) &&
56
- sm4_sbox[(t >> 24) & 0xff] << 24;
58
(s->sew != MO_8) &&
57
+ t = sm4_subword(t);
59
vext_check_isa_ill(s) &&
58
60
- vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
59
CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
61
+ vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
60
}
62
+ require_scale_zve64f(s);
63
}
64
65
/* WIDEN OPFVV with WIDEN */
66
@@ -XXX,XX +XXX,XX @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
67
require_scale_rvf(s) &&
68
(s->sew != MO_8) &&
69
vext_check_isa_ill(s) &&
70
- vext_check_dd(s, a->rd, a->rs2, a->vm);
71
+ vext_check_dd(s, a->rd, a->rs2, a->vm) &&
72
+ require_scale_zve64f(s);
73
}
74
75
/* WIDEN OPFVF with WIDEN */
76
@@ -XXX,XX +XXX,XX @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
77
static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
78
{
79
return opfv_widen_check(s, a) &&
80
- require_rvf(s);
81
+ require_rvf(s) &&
82
+ require_zve64f(s);
83
}
84
85
static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
86
{
87
return opfv_widen_check(s, a) &&
88
require_scale_rvf(s) &&
89
- (s->sew != MO_8);
90
+ (s->sew != MO_8) &&
91
+ require_scale_zve64f(s);
92
}
93
94
#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
95
@@ -XXX,XX +XXX,XX @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
96
require_scale_rvf(s) &&
97
vext_check_isa_ill(s) &&
98
/* OPFV widening instructions ignore vs1 check */
99
- vext_check_ds(s, a->rd, a->rs2, a->vm);
100
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
101
+ require_scale_zve64f(s);
102
}
103
104
#define GEN_OPFXV_WIDEN_TRANS(NAME) \
105
--
61
--
106
2.31.1
62
2.41.0
107
108
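
As a quick illustration of what the new sm4_subword() inline does, here is an equivalent loop-form sketch. The table-based version above is the authoritative one; sm4_sbox is assumed to be the 256-entry table already exported by include/crypto/sm4.h.

#include <stdint.h>

extern const uint8_t sm4_sbox[256];

/* Apply the SM4 S-box to each byte of a 32-bit word -- an equivalent,
 * loop-based form of the sm4_subword() inline added above. */
static uint32_t sm4_subword_loop(uint32_t word)
{
    uint32_t out = 0;

    for (int i = 0; i < 4; i++) {
        out |= (uint32_t)sm4_sbox[(word >> (8 * i)) & 0xff] << (8 * i);
    }
    return out;
}
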
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
Add kvm_riscv_get/put_regs_timer to synchronize virtual time context
3
Adds sm4_ck constant for use in sm4 cryptography across different targets.
4
from KVM.
5
4
6
Setting the RISCV_TIMER_REG(state) register causes an error from KVM
5
Signed-off-by: Max Chou <max.chou@sifive.com>
7
when kvm_timer_state == 0. It would be better to handle this in KVM, but it is
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
8
acceptable to adapt it in QEMU for now.
7
Signed-off-by: Max Chou <max.chou@sifive.com>
9
8
Message-ID: <20230711165917.2629866-15-max.chou@sifive.com>
10
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
11
Signed-off-by: Mingwang Li <limingwang@huawei.com>
12
Reviewed-by: Anup Patel <anup.patel@wdc.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-id: 20220112081329.1835-11-jiangyifei@huawei.com
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
10
---
17
target/riscv/cpu.h | 7 +++++
11
include/crypto/sm4.h | 1 +
18
target/riscv/kvm.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++
12
crypto/sm4.c | 10 ++++++++++
19
2 files changed, 79 insertions(+)
13
2 files changed, 11 insertions(+)
20
14
21
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
15
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
22
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/cpu.h
17
--- a/include/crypto/sm4.h
24
+++ b/target/riscv/cpu.h
18
+++ b/include/crypto/sm4.h
25
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
19
@@ -XXX,XX +XXX,XX @@
26
20
#define QEMU_SM4_H
27
hwaddr kernel_addr;
21
28
hwaddr fdt_addr;
22
extern const uint8_t sm4_sbox[256];
29
+
23
+extern const uint32_t sm4_ck[32];
30
+ /* kvm timer */
24
31
+ bool kvm_timer_dirty;
25
static inline uint32_t sm4_subword(uint32_t word)
32
+ uint64_t kvm_timer_time;
26
{
33
+ uint64_t kvm_timer_compare;
27
diff --git a/crypto/sm4.c b/crypto/sm4.c
34
+ uint64_t kvm_timer_state;
28
index XXXXXXX..XXXXXXX 100644
35
+ uint64_t kvm_timer_frequency;
29
--- a/crypto/sm4.c
30
+++ b/crypto/sm4.c
31
@@ -XXX,XX +XXX,XX @@ uint8_t const sm4_sbox[] = {
32
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
36
};
33
};
37
34
38
OBJECT_DECLARE_TYPE(RISCVCPU, RISCVCPUClass,
35
+uint32_t const sm4_ck[] = {
39
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
36
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
40
index XXXXXXX..XXXXXXX 100644
37
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
41
--- a/target/riscv/kvm.c
38
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
42
+++ b/target/riscv/kvm.c
39
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
43
@@ -XXX,XX +XXX,XX @@
40
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
44
#include "kvm_riscv.h"
41
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
45
#include "sbi_ecall_interface.h"
42
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
46
#include "chardev/char-fe.h"
43
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
47
+#include "migration/migration.h"
44
+};
48
49
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
50
uint64_t idx)
51
@@ -XXX,XX +XXX,XX @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
52
#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
53
KVM_REG_RISCV_CSR_REG(name))
54
55
+#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
56
+ KVM_REG_RISCV_TIMER_REG(name))
57
+
58
#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
59
60
#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
61
@@ -XXX,XX +XXX,XX @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
62
} \
63
} while (0)
64
65
+#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
66
+ do { \
67
+ int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
68
+ if (ret) { \
69
+ abort(); \
70
+ } \
71
+ } while (0)
72
+
73
+#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
74
+ do { \
75
+ int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, time), &reg); \
76
+ if (ret) { \
77
+ abort(); \
78
+ } \
79
+ } while (0)
80
+
81
static int kvm_riscv_get_regs_core(CPUState *cs)
82
{
83
int ret = 0;
84
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_put_regs_fp(CPUState *cs)
85
return ret;
86
}
87
88
+static void kvm_riscv_get_regs_timer(CPUState *cs)
89
+{
90
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
91
+
92
+ if (env->kvm_timer_dirty) {
93
+ return;
94
+ }
95
+
96
+ KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
97
+ KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
98
+ KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
99
+ KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);
100
+
101
+ env->kvm_timer_dirty = true;
102
+}
103
+
104
+static void kvm_riscv_put_regs_timer(CPUState *cs)
105
+{
106
+ uint64_t reg;
107
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
108
+
109
+ if (!env->kvm_timer_dirty) {
110
+ return;
111
+ }
112
+
113
+ KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
114
+ KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);
115
+
116
+ /*
117
+ * To set register of RISCV_TIMER_REG(state) will occur a error from KVM
118
+ * on env->kvm_timer_state == 0, It's better to adapt in KVM, but it
119
+ * doesn't matter that adaping in QEMU now.
120
+ * TODO If KVM changes, adapt here.
121
+ */
122
+ if (env->kvm_timer_state) {
123
+ KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
124
+ }
125
+
126
+ /*
127
+ * For now, migration will not work between Hosts with different timer
128
+ * frequency. Therefore, we should check whether they are the same here
129
+ * during the migration.
130
+ */
131
+ if (migration_is_running(migrate_get_current()->state)) {
132
+ KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
133
+ if (reg != env->kvm_timer_frequency) {
134
+ error_report("Dst Hosts timer frequency != Src Hosts");
135
+ }
136
+ }
137
+
138
+ env->kvm_timer_dirty = false;
139
+}
140
141
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
142
KVM_CAP_LAST_INFO
143
--
45
--
144
2.31.1
46
2.41.0
145
146
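
The sm4_ck[] table added above is not arbitrary: byte k of the CK sequence is (7 * k) mod 256, packed most-significant byte first into 32-bit words. A hypothetical generator for cross-checking the table follows; sm4_ck_gen(0) evaluates to 0x00070e15 and sm4_ck_gen(31) to 0x646b7279, matching the first and last entries above.

#include <stdint.h>

/* Generate SM4 CK constant i (0 <= i < 32): byte j of word i is
 * ((4 * i + j) * 7) mod 256, most significant byte first. */
static uint32_t sm4_ck_gen(int i)
{
    uint32_t w = 0;

    for (int j = 0; j < 4; j++) {
        w = (w << 8) | (uint8_t)((4 * i + j) * 7);
    }
    return w;
}
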
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
When KVM is enabled, set the S-mode external interrupt through
3
This commit adds support for the Zvksed vector-crypto extension, which
4
the kvm_riscv_set_irq() function.
4
consists of the following instructions:
5
5
6
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
6
* vsm4k.vi
7
Signed-off-by: Mingwang Li <limingwang@huawei.com>
7
* vsm4r.[vv,vs]
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
9
Reviewed-by: Anup Patel <anup.patel@wdc.com>
9
Translation functions are defined in
10
Message-id: 20220112081329.1835-8-jiangyifei@huawei.com
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
Signed-off-by: Max Chou <max.chou@sifive.com>
14
Reviewed-by: Frank Chang <frank.chang@sifive.com>
15
[lawrence.hunter@codethink.co.uk: Moved SM4 functions from
16
crypto_helper.c to vcrypto_helper.c]
17
[nazar.kazakov@codethink.co.uk: Added alignment checks, refactored code to
18
use macros, and minor style changes]
19
Signed-off-by: Max Chou <max.chou@sifive.com>
20
Message-ID: <20230711165917.2629866-16-max.chou@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
22
---
13
target/riscv/kvm_riscv.h | 1 +
23
target/riscv/cpu_cfg.h | 1 +
14
target/riscv/cpu.c | 6 +++++-
24
target/riscv/helper.h | 4 +
15
target/riscv/kvm-stub.c | 5 +++++
25
target/riscv/insn32.decode | 5 +
16
target/riscv/kvm.c | 17 +++++++++++++++++
26
target/riscv/cpu.c | 5 +-
17
4 files changed, 28 insertions(+), 1 deletion(-)
27
target/riscv/vcrypto_helper.c | 127 +++++++++++++++++++++++
18
28
target/riscv/insn_trans/trans_rvvk.c.inc | 43 ++++++++
19
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
29
6 files changed, 184 insertions(+), 1 deletion(-)
20
index XXXXXXX..XXXXXXX 100644
30
21
--- a/target/riscv/kvm_riscv.h
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
22
+++ b/target/riscv/kvm_riscv.h
32
index XXXXXXX..XXXXXXX 100644
23
@@ -XXX,XX +XXX,XX @@
33
--- a/target/riscv/cpu_cfg.h
24
#define QEMU_KVM_RISCV_H
34
+++ b/target/riscv/cpu_cfg.h
25
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
26
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
36
bool ext_zvkned;
27
+void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
37
bool ext_zvknha;
28
38
bool ext_zvknhb;
29
#endif
39
+ bool ext_zvksed;
40
bool ext_zvksh;
41
bool ext_zmmul;
42
bool ext_zvfbfmin;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
48
49
DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32)
53
+DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32)
54
+DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/insn32.decode
58
+++ b/target/riscv/insn32.decode
59
@@ -XXX,XX +XXX,XX @@ vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
# *** Zvkg vector crypto extension ***
61
vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
63
+
64
+# *** Zvksed vector crypto extension ***
65
+vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
+vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
67
+vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
30
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
68
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
31
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
32
--- a/target/riscv/cpu.c
70
--- a/target/riscv/cpu.c
33
+++ b/target/riscv/cpu.c
71
+++ b/target/riscv/cpu.c
34
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
72
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
35
case IRQ_S_EXT:
73
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
36
case IRQ_VS_EXT:
74
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
37
case IRQ_M_EXT:
75
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
38
- riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
76
+ ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
39
+ if (kvm_enabled()) {
77
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
40
+ kvm_riscv_set_irq(cpu, irq, level);
78
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
41
+ } else {
79
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
42
+ riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
80
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
43
+ }
81
* in qemu
44
break;
82
*/
45
default:
83
if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
46
g_assert_not_reached();
84
- cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
47
diff --git a/target/riscv/kvm-stub.c b/target/riscv/kvm-stub.c
85
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
48
index XXXXXXX..XXXXXXX 100644
86
+ !cpu->cfg.ext_zve32f) {
49
--- a/target/riscv/kvm-stub.c
87
error_setg(errp,
50
+++ b/target/riscv/kvm-stub.c
88
"Vector crypto extensions require V or Zve* extensions");
51
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
89
return;
52
{
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
53
abort();
91
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
92
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
93
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
94
+ DEFINE_PROP_BOOL("x-zvksed", RISCVCPU, cfg.ext_zvksed, false),
95
DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
96
97
DEFINE_PROP_END_OF_LIST(),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/vcrypto_helper.c
101
+++ b/target/riscv/vcrypto_helper.c
102
@@ -XXX,XX +XXX,XX @@
103
#include "cpu.h"
104
#include "crypto/aes.h"
105
#include "crypto/aes-round.h"
106
+#include "crypto/sm4.h"
107
#include "exec/memop.h"
108
#include "exec/exec-all.h"
109
#include "exec/helper-proto.h"
110
@@ -XXX,XX +XXX,XX @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
111
vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
112
env->vstart = 0;
54
}
113
}
55
+
114
+
56
+void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
115
+void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
57
+{
116
+ uint32_t desc)
58
+ abort();
117
+{
59
+}
118
+ const uint32_t egs = 4;
60
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
119
+ uint32_t rnd = uimm5 & 0x7;
61
index XXXXXXX..XXXXXXX 100644
120
+ uint32_t group_start = env->vstart / egs;
62
--- a/target/riscv/kvm.c
121
+ uint32_t group_end = env->vl / egs;
63
+++ b/target/riscv/kvm.c
122
+ uint32_t esz = sizeof(uint32_t);
64
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
123
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
65
env->satp = 0;
124
+
125
+ for (uint32_t i = group_start; i < group_end; ++i) {
126
+ uint32_t vstart = i * egs;
127
+ uint32_t vend = (i + 1) * egs;
128
+ uint32_t rk[4] = {0};
129
+ uint32_t tmp[8] = {0};
130
+
131
+ for (uint32_t j = vstart; j < vend; ++j) {
132
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
133
+ }
134
+
135
+ for (uint32_t j = 0; j < egs; ++j) {
136
+ tmp[j] = rk[j];
137
+ }
138
+
139
+ for (uint32_t j = 0; j < egs; ++j) {
140
+ uint32_t b, s;
141
+ b = tmp[j + 1] ^ tmp[j + 2] ^ tmp[j + 3] ^ sm4_ck[rnd * 4 + j];
142
+
143
+ s = sm4_subword(b);
144
+
145
+ tmp[j + 4] = tmp[j] ^ (s ^ rol32(s, 13) ^ rol32(s, 23));
146
+ }
147
+
148
+ for (uint32_t j = vstart; j < vend; ++j) {
149
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
150
+ }
151
+ }
152
+
153
+ env->vstart = 0;
154
+ /* set tail elements to 1s */
155
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
156
+}
157
+
158
+static void do_sm4_round(uint32_t *rk, uint32_t *buf)
159
+{
160
+ const uint32_t egs = 4;
161
+ uint32_t s, b;
162
+
163
+ for (uint32_t j = egs; j < egs * 2; ++j) {
164
+ b = buf[j - 3] ^ buf[j - 2] ^ buf[j - 1] ^ rk[j - 4];
165
+
166
+ s = sm4_subword(b);
167
+
168
+ buf[j] = buf[j - 4] ^ (s ^ rol32(s, 2) ^ rol32(s, 10) ^ rol32(s, 18) ^
169
+ rol32(s, 24));
170
+ }
171
+}
172
+
173
+void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
174
+{
175
+ const uint32_t egs = 4;
176
+ uint32_t group_start = env->vstart / egs;
177
+ uint32_t group_end = env->vl / egs;
178
+ uint32_t esz = sizeof(uint32_t);
179
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
180
+
181
+ for (uint32_t i = group_start; i < group_end; ++i) {
182
+ uint32_t vstart = i * egs;
183
+ uint32_t vend = (i + 1) * egs;
184
+ uint32_t rk[4] = {0};
185
+ uint32_t tmp[8] = {0};
186
+
187
+ for (uint32_t j = vstart; j < vend; ++j) {
188
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
189
+ }
190
+
191
+ for (uint32_t j = vstart; j < vend; ++j) {
192
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
193
+ }
194
+
195
+ do_sm4_round(rk, tmp);
196
+
197
+ for (uint32_t j = vstart; j < vend; ++j) {
198
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
199
+ }
200
+ }
201
+
202
+ env->vstart = 0;
203
+ /* set tail elements to 1s */
204
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
205
+}
206
+
207
+void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
208
+{
209
+ const uint32_t egs = 4;
210
+ uint32_t group_start = env->vstart / egs;
211
+ uint32_t group_end = env->vl / egs;
212
+ uint32_t esz = sizeof(uint32_t);
213
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
214
+
215
+ for (uint32_t i = group_start; i < group_end; ++i) {
216
+ uint32_t vstart = i * egs;
217
+ uint32_t vend = (i + 1) * egs;
218
+ uint32_t rk[4] = {0};
219
+ uint32_t tmp[8] = {0};
220
+
221
+ for (uint32_t j = 0; j < egs; ++j) {
222
+ rk[j] = *((uint32_t *)vs2 + H4(j));
223
+ }
224
+
225
+ for (uint32_t j = vstart; j < vend; ++j) {
226
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
227
+ }
228
+
229
+ do_sm4_round(rk, tmp);
230
+
231
+ for (uint32_t j = vstart; j < vend; ++j) {
232
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
233
+ }
234
+ }
235
+
236
+ env->vstart = 0;
237
+ /* set tail elements to 1s */
238
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
239
+}
240
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
241
index XXXXXXX..XXXXXXX 100644
242
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
243
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
244
@@ -XXX,XX +XXX,XX @@ static bool vghsh_check(DisasContext *s, arg_rmrr *a)
66
}
245
}
67
246
68
+void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
247
GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
69
+{
248
+
70
+ int ret;
249
+/*
71
+ unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;
250
+ * Zvksed
72
+
251
+ */
73
+ if (irq != IRQ_S_EXT) {
252
+
74
+ perror("kvm riscv set irq != IRQ_S_EXT\n");
253
+#define ZVKSED_EGS 4
75
+ abort();
254
+
76
+ }
255
+static bool zvksed_check(DisasContext *s)
77
+
256
+{
78
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
257
+ int egw_bytes = ZVKSED_EGS << s->sew;
79
+ if (ret < 0) {
258
+ return s->cfg_ptr->ext_zvksed == true &&
80
+ perror("Set irq failed");
259
+ require_rvv(s) &&
81
+ abort();
260
+ vext_check_isa_ill(s) &&
82
+ }
261
+ MAXSZ(s) >= egw_bytes &&
83
+}
262
+ s->sew == MO_32;
84
+
263
+}
85
bool kvm_arch_cpu_check_are_resettable(void)
264
+
86
{
265
+static bool vsm4k_vi_check(DisasContext *s, arg_rmrr *a)
87
return true;
266
+{
267
+ return zvksed_check(s) &&
268
+ require_align(a->rd, s->lmul) &&
269
+ require_align(a->rs2, s->lmul);
270
+}
271
+
272
+GEN_VI_UNMASKED_TRANS(vsm4k_vi, vsm4k_vi_check, ZVKSED_EGS)
273
+
274
+static bool vsm4r_vv_check(DisasContext *s, arg_rmr *a)
275
+{
276
+ return zvksed_check(s) &&
277
+ require_align(a->rd, s->lmul) &&
278
+ require_align(a->rs2, s->lmul);
279
+}
280
+
281
+GEN_V_UNMASKED_TRANS(vsm4r_vv, vsm4r_vv_check, ZVKSED_EGS)
282
+
283
+static bool vsm4r_vs_check(DisasContext *s, arg_rmr *a)
284
+{
285
+ return zvksed_check(s) &&
286
+ !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
287
+ require_align(a->rd, s->lmul);
288
+}
289
+
290
+GEN_V_UNMASKED_TRANS(vsm4r_vs, vsm4r_vs_check, ZVKSED_EGS)
88
--
291
--
89
2.31.1
292
2.41.0
90
91
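
To make the vsm4k.vi helper above easier to follow, here is a scalar sketch of the four-key expansion step it performs per element group, where rnd is the low three bits of the vsm4k.vi immediate. It assumes the sm4_subword()/sm4_ck[] definitions introduced earlier in this series (crypto/sm4.h) and uses a local rotate helper; it illustrates the SM4 key schedule, not the vector helper itself.

#include <stdint.h>
#include "crypto/sm4.h"   /* sm4_sbox, sm4_subword(), sm4_ck[] from this series */

static inline uint32_t rol32_sketch(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

/* Expand rk[0..3] (previous four round keys) into rk[4..7] for round group
 * 'rnd' -- a scalar mirror of the per-element-group loop in vsm4k_vi above. */
static void sm4_key_group(uint32_t rk[8], uint32_t rnd)
{
    for (int j = 0; j < 4; j++) {
        uint32_t b = rk[j + 1] ^ rk[j + 2] ^ rk[j + 3] ^ sm4_ck[rnd * 4 + j];
        uint32_t s = sm4_subword(b);                                   /* S-box */
        rk[j + 4] = rk[j] ^ (s ^ rol32_sketch(s, 13) ^ rol32_sketch(s, 23)); /* L' */
    }
}
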
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Rob Bradford <rbradford@rivosinc.com>
2
2
3
We need not specially process vtype when XLEN changes.
3
These are WARL fields - zero out the bits for unavailable counters and
4
special case the TM bit in mcountinhibit which is hardwired to zero.
5
This patch achieves this by modifying the value written so that any use
6
of the field will see the correctly masked bits.
4
7
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
8
Tested by modifying OpenSBI to write the maximum value to these CSRs; on a
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
subsequent read, only the bits for the implemented PMU counters are
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
set, and the TM bit reads as zero in mcountinhibit.
8
Message-id: 20220120122050.41546-16-zhiwei_liu@c-sky.com
11
12
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20230802124906.24197-1-rbradford@rivosinc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
17
---
11
target/riscv/cpu.h | 1 +
18
target/riscv/csr.c | 11 +++++++++--
12
target/riscv/cpu_helper.c | 3 +--
19
1 file changed, 9 insertions(+), 2 deletions(-)
13
target/riscv/csr.c | 13 ++++++++++++-
14
target/riscv/machine.c | 5 +++--
15
target/riscv/vector_helper.c | 3 ++-
16
5 files changed, 19 insertions(+), 6 deletions(-)
17
20
18
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu.h
21
+++ b/target/riscv/cpu.h
22
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
23
target_ulong vl;
24
target_ulong vstart;
25
target_ulong vtype;
26
+ bool vill;
27
28
target_ulong pc;
29
target_ulong load_res;
30
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/riscv/cpu_helper.c
33
+++ b/target/riscv/cpu_helper.c
34
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
35
uint32_t maxsz = vlmax << sew;
36
bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
37
(maxsz >= 8);
38
- flags = FIELD_DP32(flags, TB_FLAGS, VILL,
39
- FIELD_EX64(env->vtype, VTYPE, VILL));
40
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
41
flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
42
flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
43
FIELD_EX64(env->vtype, VTYPE, VLMUL));
44
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
21
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
45
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
46
--- a/target/riscv/csr.c
23
--- a/target/riscv/csr.c
47
+++ b/target/riscv/csr.c
24
+++ b/target/riscv/csr.c
48
@@ -XXX,XX +XXX,XX @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno,
25
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
49
static RISCVException read_vtype(CPURISCVState *env, int csrno,
50
target_ulong *val)
51
{
26
{
52
- *val = env->vtype;
27
int cidx;
53
+ uint64_t vill;
28
PMUCTRState *counter;
54
+ switch (env->xl) {
29
+ RISCVCPU *cpu = env_archcpu(env);
55
+ case MXL_RV32:
30
56
+ vill = (uint32_t)env->vill << 31;
31
- env->mcountinhibit = val;
57
+ break;
32
+ /* WARL register - disable unavailable counters; TM bit is always 0 */
58
+ case MXL_RV64:
33
+ env->mcountinhibit =
59
+ vill = (uint64_t)env->vill << 63;
34
+ val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
60
+ break;
35
61
+ default:
36
/* Check if any other counter is also monitoring cycles/instructions */
62
+ g_assert_not_reached();
37
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
63
+ }
38
@@ -XXX,XX +XXX,XX @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
64
+ *val = (target_ulong)vill | env->vtype;
39
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
40
target_ulong val)
41
{
42
- env->mcounteren = val;
43
+ RISCVCPU *cpu = env_archcpu(env);
44
+
45
+ /* WARL register - disable unavailable counters */
46
+ env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
47
+ COUNTEREN_IR);
65
return RISCV_EXCP_NONE;
48
return RISCV_EXCP_NONE;
66
}
49
}
67
50
68
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/riscv/machine.c
71
+++ b/target/riscv/machine.c
72
@@ -XXX,XX +XXX,XX @@ static bool vector_needed(void *opaque)
73
74
static const VMStateDescription vmstate_vector = {
75
.name = "cpu/vector",
76
- .version_id = 1,
77
- .minimum_version_id = 1,
78
+ .version_id = 2,
79
+ .minimum_version_id = 2,
80
.needed = vector_needed,
81
.fields = (VMStateField[]) {
82
VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
83
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_vector = {
84
VMSTATE_UINTTL(env.vl, RISCVCPU),
85
VMSTATE_UINTTL(env.vstart, RISCVCPU),
86
VMSTATE_UINTTL(env.vtype, RISCVCPU),
87
+ VMSTATE_BOOL(env.vill, RISCVCPU),
88
VMSTATE_END_OF_LIST()
89
}
90
};
91
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/target/riscv/vector_helper.c
94
+++ b/target/riscv/vector_helper.c
95
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
96
|| (ediv != 0)
97
|| (reserved != 0)) {
98
/* only set vill bit. */
99
- env->vtype = FIELD_DP64(0, VTYPE, VILL, 1);
100
+ env->vill = 1;
101
+ env->vtype = 0;
102
env->vl = 0;
103
env->vstart = 0;
104
return 0;
105
--
51
--
106
2.31.1
52
2.41.0
107
108
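
A small worked example of the WARL behaviour introduced above, assuming a hypothetical machine that implements hpmcounter3..6 only (pmu_avail_ctrs == 0x78): a guest write of all-ones to mcountinhibit is stored as 0x7d, i.e. CY, IR and the four implemented counters, while the hardwired TM bit reads back as zero. For mcounteren the mask additionally includes the TM bit, as in the patch.

#include <stdint.h>

#define COUNTEREN_CY (1u << 0)
#define COUNTEREN_TM (1u << 1)
#define COUNTEREN_IR (1u << 2)

/* Mirror of the mcountinhibit masking above; pmu_avail_ctrs is the bitmap of
 * implemented programmable counters (0x78 in the hypothetical example). */
static uint32_t mcountinhibit_warl(uint32_t val, uint32_t pmu_avail_ctrs)
{
    return val & (pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
}

/* mcountinhibit_warl(0xffffffff, 0x78) == 0x7d; bit 1 (TM) is never set. */
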
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
Signed-off-by: Frank Chang <frank.chang@sifive.com>
3
The RVA23 Profiles specification states:
4
The RVA23 profiles are intended to be used for 64-bit application
5
processors that will run rich OS stacks from standard binary OS
6
distributions and with a substantial number of third-party binary user
7
applications that will be supported over a considerable length of time
8
in the field.
9
10
Chapter 4 of the unprivileged spec introduces the Zihintntl extension,
11
and Zihintntl is a mandatory extension in the RVA23 Profiles, whose
12
purpose is to enable application and operating system portability across
13
different implementations. Thus the DTS should contain the Zihintntl ISA
14
string in order to pass it on to software.
15
16
The unprivileged spec states:
17
Like any HINTs, these instructions may be freely ignored. Hence, although
18
they are described in terms of cache-based memory hierarchies, they do not
19
mandate the provision of caches.
20
21
These instructions are encoded using otherwise unused encodings, e.g. ADD x0, x0, x2,
22
which QEMU already supports, and QEMU does not emulate caches. Therefore
23
these instructions can be treated as no-ops, and we only need to add
24
a new property for the Zihintntl extension.
25
26
Reviewed-by: Frank Chang <frank.chang@sifive.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
27
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Message-id: 20220118014522.13613-12-frank.chang@sifive.com
28
Signed-off-by: Jason Chien <jason.chien@sifive.com>
29
Message-ID: <20230726074049.19505-2-jason.chien@sifive.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
31
---
8
target/riscv/cpu.h | 1 +
32
target/riscv/cpu_cfg.h | 1 +
9
target/riscv/cpu.c | 4 ++--
33
target/riscv/cpu.c | 2 ++
10
target/riscv/cpu_helper.c | 2 +-
34
2 files changed, 3 insertions(+)
11
target/riscv/csr.c | 2 +-
12
target/riscv/translate.c | 2 ++
13
5 files changed, 7 insertions(+), 4 deletions(-)
14
35
15
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
36
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
16
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.h
38
--- a/target/riscv/cpu_cfg.h
18
+++ b/target/riscv/cpu.h
39
+++ b/target/riscv/cpu_cfg.h
19
@@ -XXX,XX +XXX,XX @@ struct RISCVCPU {
40
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
20
bool ext_icsr;
41
bool ext_icbom;
21
bool ext_zfh;
42
bool ext_icboz;
22
bool ext_zfhmin;
43
bool ext_zicond;
23
+ bool ext_zve32f;
44
+ bool ext_zihintntl;
24
bool ext_zve64f;
45
bool ext_zihintpause;
25
46
bool ext_smstateen;
26
char *priv_spec;
47
bool ext_sstc;
27
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
48
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
28
index XXXXXXX..XXXXXXX 100644
49
index XXXXXXX..XXXXXXX 100644
29
--- a/target/riscv/cpu.c
50
--- a/target/riscv/cpu.c
30
+++ b/target/riscv/cpu.c
51
+++ b/target/riscv/cpu.c
31
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
52
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
32
}
53
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
33
set_vext_version(env, vext_version);
54
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
34
}
55
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
35
- if (cpu->cfg.ext_zve64f && !cpu->cfg.ext_f) {
56
+ ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
36
- error_setg(errp, "Zve64f extension depends upon RVF.");
57
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
37
+ if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) {
58
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
38
+ error_setg(errp, "Zve32f/Zve64f extension depends upon RVF.");
59
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
39
return;
60
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
40
}
61
DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
41
if (cpu->cfg.ext_j) {
62
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
42
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
63
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
43
index XXXXXXX..XXXXXXX 100644
64
+ DEFINE_PROP_BOOL("Zihintntl", RISCVCPU, cfg.ext_zihintntl, true),
44
--- a/target/riscv/cpu_helper.c
65
DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
45
+++ b/target/riscv/cpu_helper.c
66
DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
46
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
67
DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
47
*pc = env->pc;
48
*cs_base = 0;
49
50
- if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve64f) {
51
+ if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
52
/*
53
* If env->vl equals to VLMAX, we can use generic vector operation
54
* expanders (GVEC) to accerlate the vector operations.
55
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/csr.c
58
+++ b/target/riscv/csr.c
59
@@ -XXX,XX +XXX,XX @@ static RISCVException vs(CPURISCVState *env, int csrno)
60
RISCVCPU *cpu = RISCV_CPU(cs);
61
62
if (env->misa_ext & RVV ||
63
- cpu->cfg.ext_zve64f) {
64
+ cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
65
#if !defined(CONFIG_USER_ONLY)
66
if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
67
return RISCV_EXCP_ILLEGAL_INST;
68
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/riscv/translate.c
71
+++ b/target/riscv/translate.c
72
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
73
bool ext_ifencei;
74
bool ext_zfh;
75
bool ext_zfhmin;
76
+ bool ext_zve32f;
77
bool ext_zve64f;
78
bool hlsx;
79
/* vector extension */
80
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
81
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
82
ctx->ext_zfh = cpu->cfg.ext_zfh;
83
ctx->ext_zfhmin = cpu->cfg.ext_zfhmin;
84
+ ctx->ext_zve32f = cpu->cfg.ext_zve32f;
85
ctx->ext_zve64f = cpu->cfg.ext_zve64f;
86
ctx->vlen = cpu->cfg.vlen;
87
ctx->elen = cpu->cfg.elen;
88
--
68
--
89
2.31.1
69
2.41.0
90
91
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
Define one common function to compute a canonical address from a register
3
Commit a47842d ("riscv: Add support for the Zfa extension") implemented the Zfa extension.
4
plus offset. Merge gen_pm_adjust_address into this function.
4
However, it contains copy-paste mistakes for fleq.d and fltq.d: both of them mistakenly called the fltq.s
5
helper function.
5
6
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
7
Fixes: a47842d ("riscv: Add support for the Zfa extension")
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Message-id: 20220120122050.41546-14-zhiwei_liu@c-sky.com
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
Message-ID: <20230728003906.768-1-zhiwei_liu@linux.alibaba.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
13
---
12
target/riscv/translate.c | 25 ++++++++++++-------------
14
target/riscv/insn_trans/trans_rvzfa.c.inc | 4 ++--
13
target/riscv/insn_trans/trans_rva.c.inc | 9 +++------
15
1 file changed, 2 insertions(+), 2 deletions(-)
14
target/riscv/insn_trans/trans_rvd.c.inc | 19 ++-----------------
15
target/riscv/insn_trans/trans_rvf.c.inc | 19 ++-----------------
16
target/riscv/insn_trans/trans_rvi.c.inc | 18 ++----------------
17
5 files changed, 21 insertions(+), 69 deletions(-)
18
16
19
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
17
diff --git a/target/riscv/insn_trans/trans_rvzfa.c.inc b/target/riscv/insn_trans/trans_rvzfa.c.inc
20
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/translate.c
19
--- a/target/riscv/insn_trans/trans_rvzfa.c.inc
22
+++ b/target/riscv/translate.c
20
+++ b/target/riscv/insn_trans/trans_rvzfa.c.inc
23
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
21
@@ -XXX,XX +XXX,XX @@ bool trans_fleq_d(DisasContext *ctx, arg_fleq_d *a)
24
ctx->base.is_jmp = DISAS_NORETURN;
22
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
25
}
23
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
26
24
27
-/*
25
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
28
- * Generates address adjustment for PointerMasking
26
+ gen_helper_fleq_d(dest, cpu_env, src1, src2);
29
- */
30
-static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src)
31
+/* Compute a canonical address from a register plus offset. */
32
+static TCGv get_address(DisasContext *ctx, int rs1, int imm)
33
{
34
- TCGv temp;
35
- if (!s->pm_enabled) {
36
- /* Load unmodified address */
37
- return src;
38
- } else {
39
- temp = temp_new(s);
40
- tcg_gen_andc_tl(temp, src, pm_mask);
41
- tcg_gen_or_tl(temp, temp, pm_base);
42
- return temp;
43
+ TCGv addr = temp_new(ctx);
44
+ TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
45
+
46
+ tcg_gen_addi_tl(addr, src1, imm);
47
+ if (ctx->pm_enabled) {
48
+ tcg_gen_and_tl(addr, addr, pm_mask);
49
+ tcg_gen_or_tl(addr, addr, pm_base);
50
+ } else if (get_xl(ctx) == MXL_RV32) {
51
+ tcg_gen_ext32u_tl(addr, addr);
52
}
53
+ return addr;
54
}
55
56
#ifndef CONFIG_USER_ONLY
57
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/riscv/insn_trans/trans_rva.c.inc
60
+++ b/target/riscv/insn_trans/trans_rva.c.inc
61
@@ -XXX,XX +XXX,XX @@
62
63
static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
64
{
65
- TCGv src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
66
+ TCGv src1 = get_address(ctx, a->rs1, 0);
67
68
if (a->rl) {
69
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
70
}
71
- src1 = gen_pm_adjust_address(ctx, src1);
72
tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
73
if (a->aq) {
74
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
75
@@ -XXX,XX +XXX,XX @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
76
TCGLabel *l1 = gen_new_label();
77
TCGLabel *l2 = gen_new_label();
78
79
- src1 = get_gpr(ctx, a->rs1, EXT_ZERO);
80
- src1 = gen_pm_adjust_address(ctx, src1);
81
+ src1 = get_address(ctx, a->rs1, 0);
82
tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);
83
84
/*
85
@@ -XXX,XX +XXX,XX @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
86
MemOp mop)
87
{
88
TCGv dest = dest_gpr(ctx, a->rd);
89
- TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
90
+ TCGv src1 = get_address(ctx, a->rs1, 0);
91
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
92
93
- src1 = gen_pm_adjust_address(ctx, src1);
94
func(dest, src1, src2, ctx->mem_idx, mop);
95
96
gen_set_gpr(ctx, a->rd, dest);
27
gen_set_gpr(ctx, a->rd, dest);
97
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/riscv/insn_trans/trans_rvd.c.inc
100
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
101
@@ -XXX,XX +XXX,XX @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
102
REQUIRE_FPU;
103
REQUIRE_EXT(ctx, RVD);
104
105
- addr = get_gpr(ctx, a->rs1, EXT_NONE);
106
- if (a->imm) {
107
- TCGv temp = temp_new(ctx);
108
- tcg_gen_addi_tl(temp, addr, a->imm);
109
- addr = temp;
110
- }
111
- addr = gen_pm_adjust_address(ctx, addr);
112
-
113
+ addr = get_address(ctx, a->rs1, a->imm);
114
tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
115
116
mark_fs_dirty(ctx);
117
@@ -XXX,XX +XXX,XX @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
118
REQUIRE_FPU;
119
REQUIRE_EXT(ctx, RVD);
120
121
- addr = get_gpr(ctx, a->rs1, EXT_NONE);
122
- if (a->imm) {
123
- TCGv temp = temp_new(ctx);
124
- tcg_gen_addi_tl(temp, addr, a->imm);
125
- addr = temp;
126
- }
127
- addr = gen_pm_adjust_address(ctx, addr);
128
-
129
+ addr = get_address(ctx, a->rs1, a->imm);
130
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
131
-
132
return true;
28
return true;
133
}
29
}
134
30
@@ -XXX,XX +XXX,XX @@ bool trans_fltq_d(DisasContext *ctx, arg_fltq_d *a)
135
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
31
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
136
index XXXXXXX..XXXXXXX 100644
32
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
137
--- a/target/riscv/insn_trans/trans_rvf.c.inc
33
138
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
34
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
139
@@ -XXX,XX +XXX,XX @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
35
+ gen_helper_fltq_d(dest, cpu_env, src1, src2);
140
REQUIRE_FPU;
141
REQUIRE_EXT(ctx, RVF);
142
143
- addr = get_gpr(ctx, a->rs1, EXT_NONE);
144
- if (a->imm) {
145
- TCGv temp = temp_new(ctx);
146
- tcg_gen_addi_tl(temp, addr, a->imm);
147
- addr = temp;
148
- }
149
- addr = gen_pm_adjust_address(ctx, addr);
150
-
151
+ addr = get_address(ctx, a->rs1, a->imm);
152
dest = cpu_fpr[a->rd];
153
tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
154
gen_nanbox_s(dest, dest);
155
@@ -XXX,XX +XXX,XX @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
156
REQUIRE_FPU;
157
REQUIRE_EXT(ctx, RVF);
158
159
- addr = get_gpr(ctx, a->rs1, EXT_NONE);
160
- if (a->imm) {
161
- TCGv temp = tcg_temp_new();
162
- tcg_gen_addi_tl(temp, addr, a->imm);
163
- addr = temp;
164
- }
165
- addr = gen_pm_adjust_address(ctx, addr);
166
-
167
+ addr = get_address(ctx, a->rs1, a->imm);
168
tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
169
-
170
return true;
171
}
172
173
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/riscv/insn_trans/trans_rvi.c.inc
176
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
177
@@ -XXX,XX +XXX,XX @@ static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
178
static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
179
{
180
TCGv dest = dest_gpr(ctx, a->rd);
181
- TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
182
-
183
- if (a->imm) {
184
- TCGv temp = temp_new(ctx);
185
- tcg_gen_addi_tl(temp, addr, a->imm);
186
- addr = temp;
187
- }
188
- addr = gen_pm_adjust_address(ctx, addr);
189
+ TCGv addr = get_address(ctx, a->rs1, a->imm);
190
191
tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop);
192
gen_set_gpr(ctx, a->rd, dest);
36
gen_set_gpr(ctx, a->rd, dest);
193
@@ -XXX,XX +XXX,XX @@ static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
194
195
static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
196
{
197
- TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
198
+ TCGv addr = get_address(ctx, a->rs1, a->imm);
199
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
200
201
- if (a->imm) {
202
- TCGv temp = temp_new(ctx);
203
- tcg_gen_addi_tl(temp, addr, a->imm);
204
- addr = temp;
205
- }
206
- addr = gen_pm_adjust_address(ctx, addr);
207
-
208
tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
209
return true;
37
return true;
210
}
38
}
211
--
39
--
212
2.31.1
40
2.41.0
213
214
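
The get_address() helper introduced in the patch above folds the old gen_pm_adjust_address() logic into one place. In plain C, the address computation it emits looks roughly like the sketch below; pm_mask and pm_base stand in for the TCG globals of the same name, and this models the emitted semantics rather than the translator code itself.

#include <stdint.h>
#include <stdbool.h>

/* Plain-C model of what get_address() emits: register + offset, then either
 * the pointer-masking adjustment or a 32-bit zero-extension on RV32. */
static uint64_t canonical_address(uint64_t rs1_val, int64_t imm,
                                  bool pm_enabled, uint64_t pm_mask,
                                  uint64_t pm_base, bool rv32)
{
    uint64_t addr = rs1_val + imm;

    if (pm_enabled) {
        addr = (addr & pm_mask) | pm_base;
    } else if (rv32) {
        addr = (uint32_t)addr;               /* ext32u */
    }
    return addr;
}
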
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
When sew <= 32 bits, there is no need to extend the scalar register.
3
When writing the upper mtime, we should keep the original lower mtime
4
When sew > 32 bits, if xlen is less than sew, we should sign-extend
4
whose value is given by cpu_riscv_read_rtc() instead of
5
the scalar register, unless the spec explicitly specifies otherwise.
5
cpu_riscv_read_rtc_raw(). The same logic applies to writes to lower mtime.
6
6
7
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20220120122050.41546-21-zhiwei_liu@c-sky.com
9
Message-ID: <20230728082502.26439-1-jason.chien@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
11
---
12
target/riscv/insn_trans/trans_rvv.c.inc | 2 +-
12
hw/intc/riscv_aclint.c | 5 +++--
13
1 file changed, 1 insertion(+), 1 deletion(-)
13
1 file changed, 3 insertions(+), 2 deletions(-)
14
14
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
--- a/hw/intc/riscv_aclint.c
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/hw/intc/riscv_aclint.c
19
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
20
dest = tcg_temp_new_ptr();
20
return;
21
mask = tcg_temp_new_ptr();
21
} else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) {
22
src2 = tcg_temp_new_ptr();
22
uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq);
23
- src1 = get_gpr(s, rs1, EXT_NONE);
23
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
24
+ src1 = get_gpr(s, rs1, EXT_SIGN);
24
25
25
if (addr == mtimer->time_base) {
26
data = FIELD_DP32(data, VDATA, VM, vm);
26
if (size == 4) {
27
data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
27
/* time_lo for RV32/RV64 */
28
- mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r;
29
+ mtimer->time_delta = ((rtc & ~0xFFFFFFFFULL) | value) - rtc_r;
30
} else {
31
/* time for RV64 */
32
mtimer->time_delta = value - rtc_r;
33
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
34
} else {
35
if (size == 4) {
36
/* time_hi for RV32/RV64 */
37
- mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r;
38
+ mtimer->time_delta = (value << 32 | (rtc & 0xFFFFFFFF)) - rtc_r;
39
} else {
40
qemu_log_mask(LOG_GUEST_ERROR,
41
"aclint-mtimer: invalid time_hi write: %08x",
28
--
42
--
29
2.31.1
43
2.41.0
30
31
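The mtimer fix above boils down to one rule: the 32-bit half being written is merged with the guest-visible time (raw rtc plus the current delta), while the new delta is still taken against the raw host rtc. A condensed, standalone sketch of that calculation follows; the helper name and parameters are illustrative, not part of the patch.

#include <stdint.h>
#include <stdbool.h>

/*
 * Sketch of the aclint mtime write rule from the patch above:
 * merge the written half with the guest-visible time (rtc), then
 * compute the new delta against the raw host time (rtc_r).
 */
static uint64_t mtime_write_delta(uint64_t rtc, uint64_t rtc_r,
                                  uint64_t value, bool write_hi)
{
    uint64_t new_time;

    if (write_hi) {
        /* replace the high half, keep the guest-visible low half */
        new_time = (value << 32) | (rtc & 0xFFFFFFFFULL);
    } else {
        /* replace the low half, keep the guest-visible high half */
        new_time = (rtc & ~0xFFFFFFFFULL) | value;
    }
    return new_time - rtc_r;
}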
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
Use cached cur_pmmask and cur_pmbase to infer the
3
The variables whose values are given by cpu_riscv_read_rtc() should be named
4
current PM mode.
4
"rtc". The variables whose value are given by cpu_riscv_read_rtc_raw()
5
should be named "rtc_r".
5
6
6
This may decrease the generated TCG IR by one op when pm_mask_enabled
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
7
is true and pm_base_enabled is false.
8
9
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-id: 20220120122050.41546-15-zhiwei_liu@c-sky.com
9
Message-ID: <20230728082502.26439-2-jason.chien@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
11
---
15
target/riscv/cpu.h | 3 ++-
12
hw/intc/riscv_aclint.c | 6 +++---
16
target/riscv/cpu_helper.c | 24 ++++++------------------
13
1 file changed, 3 insertions(+), 3 deletions(-)
17
target/riscv/translate.c | 12 ++++++++----
18
3 files changed, 16 insertions(+), 23 deletions(-)
19
14
20
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/cpu.h
17
--- a/hw/intc/riscv_aclint.c
23
+++ b/target/riscv/cpu.h
18
+++ b/hw/intc/riscv_aclint.c
24
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, MSTATUS_HS_VS, 18, 2)
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
25
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
20
uint64_t next;
26
FIELD(TB_FLAGS, XL, 20, 2)
21
uint64_t diff;
27
/* If PointerMasking should be applied */
22
28
-FIELD(TB_FLAGS, PM_ENABLED, 22, 1)
23
- uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
29
+FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
24
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
30
+FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
25
31
26
/* Compute the relative hartid w.r.t the socket */
32
#ifdef TARGET_RISCV32
27
hartid = hartid - mtimer->hartid_base;
33
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
28
34
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
29
mtimer->timecmp[hartid] = value;
35
index XXXXXXX..XXXXXXX 100644
30
- if (mtimer->timecmp[hartid] <= rtc_r) {
36
--- a/target/riscv/cpu_helper.c
31
+ if (mtimer->timecmp[hartid] <= rtc) {
37
+++ b/target/riscv/cpu_helper.c
32
/*
38
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
33
* If we're setting an MTIMECMP value in the "past",
39
flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
34
* immediately raise the timer interrupt
40
get_field(env->mstatus_hs, MSTATUS_VS));
35
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
41
}
36
42
- if (riscv_has_ext(env, RVJ)) {
37
/* otherwise, set up the future timer interrupt */
43
- int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
38
qemu_irq_lower(mtimer->timer_irqs[hartid]);
44
- bool pm_enabled = false;
39
- diff = mtimer->timecmp[hartid] - rtc_r;
45
- switch (priv) {
40
+ diff = mtimer->timecmp[hartid] - rtc;
46
- case PRV_U:
41
/* back to ns (note args switched in muldiv64) */
47
- pm_enabled = env->mmte & U_PM_ENABLE;
42
uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
48
- break;
49
- case PRV_S:
50
- pm_enabled = env->mmte & S_PM_ENABLE;
51
- break;
52
- case PRV_M:
53
- pm_enabled = env->mmte & M_PM_ENABLE;
54
- break;
55
- default:
56
- g_assert_not_reached();
57
- }
58
- flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled);
59
- }
60
#endif
61
62
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
63
+ if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
64
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
65
+ }
66
+ if (env->cur_pmbase != 0) {
67
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
68
+ }
69
70
*pflags = flags;
71
}
72
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/riscv/translate.c
75
+++ b/target/riscv/translate.c
76
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
77
/* Space for 3 operands plus 1 extra for address computation. */
78
TCGv temp[4];
79
/* PointerMasking extension */
80
- bool pm_enabled;
81
+ bool pm_mask_enabled;
82
+ bool pm_base_enabled;
83
} DisasContext;
84
85
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
86
@@ -XXX,XX +XXX,XX @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
87
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
88
89
tcg_gen_addi_tl(addr, src1, imm);
90
- if (ctx->pm_enabled) {
91
+ if (ctx->pm_mask_enabled) {
92
tcg_gen_and_tl(addr, addr, pm_mask);
93
- tcg_gen_or_tl(addr, addr, pm_base);
94
} else if (get_xl(ctx) == MXL_RV32) {
95
tcg_gen_ext32u_tl(addr, addr);
96
}
97
+ if (ctx->pm_base_enabled) {
98
+ tcg_gen_or_tl(addr, addr, pm_base);
99
+ }
100
return addr;
101
}
102
103
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
104
ctx->cs = cs;
105
ctx->ntemp = 0;
106
memset(ctx->temp, 0, sizeof(ctx->temp));
107
- ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED);
108
+ ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
109
+ ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
110
ctx->zero = tcg_constant_tl(0);
111
}
112
43
113
--
44
--
114
2.31.1
45
2.41.0
115
116
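For reference, the effective-address rule that the new PM_MASK_ENABLED and PM_BASE_ENABLED flags encode can be written as one standalone function. This is only a sketch of the logic in get_address() above, with the cached cur_pmmask/cur_pmbase values passed in explicitly; the function name is made up for illustration.

#include <stdint.h>
#include <stdbool.h>

/* Sketch of the pointer-masked address computation used above. */
static uint64_t pm_effective_address(uint64_t base, int64_t imm,
                                     uint64_t pmmask, uint64_t pmbase,
                                     bool rv32)
{
    uint64_t full = rv32 ? UINT32_MAX : UINT64_MAX;
    uint64_t addr = base + imm;

    if (pmmask < full) {        /* PM_MASK_ENABLED */
        addr &= pmmask;
    } else if (rv32) {
        addr = (uint32_t)addr;  /* plain RV32 address truncation */
    }
    if (pmbase != 0) {          /* PM_BASE_ENABLED */
        addr |= pmbase;
    }
    return addr;
}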
1
From: Frank Chang <frank.chang@sifive.com>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
All Zve* extensions support the vector configuration instructions.
3
We should not use types dependent on the host arch for target_ucontext.
4
This bug was found when running rv32 applications.
4
5
5
Signed-off-by: Frank Chang <frank.chang@sifive.com>
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220118014522.13613-13-frank.chang@sifive.com
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Message-ID: <20230811055438.1945-1-zhiwei_liu@linux.alibaba.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
12
---
10
target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
13
linux-user/riscv/signal.c | 4 ++--
11
1 file changed, 2 insertions(+), 2 deletions(-)
14
1 file changed, 2 insertions(+), 2 deletions(-)
12
15
13
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c
14
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
--- a/linux-user/riscv/signal.c
16
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/linux-user/riscv/signal.c
17
@@ -XXX,XX +XXX,XX @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
20
@@ -XXX,XX +XXX,XX @@ struct target_sigcontext {
18
TCGv s1, dst;
21
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
19
22
20
if (!require_rvv(s) ||
23
struct target_ucontext {
21
- !(has_ext(s, RVV) || s->ext_zve64f)) {
24
- unsigned long uc_flags;
22
+ !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
25
- struct target_ucontext *uc_link;
23
return false;
26
+ abi_ulong uc_flags;
24
}
27
+ abi_ptr uc_link;
25
28
target_stack_t uc_stack;
26
@@ -XXX,XX +XXX,XX @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
29
target_sigset_t uc_sigmask;
27
TCGv dst;
30
uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
28
29
if (!require_rvv(s) ||
30
- !(has_ext(s, RVV) || s->ext_zve64f)) {
31
+ !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) {
32
return false;
33
}
34
35
--
31
--
36
2.31.1
32
2.41.0
37
33
38
34
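The ucontext fix above matters because linux-user structures describe guest memory: host-sized fields silently change the layout an rv32 guest expects when QEMU runs on a 64-bit host. A toy illustration of the difference follows; these structs are illustrative only, not QEMU's definitions, and the comments assume that abi_ulong/abi_ptr resolve to 32-bit quantities for an rv32 target.

#include <stdint.h>

struct uc_host_sized {        /* wrong: layout depends on the host ABI */
    unsigned long uc_flags;   /* 8 bytes on an LP64 host               */
    void *uc_link;            /* 8 bytes on a 64-bit host              */
};

struct uc_rv32_sized {        /* layout fixed to the rv32 guest ABI    */
    uint32_t uc_flags;        /* roughly what abi_ulong gives here     */
    uint32_t uc_link;         /* roughly what abi_ptr gives here       */
};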
1
From: Frank Chang <frank.chang@sifive.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
The Zve32f extension requires the scalar processor to implement the F
3
In this patch, we create the APLIC and IMSIC FDT helper functions and
4
extension and implement all vector floating-point instructions for
4
remove the M-mode AIA devices when using KVM acceleration.
5
floating-point operands with EEW=32 (i.e., no widening floating-point
6
operations).
7
5
8
Signed-off-by: Frank Chang <frank.chang@sifive.com>
6
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Jim Shu <jim.shu@sifive.com>
10
Message-id: 20220118014522.13613-14-frank.chang@sifive.com
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Message-ID: <20230727102439.22554-2-yongxuan.wang@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
12
---
13
target/riscv/insn_trans/trans_rvv.c.inc | 21 +++++++++++++++++++++
13
hw/riscv/virt.c | 290 +++++++++++++++++++++++-------------------------
14
1 file changed, 21 insertions(+)
14
1 file changed, 137 insertions(+), 153 deletions(-)
15
15
16
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
--- a/hw/riscv/virt.c
19
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/hw/riscv/virt.c
20
@@ -XXX,XX +XXX,XX @@ static bool require_scale_rvf(DisasContext *s)
20
@@ -XXX,XX +XXX,XX @@ static uint32_t imsic_num_bits(uint32_t count)
21
}
21
return ret;
22
}
22
}
23
23
24
+static bool require_zve32f(DisasContext *s)
24
-static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
25
- uint32_t *phandle, uint32_t *intc_phandles,
26
- uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
27
+static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
28
+ uint32_t *intc_phandles, uint32_t msi_phandle,
29
+ bool m_mode, uint32_t imsic_guest_bits)
30
{
31
int cpu, socket;
32
char *imsic_name;
33
MachineState *ms = MACHINE(s);
34
int socket_count = riscv_socket_count(ms);
35
- uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
36
+ uint32_t imsic_max_hart_per_socket;
37
uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
38
39
- *msi_m_phandle = (*phandle)++;
40
- *msi_s_phandle = (*phandle)++;
41
imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
42
imsic_regs = g_new0(uint32_t, socket_count * 4);
43
44
- /* M-level IMSIC node */
45
for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
46
imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
47
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
48
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
49
}
50
- imsic_max_hart_per_socket = 0;
51
- for (socket = 0; socket < socket_count; socket++) {
52
- imsic_addr = memmap[VIRT_IMSIC_M].base +
53
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
54
- imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
55
- imsic_regs[socket * 4 + 0] = 0;
56
- imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
57
- imsic_regs[socket * 4 + 2] = 0;
58
- imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
59
- if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
60
- imsic_max_hart_per_socket = s->soc[socket].num_harts;
61
- }
62
- }
63
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
64
- (unsigned long)memmap[VIRT_IMSIC_M].base);
65
- qemu_fdt_add_subnode(ms->fdt, imsic_name);
66
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
67
- "riscv,imsics");
68
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
69
- FDT_IMSIC_INT_CELLS);
70
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
71
- NULL, 0);
72
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
73
- NULL, 0);
74
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
75
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
76
- qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
77
- socket_count * sizeof(uint32_t) * 4);
78
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
79
- VIRT_IRQCHIP_NUM_MSIS);
80
- if (socket_count > 1) {
81
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
82
- imsic_num_bits(imsic_max_hart_per_socket));
83
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
84
- imsic_num_bits(socket_count));
85
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
86
- IMSIC_MMIO_GROUP_MIN_SHIFT);
87
- }
88
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_m_phandle);
89
-
90
- g_free(imsic_name);
91
92
- /* S-level IMSIC node */
93
- for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
94
- imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
95
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
96
- }
97
- imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
98
imsic_max_hart_per_socket = 0;
99
for (socket = 0; socket < socket_count; socket++) {
100
- imsic_addr = memmap[VIRT_IMSIC_S].base +
101
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
102
+ imsic_addr = base_addr + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
103
imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
104
s->soc[socket].num_harts;
105
imsic_regs[socket * 4 + 0] = 0;
106
@@ -XXX,XX +XXX,XX @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
107
imsic_max_hart_per_socket = s->soc[socket].num_harts;
108
}
109
}
110
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
111
- (unsigned long)memmap[VIRT_IMSIC_S].base);
112
+
113
+ imsic_name = g_strdup_printf("/soc/imsics@%lx", (unsigned long)base_addr);
114
qemu_fdt_add_subnode(ms->fdt, imsic_name);
115
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
116
- "riscv,imsics");
117
+ qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible", "riscv,imsics");
118
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
119
- FDT_IMSIC_INT_CELLS);
120
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
121
- NULL, 0);
122
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
123
- NULL, 0);
124
+ FDT_IMSIC_INT_CELLS);
125
+ qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
126
+ qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
127
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
128
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
129
+ imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
130
qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
131
- socket_count * sizeof(uint32_t) * 4);
132
+ socket_count * sizeof(uint32_t) * 4);
133
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
134
- VIRT_IRQCHIP_NUM_MSIS);
135
+ VIRT_IRQCHIP_NUM_MSIS);
136
+
137
if (imsic_guest_bits) {
138
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,guest-index-bits",
139
- imsic_guest_bits);
140
+ imsic_guest_bits);
141
}
142
+
143
if (socket_count > 1) {
144
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
145
- imsic_num_bits(imsic_max_hart_per_socket));
146
+ imsic_num_bits(imsic_max_hart_per_socket));
147
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
148
- imsic_num_bits(socket_count));
149
+ imsic_num_bits(socket_count));
150
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
151
- IMSIC_MMIO_GROUP_MIN_SHIFT);
152
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
153
}
154
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_s_phandle);
155
- g_free(imsic_name);
156
+ qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
157
158
+ g_free(imsic_name);
159
g_free(imsic_regs);
160
g_free(imsic_cells);
161
}
162
163
-static void create_fdt_socket_aplic(RISCVVirtState *s,
164
- const MemMapEntry *memmap, int socket,
165
- uint32_t msi_m_phandle,
166
- uint32_t msi_s_phandle,
167
- uint32_t *phandle,
168
- uint32_t *intc_phandles,
169
- uint32_t *aplic_phandles)
170
+static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
171
+ uint32_t *phandle, uint32_t *intc_phandles,
172
+ uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
25
+{
173
+{
26
+ /* RVV + Zve32f = RVV. */
174
+ *msi_m_phandle = (*phandle)++;
27
+ if (has_ext(s, RVV)) {
175
+ *msi_s_phandle = (*phandle)++;
28
+ return true;
176
+
177
+ if (!kvm_enabled()) {
178
+ /* M-level IMSIC node */
179
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
180
+ *msi_m_phandle, true, 0);
29
+ }
181
+ }
30
+
182
+
31
+ /* Zve32f doesn't support FP64. (Section 18.2) */
183
+ /* S-level IMSIC node */
32
+ return s->ext_zve32f ? s->sew <= MO_32 : true;
184
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
185
+ *msi_s_phandle, false,
186
+ imsic_num_bits(s->aia_guests + 1));
187
+
33
+}
188
+}
34
+
189
+
35
static bool require_zve64f(DisasContext *s)
190
+static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
191
+ unsigned long aplic_addr, uint32_t aplic_size,
192
+ uint32_t msi_phandle,
193
+ uint32_t *intc_phandles,
194
+ uint32_t aplic_phandle,
195
+ uint32_t aplic_child_phandle,
196
+ bool m_mode)
36
{
197
{
37
/* RVV + Zve64f = RVV. */
198
int cpu;
38
@@ -XXX,XX +XXX,XX @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
199
char *aplic_name;
39
require_rvf(s) &&
200
uint32_t *aplic_cells;
40
vext_check_isa_ill(s) &&
201
- unsigned long aplic_addr;
41
vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
202
MachineState *ms = MACHINE(s);
42
+ require_zve32f(s) &&
203
- uint32_t aplic_m_phandle, aplic_s_phandle;
43
require_zve64f(s);
204
205
- aplic_m_phandle = (*phandle)++;
206
- aplic_s_phandle = (*phandle)++;
207
aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
208
209
- /* M-level APLIC node */
210
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
211
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
212
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
213
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
214
}
215
- aplic_addr = memmap[VIRT_APLIC_M].base +
216
- (memmap[VIRT_APLIC_M].size * socket);
217
+
218
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
219
qemu_fdt_add_subnode(ms->fdt, aplic_name);
220
qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
221
qemu_fdt_setprop_cell(ms->fdt, aplic_name,
222
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
223
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
224
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
225
+
226
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
227
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
228
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
229
+ aplic_cells,
230
+ s->soc[socket].num_harts * sizeof(uint32_t) * 2);
231
} else {
232
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
233
- msi_m_phandle);
234
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
235
}
236
+
237
qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
238
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
239
+ 0x0, aplic_addr, 0x0, aplic_size);
240
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
241
- VIRT_IRQCHIP_NUM_SOURCES);
242
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
243
- aplic_s_phandle);
244
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
245
- aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
246
+ VIRT_IRQCHIP_NUM_SOURCES);
247
+
248
+ if (aplic_child_phandle) {
249
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
250
+ aplic_child_phandle);
251
+ qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
252
+ aplic_child_phandle, 0x1,
253
+ VIRT_IRQCHIP_NUM_SOURCES);
254
+ }
255
+
256
riscv_socket_fdt_write_id(ms, aplic_name, socket);
257
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_m_phandle);
258
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle);
259
+
260
g_free(aplic_name);
261
+ g_free(aplic_cells);
262
+}
263
264
- /* S-level APLIC node */
265
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
266
- aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
267
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
268
+static void create_fdt_socket_aplic(RISCVVirtState *s,
269
+ const MemMapEntry *memmap, int socket,
270
+ uint32_t msi_m_phandle,
271
+ uint32_t msi_s_phandle,
272
+ uint32_t *phandle,
273
+ uint32_t *intc_phandles,
274
+ uint32_t *aplic_phandles)
275
+{
276
+ char *aplic_name;
277
+ unsigned long aplic_addr;
278
+ MachineState *ms = MACHINE(s);
279
+ uint32_t aplic_m_phandle, aplic_s_phandle;
280
+
281
+ aplic_m_phandle = (*phandle)++;
282
+ aplic_s_phandle = (*phandle)++;
283
+
284
+ if (!kvm_enabled()) {
285
+ /* M-level APLIC node */
286
+ aplic_addr = memmap[VIRT_APLIC_M].base +
287
+ (memmap[VIRT_APLIC_M].size * socket);
288
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
289
+ msi_m_phandle, intc_phandles,
290
+ aplic_m_phandle, aplic_s_phandle,
291
+ true);
292
}
293
+
294
+ /* S-level APLIC node */
295
aplic_addr = memmap[VIRT_APLIC_S].base +
296
(memmap[VIRT_APLIC_S].size * socket);
297
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
298
+ msi_s_phandle, intc_phandles,
299
+ aplic_s_phandle, 0,
300
+ false);
301
+
302
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
303
- qemu_fdt_add_subnode(ms->fdt, aplic_name);
304
- qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
305
- qemu_fdt_setprop_cell(ms->fdt, aplic_name,
306
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
307
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
308
- if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
309
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
310
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
311
- } else {
312
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
313
- msi_s_phandle);
314
- }
315
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
316
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
317
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
318
- VIRT_IRQCHIP_NUM_SOURCES);
319
- riscv_socket_fdt_write_id(ms, aplic_name, socket);
320
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_s_phandle);
321
322
if (!socket) {
323
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
324
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
325
326
g_free(aplic_name);
327
328
- g_free(aplic_cells);
329
aplic_phandles[socket] = aplic_s_phandle;
44
}
330
}
45
331
46
@@ -XXX,XX +XXX,XX @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
332
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
47
require_rvf(s) &&
333
int i;
48
vext_check_isa_ill(s) &&
334
hwaddr addr;
49
vext_check_ss(s, a->rd, a->rs2, a->vm) &&
335
uint32_t guest_bits;
50
+ require_zve32f(s) &&
336
- DeviceState *aplic_m;
51
require_zve64f(s);
337
- bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
338
+ DeviceState *aplic_s = NULL;
339
+ DeviceState *aplic_m = NULL;
340
+ bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
341
342
if (msimode) {
343
- /* Per-socket M-level IMSICs */
344
- addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
345
- for (i = 0; i < hart_count; i++) {
346
- riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
347
- base_hartid + i, true, 1,
348
- VIRT_IRQCHIP_NUM_MSIS);
349
+ if (!kvm_enabled()) {
350
+ /* Per-socket M-level IMSICs */
351
+ addr = memmap[VIRT_IMSIC_M].base +
352
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
353
+ for (i = 0; i < hart_count; i++) {
354
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
355
+ base_hartid + i, true, 1,
356
+ VIRT_IRQCHIP_NUM_MSIS);
357
+ }
358
}
359
360
/* Per-socket S-level IMSICs */
361
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
362
}
363
}
364
365
- /* Per-socket M-level APLIC */
366
- aplic_m = riscv_aplic_create(
367
- memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
368
- memmap[VIRT_APLIC_M].size,
369
- (msimode) ? 0 : base_hartid,
370
- (msimode) ? 0 : hart_count,
371
- VIRT_IRQCHIP_NUM_SOURCES,
372
- VIRT_IRQCHIP_NUM_PRIO_BITS,
373
- msimode, true, NULL);
374
-
375
- if (aplic_m) {
376
- /* Per-socket S-level APLIC */
377
- riscv_aplic_create(
378
- memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
379
- memmap[VIRT_APLIC_S].size,
380
- (msimode) ? 0 : base_hartid,
381
- (msimode) ? 0 : hart_count,
382
- VIRT_IRQCHIP_NUM_SOURCES,
383
- VIRT_IRQCHIP_NUM_PRIO_BITS,
384
- msimode, false, aplic_m);
385
+ if (!kvm_enabled()) {
386
+ /* Per-socket M-level APLIC */
387
+ aplic_m = riscv_aplic_create(memmap[VIRT_APLIC_M].base +
388
+ socket * memmap[VIRT_APLIC_M].size,
389
+ memmap[VIRT_APLIC_M].size,
390
+ (msimode) ? 0 : base_hartid,
391
+ (msimode) ? 0 : hart_count,
392
+ VIRT_IRQCHIP_NUM_SOURCES,
393
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
394
+ msimode, true, NULL);
395
}
396
397
- return aplic_m;
398
+ /* Per-socket S-level APLIC */
399
+ aplic_s = riscv_aplic_create(memmap[VIRT_APLIC_S].base +
400
+ socket * memmap[VIRT_APLIC_S].size,
401
+ memmap[VIRT_APLIC_S].size,
402
+ (msimode) ? 0 : base_hartid,
403
+ (msimode) ? 0 : hart_count,
404
+ VIRT_IRQCHIP_NUM_SOURCES,
405
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
406
+ msimode, false, aplic_m);
407
+
408
+ return kvm_enabled() ? aplic_s : aplic_m;
52
}
409
}
53
410
54
@@ -XXX,XX +XXX,XX @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
411
static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
55
vext_check_isa_ill(s) &&
56
/* OPFV instructions ignore vs1 check */
57
vext_check_ss(s, a->rd, a->rs2, a->vm) &&
58
+ require_zve32f(s) &&
59
require_zve64f(s);
60
}
61
62
@@ -XXX,XX +XXX,XX @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
63
require_rvf(s) &&
64
vext_check_isa_ill(s) &&
65
vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
66
+ require_zve32f(s) &&
67
require_zve64f(s);
68
}
69
70
@@ -XXX,XX +XXX,XX @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
71
require_rvf(s) &&
72
vext_check_isa_ill(s) &&
73
vext_check_ms(s, a->rd, a->rs2) &&
74
+ require_zve32f(s) &&
75
require_zve64f(s);
76
}
77
78
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
79
require_rvf(s) &&
80
vext_check_isa_ill(s) &&
81
require_align(a->rd, s->lmul) &&
82
+ require_zve32f(s) &&
83
require_zve64f(s)) {
84
gen_set_rm(s, RISCV_FRM_DYN);
85
86
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
87
if (require_rvv(s) &&
88
require_rvf(s) &&
89
vext_check_isa_ill(s) &&
90
+ require_zve32f(s) &&
91
require_zve64f(s)) {
92
gen_set_rm(s, RISCV_FRM_DYN);
93
94
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
95
if (require_rvv(s) &&
96
require_rvf(s) &&
97
vext_check_isa_ill(s) &&
98
+ require_zve32f(s) &&
99
require_zve64f(s)) {
100
gen_set_rm(s, RISCV_FRM_DYN);
101
102
@@ -XXX,XX +XXX,XX @@ static bool fslideup_check(DisasContext *s, arg_rmrr *a)
103
{
104
return slideup_check(s, a) &&
105
require_rvf(s) &&
106
+ require_zve32f(s) &&
107
require_zve64f(s);
108
}
109
110
@@ -XXX,XX +XXX,XX @@ static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
111
{
112
return slidedown_check(s, a) &&
113
require_rvf(s) &&
114
+ require_zve32f(s) &&
115
require_zve64f(s);
116
}
117
118
--
412
--
119
2.31.1
413
2.41.0
120
121
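The refactored FDT code above sizes riscv,hart-index-bits, riscv,group-index-bits and riscv,guest-index-bits from plain counts via imsic_num_bits(). Assuming its contract is "smallest n with (1 << n) >= count", a minimal sketch of such a helper looks like this; the function below is illustrative, not copied from the series.

#include <stdint.h>

/* Smallest number of index bits able to address "count" entries. */
static uint32_t index_bits(uint32_t count)
{
    uint32_t n = 0;

    while (n < 32 && (1U << n) < count) {
        n++;
    }
    return n;   /* e.g. 1 -> 0, 2 -> 1, 3 or 4 -> 2 */
}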
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
Get the ISA info from KVM during KVM init.
3
We check for in-kernel irqchip support when using KVM acceleration.
4
4
5
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
5
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
6
Signed-off-by: Mingwang Li <limingwang@huawei.com>
6
Reviewed-by: Jim Shu <jim.shu@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Anup Patel <anup.patel@wdc.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Message-id: 20220112081329.1835-4-jiangyifei@huawei.com
9
Message-ID: <20230727102439.22554-3-yongxuan.wang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
11
---
12
target/riscv/kvm.c | 34 +++++++++++++++++++++++++++++++++-
12
target/riscv/kvm.c | 10 +++++++++-
13
1 file changed, 33 insertions(+), 1 deletion(-)
13
1 file changed, 9 insertions(+), 1 deletion(-)
14
14
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/kvm.c
17
--- a/target/riscv/kvm.c
18
+++ b/target/riscv/kvm.c
18
+++ b/target/riscv/kvm.c
19
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init(MachineState *ms, KVMState *s)
20
#include "qemu/log.h"
20
21
#include "hw/loader.h"
21
int kvm_arch_irqchip_create(KVMState *s)
22
23
+static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
24
+ uint64_t idx)
25
+{
26
+ uint64_t id = KVM_REG_RISCV | type | idx;
27
+
28
+ switch (riscv_cpu_mxl(env)) {
29
+ case MXL_RV32:
30
+ id |= KVM_REG_SIZE_U32;
31
+ break;
32
+ case MXL_RV64:
33
+ id |= KVM_REG_SIZE_U64;
34
+ break;
35
+ default:
36
+ g_assert_not_reached();
37
+ }
38
+ return id;
39
+}
40
+
41
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
42
KVM_CAP_LAST_INFO
43
};
44
@@ -XXX,XX +XXX,XX @@ void kvm_arch_init_irq_routing(KVMState *s)
45
46
int kvm_arch_init_vcpu(CPUState *cs)
47
{
22
{
48
- return 0;
23
- return 0;
49
+ int ret = 0;
24
+ if (kvm_kernel_irqchip_split()) {
50
+ target_ulong isa;
25
+ error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
51
+ RISCVCPU *cpu = RISCV_CPU(cs);
26
+ exit(1);
52
+ CPURISCVState *env = &cpu->env;
27
+ }
53
+ uint64_t id;
54
+
28
+
55
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
29
+ /*
56
+ KVM_REG_RISCV_CONFIG_REG(isa));
30
+ * We can create the VAIA using the newer device control API.
57
+ ret = kvm_get_one_reg(cs, id, &isa);
31
+ */
58
+ if (ret) {
32
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
59
+ return ret;
60
+ }
61
+ env->misa_ext = isa;
62
+
63
+ return ret;
64
}
33
}
65
34
66
int kvm_arch_msi_data_to_gsi(uint32_t data)
35
int kvm_arch_process_async_events(CPUState *cs)
67
--
36
--
68
2.31.1
37
2.41.0
69
70
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
Use char-fe to handle the console SBI calls, which implements an early
3
We create a vAIA chip by using the KVM_DEV_TYPE_RISCV_AIA and then set up
4
console for I/O when 'earlycon=sbi' is added to the kernel parameters.
4
the chip with the KVM_DEV_RISCV_AIA_GRP_* APIs.
5
We also extend KVM accelerator to specify the KVM AIA mode. The "riscv-aia"
6
parameter is passed along with --accel in QEMU command-line.
7
1) "riscv-aia=emul": IMSIC is emulated by hypervisor
8
2) "riscv-aia=hwaccel": use hardware guest IMSIC
9
3) "riscv-aia=auto": use the hardware guest IMSICs whenever available
10
otherwise we fallback to software emulation.
5
11
6
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
12
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
7
Signed-off-by: Mingwang Li <limingwang@huawei.com>
13
Reviewed-by: Jim Shu <jim.shu@sifive.com>
8
Reviewed-by: Anup Patel <anup.patel@wdc.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
15
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Message-id: 20220112081329.1835-9-jiangyifei@huawei.com
16
Message-ID: <20230727102439.22554-4-yongxuan.wang@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
18
---
13
target/riscv/sbi_ecall_interface.h | 72 ++++++++++++++++++++++++++++++
19
target/riscv/kvm_riscv.h | 4 +
14
target/riscv/kvm.c | 42 ++++++++++++++++-
20
target/riscv/kvm.c | 186 +++++++++++++++++++++++++++++++++++++++
15
2 files changed, 113 insertions(+), 1 deletion(-)
21
2 files changed, 190 insertions(+)
16
create mode 100644 target/riscv/sbi_ecall_interface.h
17
22
18
diff --git a/target/riscv/sbi_ecall_interface.h b/target/riscv/sbi_ecall_interface.h
23
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
19
new file mode 100644
24
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX
25
--- a/target/riscv/kvm_riscv.h
21
--- /dev/null
26
+++ b/target/riscv/kvm_riscv.h
22
+++ b/target/riscv/sbi_ecall_interface.h
23
@@ -XXX,XX +XXX,XX @@
27
@@ -XXX,XX +XXX,XX @@
24
+/*
28
void kvm_riscv_init_user_properties(Object *cpu_obj);
25
+ * SPDX-License-Identifier: BSD-2-Clause
29
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
26
+ *
30
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
27
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
31
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
28
+ *
32
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
29
+ * Authors:
33
+ uint64_t aplic_base, uint64_t imsic_base,
30
+ * Anup Patel <anup.patel@wdc.com>
34
+ uint64_t guest_num);
31
+ */
35
32
+
36
#endif
33
+#ifndef __SBI_ECALL_INTERFACE_H__
34
+#define __SBI_ECALL_INTERFACE_H__
35
+
36
+/* clang-format off */
37
+
38
+/* SBI Extension IDs */
39
+#define SBI_EXT_0_1_SET_TIMER 0x0
40
+#define SBI_EXT_0_1_CONSOLE_PUTCHAR 0x1
41
+#define SBI_EXT_0_1_CONSOLE_GETCHAR 0x2
42
+#define SBI_EXT_0_1_CLEAR_IPI 0x3
43
+#define SBI_EXT_0_1_SEND_IPI 0x4
44
+#define SBI_EXT_0_1_REMOTE_FENCE_I 0x5
45
+#define SBI_EXT_0_1_REMOTE_SFENCE_VMA 0x6
46
+#define SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID 0x7
47
+#define SBI_EXT_0_1_SHUTDOWN 0x8
48
+#define SBI_EXT_BASE 0x10
49
+#define SBI_EXT_TIME 0x54494D45
50
+#define SBI_EXT_IPI 0x735049
51
+#define SBI_EXT_RFENCE 0x52464E43
52
+#define SBI_EXT_HSM 0x48534D
53
+
54
+/* SBI function IDs for BASE extension*/
55
+#define SBI_EXT_BASE_GET_SPEC_VERSION 0x0
56
+#define SBI_EXT_BASE_GET_IMP_ID 0x1
57
+#define SBI_EXT_BASE_GET_IMP_VERSION 0x2
58
+#define SBI_EXT_BASE_PROBE_EXT 0x3
59
+#define SBI_EXT_BASE_GET_MVENDORID 0x4
60
+#define SBI_EXT_BASE_GET_MARCHID 0x5
61
+#define SBI_EXT_BASE_GET_MIMPID 0x6
62
+
63
+/* SBI function IDs for TIME extension*/
64
+#define SBI_EXT_TIME_SET_TIMER 0x0
65
+
66
+/* SBI function IDs for IPI extension*/
67
+#define SBI_EXT_IPI_SEND_IPI 0x0
68
+
69
+/* SBI function IDs for RFENCE extension*/
70
+#define SBI_EXT_RFENCE_REMOTE_FENCE_I 0x0
71
+#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA 0x1
72
+#define SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID 0x2
73
+#define SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA 0x3
74
+#define SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID 0x4
75
+#define SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA 0x5
76
+#define SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID 0x6
77
+
78
+/* SBI function IDs for HSM extension */
79
+#define SBI_EXT_HSM_HART_START 0x0
80
+#define SBI_EXT_HSM_HART_STOP 0x1
81
+#define SBI_EXT_HSM_HART_GET_STATUS 0x2
82
+
83
+#define SBI_HSM_HART_STATUS_STARTED 0x0
84
+#define SBI_HSM_HART_STATUS_STOPPED 0x1
85
+#define SBI_HSM_HART_STATUS_START_PENDING 0x2
86
+#define SBI_HSM_HART_STATUS_STOP_PENDING 0x3
87
+
88
+#define SBI_SPEC_VERSION_MAJOR_OFFSET 24
89
+#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
90
+#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
91
+#define SBI_EXT_VENDOR_START 0x09000000
92
+#define SBI_EXT_VENDOR_END 0x09FFFFFF
93
+/* clang-format on */
94
+
95
+#endif
96
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
37
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
97
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
98
--- a/target/riscv/kvm.c
39
--- a/target/riscv/kvm.c
99
+++ b/target/riscv/kvm.c
40
+++ b/target/riscv/kvm.c
100
@@ -XXX,XX +XXX,XX @@
41
@@ -XXX,XX +XXX,XX @@
42
#include "exec/address-spaces.h"
43
#include "hw/boards.h"
44
#include "hw/irq.h"
45
+#include "hw/intc/riscv_imsic.h"
101
#include "qemu/log.h"
46
#include "qemu/log.h"
102
#include "hw/loader.h"
47
#include "hw/loader.h"
103
#include "kvm_riscv.h"
48
#include "kvm_riscv.h"
104
+#include "sbi_ecall_interface.h"
49
@@ -XXX,XX +XXX,XX @@
105
+#include "chardev/char-fe.h"
50
#include "chardev/char-fe.h"
51
#include "migration/migration.h"
52
#include "sysemu/runstate.h"
53
+#include "hw/riscv/numa.h"
106
54
107
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
55
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
108
uint64_t idx)
56
uint64_t idx)
109
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_stop_on_emulation_error(CPUState *cs)
57
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_cpu_check_are_resettable(void)
110
return true;
58
return true;
111
}
59
}
112
60
113
+static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
61
+static int aia_mode;
114
+{
62
+
115
+ int ret = 0;
63
+static const char *kvm_aia_mode_str(uint64_t mode)
116
+ unsigned char ch;
64
+{
117
+ switch (run->riscv_sbi.extension_id) {
65
+ switch (mode) {
118
+ case SBI_EXT_0_1_CONSOLE_PUTCHAR:
66
+ case KVM_DEV_RISCV_AIA_MODE_EMUL:
119
+ ch = run->riscv_sbi.args[0];
67
+ return "emul";
120
+ qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
68
+ case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
121
+ break;
69
+ return "hwaccel";
122
+ case SBI_EXT_0_1_CONSOLE_GETCHAR:
70
+ case KVM_DEV_RISCV_AIA_MODE_AUTO:
123
+ ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
71
+ default:
124
+ if (ret == sizeof(ch)) {
72
+ return "auto";
125
+ run->riscv_sbi.args[0] = ch;
73
+ };
126
+ } else {
74
+}
127
+ run->riscv_sbi.args[0] = -1;
75
+
76
+static char *riscv_get_kvm_aia(Object *obj, Error **errp)
77
+{
78
+ return g_strdup(kvm_aia_mode_str(aia_mode));
79
+}
80
+
81
+static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
82
+{
83
+ if (!strcmp(val, "emul")) {
84
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
85
+ } else if (!strcmp(val, "hwaccel")) {
86
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
87
+ } else if (!strcmp(val, "auto")) {
88
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
89
+ } else {
90
+ error_setg(errp, "Invalid KVM AIA mode");
91
+ error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
92
+ }
93
+}
94
+
95
void kvm_arch_accel_class_init(ObjectClass *oc)
96
{
97
+ object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
98
+ riscv_set_kvm_aia);
99
+ object_class_property_set_description(oc, "riscv-aia",
100
+ "Set KVM AIA mode. Valid values are "
101
+ "emul, hwaccel, and auto. Default "
102
+ "is auto.");
103
+ object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
104
+ "auto");
105
+}
106
+
107
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
108
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
109
+ uint64_t aplic_base, uint64_t imsic_base,
110
+ uint64_t guest_num)
111
+{
112
+ int ret, i;
113
+ int aia_fd = -1;
114
+ uint64_t default_aia_mode;
115
+ uint64_t socket_count = riscv_socket_count(machine);
116
+ uint64_t max_hart_per_socket = 0;
117
+ uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
118
+ uint64_t socket_bits, hart_bits, guest_bits;
119
+
120
+ aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
121
+
122
+ if (aia_fd < 0) {
123
+ error_report("Unable to create in-kernel irqchip");
124
+ exit(1);
125
+ }
126
+
127
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
128
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
129
+ &default_aia_mode, false, NULL);
130
+ if (ret < 0) {
131
+ error_report("KVM AIA: failed to get current KVM AIA mode");
132
+ exit(1);
133
+ }
134
+ qemu_log("KVM AIA: default mode is %s\n",
135
+ kvm_aia_mode_str(default_aia_mode));
136
+
137
+ if (default_aia_mode != aia_mode) {
138
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
139
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
140
+ &aia_mode, true, NULL);
141
+ if (ret < 0)
142
+ warn_report("KVM AIA: failed to set KVM AIA mode");
143
+ else
144
+ qemu_log("KVM AIA: set current mode to %s\n",
145
+ kvm_aia_mode_str(aia_mode));
146
+ }
147
+
148
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
149
+ KVM_DEV_RISCV_AIA_CONFIG_SRCS,
150
+ &aia_irq_num, true, NULL);
151
+ if (ret < 0) {
152
+ error_report("KVM AIA: failed to set number of input irq lines");
153
+ exit(1);
154
+ }
155
+
156
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
157
+ KVM_DEV_RISCV_AIA_CONFIG_IDS,
158
+ &aia_msi_num, true, NULL);
159
+ if (ret < 0) {
160
+ error_report("KVM AIA: failed to set number of msi");
161
+ exit(1);
162
+ }
163
+
164
+ socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
165
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
166
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
167
+ &socket_bits, true, NULL);
168
+ if (ret < 0) {
169
+ error_report("KVM AIA: failed to set group_bits");
170
+ exit(1);
171
+ }
172
+
173
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
174
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
175
+ &group_shift, true, NULL);
176
+ if (ret < 0) {
177
+ error_report("KVM AIA: failed to set group_shift");
178
+ exit(1);
179
+ }
180
+
181
+ guest_bits = guest_num == 0 ? 0 :
182
+ find_last_bit(&guest_num, BITS_PER_LONG) + 1;
183
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
184
+ KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
185
+ &guest_bits, true, NULL);
186
+ if (ret < 0) {
187
+ error_report("KVM AIA: failed to set guest_bits");
188
+ exit(1);
189
+ }
190
+
191
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
192
+ KVM_DEV_RISCV_AIA_ADDR_APLIC,
193
+ &aplic_base, true, NULL);
194
+ if (ret < 0) {
195
+ error_report("KVM AIA: failed to set the base address of APLIC");
196
+ exit(1);
197
+ }
198
+
199
+ for (socket = 0; socket < socket_count; socket++) {
200
+ socket_imsic_base = imsic_base + socket * (1U << group_shift);
201
+ hart_count = riscv_socket_hart_count(machine, socket);
202
+ base_hart = riscv_socket_first_hartid(machine, socket);
203
+
204
+ if (max_hart_per_socket < hart_count) {
205
+ max_hart_per_socket = hart_count;
128
+ }
206
+ }
129
+ break;
207
+
130
+ default:
208
+ for (i = 0; i < hart_count; i++) {
131
+ qemu_log_mask(LOG_UNIMP,
209
+ imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
132
+ "%s: un-handled SBI EXIT, specific reasons is %lu\n",
210
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
133
+ __func__, run->riscv_sbi.extension_id);
211
+ KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
134
+ ret = -1;
212
+ &imsic_addr, true, NULL);
135
+ break;
213
+ if (ret < 0) {
136
+ }
214
+ error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
137
+ return ret;
215
+ exit(1);
138
+}
216
+ }
139
+
217
+ }
140
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
218
+ }
141
{
219
+
142
- return 0;
220
+ hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
143
+ int ret = 0;
221
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
144
+ switch (run->exit_reason) {
222
+ KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
145
+ case KVM_EXIT_RISCV_SBI:
223
+ &hart_bits, true, NULL);
146
+ ret = kvm_riscv_handle_sbi(cs, run);
224
+ if (ret < 0) {
147
+ break;
225
+ error_report("KVM AIA: failed to set hart_bits");
148
+ default:
226
+ exit(1);
149
+ qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
227
+ }
150
+ __func__, run->exit_reason);
228
+
151
+ ret = -1;
229
+ if (kvm_has_gsi_routing()) {
152
+ break;
230
+ for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
153
+ }
231
+ /* KVM AIA only has one APLIC instance */
154
+ return ret;
232
+ kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
233
+ }
234
+ kvm_gsi_routing_allowed = true;
235
+ kvm_irqchip_commit_routes(kvm_state);
236
+ }
237
+
238
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
239
+ KVM_DEV_RISCV_AIA_CTRL_INIT,
240
+ NULL, true, NULL);
241
+ if (ret < 0) {
242
+ error_report("KVM AIA: initialized fail");
243
+ exit(1);
244
+ }
245
+
246
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
155
}
247
}
156
157
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
158
--
248
--
159
2.31.1
249
2.41.0
160
161
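The in-kernel AIA setup above all goes through the KVM device control API: create a KVM_DEV_TYPE_RISCV_AIA device, write its CONFIG and ADDR attributes, then issue CTRL_INIT. Below is a condensed sketch of that flow, stripped of error handling, GSI routing and the per-hart IMSIC address loop; it assumes QEMU's kvm_create_device()/kvm_device_access() helpers exactly as used in the patch. On the command line the mode would then be chosen with something along the lines of "-accel kvm,riscv-aia=auto" next to "-machine virt,aia=aplic-imsic"; that invocation is illustrative, not quoted from the series.

/* Condensed sketch of the kvm_riscv_aia_create() flow above. */
static void vaia_setup_sketch(uint64_t nr_irqs, uint64_t nr_msis,
                              uint64_t aplic_base)
{
    int fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);

    /* 1. geometry / configuration attributes */
    kvm_device_access(fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                      KVM_DEV_RISCV_AIA_CONFIG_SRCS, &nr_irqs, true, NULL);
    kvm_device_access(fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                      KVM_DEV_RISCV_AIA_CONFIG_IDS, &nr_msis, true, NULL);

    /* 2. MMIO placement (per-hart IMSIC addresses omitted) */
    kvm_device_access(fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                      KVM_DEV_RISCV_AIA_ADDR_APLIC, &aplic_base, true, NULL);

    /* 3. finally initialize the in-kernel chip */
    kvm_device_access(fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
                      KVM_DEV_RISCV_AIA_CTRL_INIT, NULL, true, NULL);
}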
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
Put the GPR, CSR and FP registers to KVM with the KVM_SET_ONE_REG ioctl
3
KVM AIA can't emulate APLIC only. When the "aia=aplic" parameter is passed,
4
the APLIC devices are emulated by QEMU. For "aia=aplic-imsic", remove the
5
mmio operations of APLIC when using KVM AIA and send wired interrupt
6
signal via KVM_IRQ_LINE API.
7
After KVM AIA is enabled, MSI messages are delivered by the KVM_SIGNAL_MSI API
8
when the IMSICs receive mmio write requests.
4
9
5
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
10
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
6
Signed-off-by: Mingwang Li <limingwang@huawei.com>
11
Reviewed-by: Jim Shu <jim.shu@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Anup Patel <anup.patel@wdc.com>
13
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Message-id: 20220112081329.1835-6-jiangyifei@huawei.com
14
Message-ID: <20230727102439.22554-5-yongxuan.wang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
16
---
12
target/riscv/kvm.c | 104 ++++++++++++++++++++++++++++++++++++++++++++-
17
hw/intc/riscv_aplic.c | 56 ++++++++++++++++++++++++++++++-------------
13
1 file changed, 103 insertions(+), 1 deletion(-)
18
hw/intc/riscv_imsic.c | 25 +++++++++++++++----
19
2 files changed, 61 insertions(+), 20 deletions(-)
14
20
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
21
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/kvm.c
23
--- a/hw/intc/riscv_aplic.c
18
+++ b/target/riscv/kvm.c
24
+++ b/hw/intc/riscv_aplic.c
19
@@ -XXX,XX +XXX,XX @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
25
@@ -XXX,XX +XXX,XX @@
20
} \
26
#include "hw/irq.h"
21
} while (0)
27
#include "target/riscv/cpu.h"
22
28
#include "sysemu/sysemu.h"
23
+#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
29
+#include "sysemu/kvm.h"
24
+ do { \
30
#include "migration/vmstate.h"
25
+ int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
31
26
+ if (ret) { \
32
#define APLIC_MAX_IDC (1UL << 14)
27
+ return ret; \
33
@@ -XXX,XX +XXX,XX @@
28
+ } \
34
29
+ } while (0)
35
#define APLIC_IDC_CLAIMI 0x1c
30
+
36
31
static int kvm_riscv_get_regs_core(CPUState *cs)
37
+/*
32
{
38
+ * KVM AIA only supports APLIC MSI, fallback to QEMU emulation if we want to use
33
int ret = 0;
39
+ * APLIC Wired.
34
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_get_regs_core(CPUState *cs)
40
+ */
35
return ret;
41
+static bool is_kvm_aia(bool msimode)
36
}
37
38
+static int kvm_riscv_put_regs_core(CPUState *cs)
39
+{
42
+{
40
+ int ret = 0;
43
+ return kvm_irqchip_in_kernel() && msimode;
41
+ int i;
42
+ target_ulong reg;
43
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
44
+
45
+ reg = env->pc;
46
+ ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
47
+ if (ret) {
48
+ return ret;
49
+ }
50
+
51
+ for (i = 1; i < 32; i++) {
52
+ uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
53
+ reg = env->gpr[i];
54
+ ret = kvm_set_one_reg(cs, id, &reg);
55
+ if (ret) {
56
+ return ret;
57
+ }
58
+ }
59
+
60
+ return ret;
61
+}
44
+}
62
+
45
+
63
static int kvm_riscv_get_regs_csr(CPUState *cs)
46
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
47
uint32_t word)
64
{
48
{
65
int ret = 0;
49
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
66
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
50
return topi;
67
return ret;
68
}
51
}
69
52
70
+static int kvm_riscv_put_regs_csr(CPUState *cs)
53
+static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
71
+{
54
+{
72
+ int ret = 0;
55
+ kvm_set_irq(kvm_state, irq, !!level);
73
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
74
+
75
+ KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
76
+ KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
77
+ KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
78
+ KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
79
+ KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
80
+ KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
81
+ KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
82
+ KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
83
+ KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
84
+
85
+ return ret;
86
+}
56
+}
87
+
57
+
88
static int kvm_riscv_get_regs_fp(CPUState *cs)
58
static void riscv_aplic_request(void *opaque, int irq, int level)
89
{
59
{
90
int ret = 0;
60
bool update = false;
91
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_get_regs_fp(CPUState *cs)
61
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
92
return ret;
62
uint32_t i;
93
}
63
RISCVAPLICState *aplic = RISCV_APLIC(dev);
94
64
95
+static int kvm_riscv_put_regs_fp(CPUState *cs)
65
- aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
96
+{
66
- aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
97
+ int ret = 0;
67
- aplic->state = g_new0(uint32_t, aplic->num_irqs);
98
+ int i;
68
- aplic->target = g_new0(uint32_t, aplic->num_irqs);
99
+ CPURISCVState *env = &RISCV_CPU(cs)->env;
69
- if (!aplic->msimode) {
70
- for (i = 0; i < aplic->num_irqs; i++) {
71
- aplic->target[i] = 1;
72
+ if (!is_kvm_aia(aplic->msimode)) {
73
+ aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
74
+ aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
75
+ aplic->state = g_new0(uint32_t, aplic->num_irqs);
76
+ aplic->target = g_new0(uint32_t, aplic->num_irqs);
77
+ if (!aplic->msimode) {
78
+ for (i = 0; i < aplic->num_irqs; i++) {
79
+ aplic->target[i] = 1;
80
+ }
81
}
82
- }
83
- aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
84
- aplic->iforce = g_new0(uint32_t, aplic->num_harts);
85
- aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
86
+ aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
87
+ aplic->iforce = g_new0(uint32_t, aplic->num_harts);
88
+ aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
89
90
- memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic,
91
- TYPE_RISCV_APLIC, aplic->aperture_size);
92
- sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
93
+ memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops,
94
+ aplic, TYPE_RISCV_APLIC, aplic->aperture_size);
95
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
96
+ }
97
98
/*
99
* Only root APLICs have hardware IRQ lines. All non-root APLICs
100
* have IRQ lines delegated by their parent APLIC.
101
*/
102
if (!aplic->parent) {
103
- qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
104
+ if (is_kvm_aia(aplic->msimode)) {
105
+ qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
106
+ } else {
107
+ qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
108
+ }
109
}
110
111
/* Create output IRQ lines for non-MSI mode */
112
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
113
qdev_prop_set_bit(dev, "mmode", mmode);
114
115
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
116
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
100
+
117
+
101
+ if (riscv_has_ext(env, RVD)) {
118
+ if (!is_kvm_aia(msimode)) {
102
+ uint64_t reg;
119
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
103
+ for (i = 0; i < 32; i++) {
104
+ reg = env->fpr[i];
105
+ ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
106
+ if (ret) {
107
+ return ret;
108
+ }
109
+ }
110
+ return ret;
111
+ }
120
+ }
121
122
if (parent) {
123
riscv_aplic_add_child(parent, dev);
124
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
125
index XXXXXXX..XXXXXXX 100644
126
--- a/hw/intc/riscv_imsic.c
127
+++ b/hw/intc/riscv_imsic.c
128
@@ -XXX,XX +XXX,XX @@
129
#include "target/riscv/cpu.h"
130
#include "target/riscv/cpu_bits.h"
131
#include "sysemu/sysemu.h"
132
+#include "sysemu/kvm.h"
133
#include "migration/vmstate.h"
134
135
#define IMSIC_MMIO_PAGE_LE 0x00
136
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
137
goto err;
138
}
139
140
+#if defined(CONFIG_KVM)
141
+ if (kvm_irqchip_in_kernel()) {
142
+ struct kvm_msi msi;
112
+
143
+
113
+ if (riscv_has_ext(env, RVF)) {
144
+ msi.address_lo = extract64(imsic->mmio.addr + addr, 0, 32);
114
+ uint32_t reg;
145
+ msi.address_hi = extract64(imsic->mmio.addr + addr, 32, 32);
115
+ for (i = 0; i < 32; i++) {
146
+ msi.data = le32_to_cpu(value);
116
+ reg = env->fpr[i];
147
+
117
+ ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
148
+ kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
118
+ if (ret) {
149
+
119
+ return ret;
150
+ return;
120
+ }
121
+ }
122
+ return ret;
123
+ }
151
+ }
152
+#endif
124
+
153
+
125
+ return ret;
154
/* Writes only supported for MSI little-endian registers */
126
+}
155
page = addr >> IMSIC_MMIO_PAGE_SHIFT;
127
+
156
if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
128
+
157
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
129
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
158
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
130
KVM_CAP_LAST_INFO
159
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
131
};
160
132
@@ -XXX,XX +XXX,XX @@ int kvm_arch_get_registers(CPUState *cs)
161
- imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
133
162
- imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
134
int kvm_arch_put_registers(CPUState *cs, int level)
163
- imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
135
{
164
- imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
136
- return 0;
165
+ if (!kvm_irqchip_in_kernel()) {
137
+ int ret = 0;
166
+ imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
138
+
167
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
139
+ ret = kvm_riscv_put_regs_core(cs);
168
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
140
+ if (ret) {
169
+ imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
141
+ return ret;
142
+ }
170
+ }
143
+
171
144
+ ret = kvm_riscv_put_regs_csr(cs);
172
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
145
+ if (ret) {
173
imsic, TYPE_RISCV_IMSIC,
146
+ return ret;
147
+ }
148
+
149
+ ret = kvm_riscv_put_regs_fp(cs);
150
+ if (ret) {
151
+ return ret;
152
+ }
153
+
154
+ return ret;
155
}
156
157
int kvm_arch_release_virq_post(int virq)
158
--
174
--
159
2.31.1
175
2.41.0
160
161
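With the in-kernel AIA active, the IMSIC model above no longer updates its own eistate: a trapped MMIO write is simply re-injected into the kernel as an MSI. A condensed sketch of that path follows; the helper name is illustrative, while the kvm_msi fields and the KVM_SIGNAL_MSI ioctl are the ones used in the patch.

/* Condensed sketch of the riscv_imsic_write() KVM path above. */
static void imsic_write_as_kvm_msi(hwaddr mmio_base, hwaddr offset,
                                   uint64_t value)
{
    struct kvm_msi msi = {
        .address_lo = extract64(mmio_base + offset, 0, 32),
        .address_hi = extract64(mmio_base + offset, 32, 32),
        .data       = le32_to_cpu(value),
    };

    kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
}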
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
The mask comes from the pointer masking extension, or the max value
3
Select KVM AIA when the host kernel has in-kernel AIA chip support.
4
corresponding to XLEN bits.
4
Since KVM AIA only has one APLIC instance, we map the QEMU APLIC
5
devices to KVM APLIC.
5
6
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
7
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Jim Shu <jim.shu@sifive.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Message-id: 20220120122050.41546-20-zhiwei_liu@c-sky.com
10
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Message-ID: <20230727102439.22554-6-yongxuan.wang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
13
---
12
target/riscv/vector_helper.c | 25 +++++++++++++++----------
14
hw/riscv/virt.c | 94 +++++++++++++++++++++++++++++++++----------------
13
1 file changed, 15 insertions(+), 10 deletions(-)
15
1 file changed, 63 insertions(+), 31 deletions(-)
14
16
15
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
17
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/vector_helper.c
19
--- a/hw/riscv/virt.c
18
+++ b/target/riscv/vector_helper.c
20
+++ b/hw/riscv/virt.c
19
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t esz)
21
@@ -XXX,XX +XXX,XX @@
20
return scale < 0 ? vlenb >> -scale : vlenb << scale;
22
#include "hw/riscv/virt.h"
21
}
23
#include "hw/riscv/boot.h"
22
24
#include "hw/riscv/numa.h"
23
+static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
25
+#include "kvm_riscv.h"
26
#include "hw/intc/riscv_aclint.h"
27
#include "hw/intc/riscv_aplic.h"
28
#include "hw/intc/riscv_imsic.h"
29
@@ -XXX,XX +XXX,XX @@
30
#error "Can't accommodate all IMSIC groups in address space"
31
#endif
32
33
+/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
34
+static bool virt_use_kvm_aia(RISCVVirtState *s)
24
+{
35
+{
25
+ return (addr & env->cur_pmmask) | env->cur_pmbase;
36
+ return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
26
+}
37
+}
27
+
38
+
28
/*
39
static const MemMapEntry virt_memmap[] = {
29
* This function checks watchpoint before real load operation.
40
[VIRT_DEBUG] = { 0x0, 0x100 },
30
*
41
[VIRT_MROM] = { 0x1000, 0xf000 },
31
@@ -XXX,XX +XXX,XX @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
42
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
32
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
43
uint32_t *intc_phandles,
33
target_ulong curlen = MIN(pagelen, len);
44
uint32_t aplic_phandle,
34
45
uint32_t aplic_child_phandle,
35
- probe_access(env, addr, curlen, access_type,
46
- bool m_mode)
36
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
47
+ bool m_mode, int num_harts)
37
cpu_mmu_index(env, false), ra);
48
{
38
if (len > curlen) {
49
int cpu;
39
addr += curlen;
50
char *aplic_name;
40
curlen = len - curlen;
51
uint32_t *aplic_cells;
41
- probe_access(env, addr, curlen, access_type,
52
MachineState *ms = MACHINE(s);
42
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
53
43
cpu_mmu_index(env, false), ra);
54
- aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
55
+ aplic_cells = g_new0(uint32_t, num_harts * 2);
56
57
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
58
+ for (cpu = 0; cpu < num_harts; cpu++) {
59
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
60
aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
44
}
61
}
45
}
62
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
46
@@ -XXX,XX +XXX,XX @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
63
47
k = 0;
64
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
48
while (k < nf) {
65
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
49
target_ulong addr = base + stride * i + (k << esz);
66
- aplic_cells,
50
- ldst_elem(env, addr, i + k * max_elems, vd, ra);
67
- s->soc[socket].num_harts * sizeof(uint32_t) * 2);
51
+ ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
68
+ aplic_cells, num_harts * sizeof(uint32_t) * 2);
52
k++;
69
} else {
70
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
71
}
72
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
73
uint32_t msi_s_phandle,
74
uint32_t *phandle,
75
uint32_t *intc_phandles,
76
- uint32_t *aplic_phandles)
77
+ uint32_t *aplic_phandles,
78
+ int num_harts)
79
{
80
char *aplic_name;
81
unsigned long aplic_addr;
82
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
83
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
84
msi_m_phandle, intc_phandles,
85
aplic_m_phandle, aplic_s_phandle,
86
- true);
87
+ true, num_harts);
88
}
89
90
/* S-level APLIC node */
91
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
92
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
93
msi_s_phandle, intc_phandles,
94
aplic_s_phandle, 0,
95
- false);
96
+ false, num_harts);
97
98
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
99
100
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
101
*msi_pcie_phandle = msi_s_phandle;
102
}
103
104
- phandle_pos = ms->smp.cpus;
105
- for (socket = (socket_count - 1); socket >= 0; socket--) {
106
- phandle_pos -= s->soc[socket].num_harts;
107
-
108
- if (s->aia_type == VIRT_AIA_TYPE_NONE) {
109
- create_fdt_socket_plic(s, memmap, socket, phandle,
110
- &intc_phandles[phandle_pos], xplic_phandles);
111
- } else {
112
- create_fdt_socket_aplic(s, memmap, socket,
113
- msi_m_phandle, msi_s_phandle, phandle,
114
- &intc_phandles[phandle_pos], xplic_phandles);
115
+ /* KVM AIA only has one APLIC instance */
116
+ if (virt_use_kvm_aia(s)) {
117
+ create_fdt_socket_aplic(s, memmap, 0,
118
+ msi_m_phandle, msi_s_phandle, phandle,
119
+ &intc_phandles[0], xplic_phandles,
120
+ ms->smp.cpus);
121
+ } else {
122
+ phandle_pos = ms->smp.cpus;
123
+ for (socket = (socket_count - 1); socket >= 0; socket--) {
124
+ phandle_pos -= s->soc[socket].num_harts;
125
+
126
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
127
+ create_fdt_socket_plic(s, memmap, socket, phandle,
128
+ &intc_phandles[phandle_pos],
129
+ xplic_phandles);
130
+ } else {
131
+ create_fdt_socket_aplic(s, memmap, socket,
132
+ msi_m_phandle, msi_s_phandle, phandle,
133
+ &intc_phandles[phandle_pos],
134
+ xplic_phandles,
135
+ s->soc[socket].num_harts);
136
+ }
53
}
137
}
54
}
138
}
55
@@ -XXX,XX +XXX,XX @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
139
56
k = 0;
140
g_free(intc_phandles);
57
while (k < nf) {
141
58
target_ulong addr = base + ((i * nf + k) << esz);
142
- for (socket = 0; socket < socket_count; socket++) {
59
- ldst_elem(env, addr, i + k * max_elems, vd, ra);
143
- if (socket == 0) {
60
+ ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
144
- *irq_mmio_phandle = xplic_phandles[socket];
61
k++;
145
- *irq_virtio_phandle = xplic_phandles[socket];
146
- *irq_pcie_phandle = xplic_phandles[socket];
147
- }
148
- if (socket == 1) {
149
- *irq_virtio_phandle = xplic_phandles[socket];
150
- *irq_pcie_phandle = xplic_phandles[socket];
151
- }
152
- if (socket == 2) {
153
- *irq_pcie_phandle = xplic_phandles[socket];
154
+ if (virt_use_kvm_aia(s)) {
155
+ *irq_mmio_phandle = xplic_phandles[0];
156
+ *irq_virtio_phandle = xplic_phandles[0];
157
+ *irq_pcie_phandle = xplic_phandles[0];
158
+ } else {
159
+ for (socket = 0; socket < socket_count; socket++) {
160
+ if (socket == 0) {
161
+ *irq_mmio_phandle = xplic_phandles[socket];
162
+ *irq_virtio_phandle = xplic_phandles[socket];
163
+ *irq_pcie_phandle = xplic_phandles[socket];
164
+ }
165
+ if (socket == 1) {
166
+ *irq_virtio_phandle = xplic_phandles[socket];
167
+ *irq_pcie_phandle = xplic_phandles[socket];
168
+ }
169
+ if (socket == 2) {
170
+ *irq_pcie_phandle = xplic_phandles[socket];
171
+ }
62
}
172
}
63
}
173
}
64
@@ -XXX,XX +XXX,XX @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
174
65
k = 0;
175
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
66
while (k < nf) {
67
abi_ptr addr = get_index_addr(base, i, vs2) + (k << esz);
68
- ldst_elem(env, addr, i + k * max_elems, vd, ra);
69
+ ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
70
k++;
71
}
176
}
72
}
177
}
73
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base,
178
74
if (!vm && !vext_elem_mask(v0, i)) {
179
+ if (virt_use_kvm_aia(s)) {
75
continue;
180
+ kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
76
}
181
+ VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
77
- addr = base + i * (nf << esz);
182
+ memmap[VIRT_APLIC_S].base,
78
+ addr = adjust_addr(env, base + i * (nf << esz));
183
+ memmap[VIRT_IMSIC_S].base,
79
if (i == 0) {
184
+ s->aia_guests);
80
probe_pages(env, addr, nf << esz, ra, MMU_DATA_LOAD);
185
+ }
81
} else {
186
+
82
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base,
187
if (riscv_is_32bit(&s->soc[0])) {
83
break;
188
#if HOST_LONG_BITS == 64
84
}
189
/* limit RAM size in a 32-bit system */
85
remain -= offset;
86
- addr += offset;
87
+ addr = adjust_addr(env, addr + offset);
88
}
89
}
90
}
91
@@ -XXX,XX +XXX,XX @@ ProbeSuccess:
92
}
93
while (k < nf) {
94
target_ulong addr = base + ((i * nf + k) << esz);
95
- ldst_elem(env, addr, i + k * max_elems, vd, ra);
96
+ ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
97
k++;
98
}
99
}
100
@@ -XXX,XX +XXX,XX @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
101
/* load/store rest of elements of current segment pointed by vstart */
102
for (pos = off; pos < max_elems; pos++, env->vstart++) {
103
target_ulong addr = base + ((pos + k * max_elems) << esz);
104
- ldst_elem(env, addr, pos + k * max_elems, vd, ra);
105
+ ldst_elem(env, adjust_addr(env, addr), pos + k * max_elems, vd, ra);
106
}
107
k++;
108
}
109
@@ -XXX,XX +XXX,XX @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
110
for (; k < nf; k++) {
111
for (i = 0; i < max_elems; i++, env->vstart++) {
112
target_ulong addr = base + ((i + k * max_elems) << esz);
113
- ldst_elem(env, addr, i + k * max_elems, vd, ra);
114
+ ldst_elem(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
115
}
116
}
117
118
--
190
--
119
2.31.1
191
2.41.0
120
121
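As a standalone illustration of the arithmetic in the adjust_addr() helper added above (made-up mask/base values, plain uint64_t standing in for target_ulong; not part of the series):

    /* Illustration only: how cur_pmmask/cur_pmbase-style values rewrite an address. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t adjust_addr(uint64_t addr, uint64_t pmmask, uint64_t pmbase)
    {
        /* Keep the address bits selected by the mask, then splice in the base. */
        return (addr & pmmask) | pmbase;
    }

    int main(void)
    {
        uint64_t pmmask = 0x0000ffffffffffffULL; /* hypothetical: ignore the top 16 bits */
        uint64_t pmbase = 0xffff000000000000ULL; /* hypothetical: force them to a fixed tag */

        /* Prints ffff0000deadbeef: the top 16 bits of the input are replaced. */
        printf("%llx\n",
               (unsigned long long)adjust_addr(0x12340000deadbeefULL, pmmask, pmbase));
        return 0;
    }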
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Conor Dooley <conor.dooley@microchip.com>
2
2
3
The PC is read for translation in cpu_get_tb_cpu_state, before translation starts.
3
On a dtb dumped from the virt machine, dt-validate complains:
4
soc: pmu: {'riscv,event-to-mhpmcounters': [[1, 1, 524281], [2, 2, 524284], [65561, 65561, 524280], [65563, 65563, 524280], [65569, 65569, 524280]], 'compatible': ['riscv,pmu']} should not be valid under {'type': 'object'}
5
from schema $id: http://devicetree.org/schemas/simple-bus.yaml#
6
That's pretty cryptic, but running the dtb back through dtc produces
7
something a lot more reasonable:
8
Warning (simple_bus_reg): /soc/pmu: missing or empty reg/ranges property
4
9
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
10
Moving the riscv,pmu node out of the soc bus solves the problem.
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
8
Message-id: 20220120122050.41546-7-zhiwei_liu@c-sky.com
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Message-ID: <20230727-groom-decline-2c57ce42841c@spud>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
17
---
11
target/riscv/cpu_helper.c | 2 +-
18
hw/riscv/virt.c | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
19
1 file changed, 1 insertion(+), 1 deletion(-)
13
20
14
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
21
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
15
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu_helper.c
23
--- a/hw/riscv/virt.c
17
+++ b/target/riscv/cpu_helper.c
24
+++ b/hw/riscv/virt.c
18
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
25
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pmu(RISCVVirtState *s)
19
26
MachineState *ms = MACHINE(s);
20
uint32_t flags = 0;
27
RISCVCPU hart = s->soc[0].harts[0];
21
28
22
- *pc = env->pc;
29
- pmu_name = g_strdup_printf("/soc/pmu");
23
+ *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
30
+ pmu_name = g_strdup_printf("/pmu");
24
*cs_base = 0;
31
qemu_fdt_add_subnode(ms->fdt, pmu_name);
25
32
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
26
if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
33
riscv_pmu_generate_fdt_node(ms->fdt, hart.cfg.pmu_num, pmu_name);
27
--
34
--
28
2.31.1
35
2.41.0
29
30
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
2
3
Current xlen has been used in helper functions and many other places.
3
The Svadu specification updated the name of the *envcfg bit from
4
The computation of the current xlen is not trivial, so we should
4
HADE to ADUE.
5
recompute it as little as possible.
6
5
7
Fortunately, xlen only changes in a few rare cases, such as exception handling,
6
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
misa writes, mstatus writes, cpu reset, and migration load. So we only need to
7
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
recompute xlen in these places and cache it in CPURISCVState.
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
9
Message-ID: <20230816141916.66898-1-liweiwei@iscas.ac.cn>
11
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-id: 20220120122050.41546-6-zhiwei_liu@c-sky.com
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
11
---
16
target/riscv/cpu.h | 31 +++++++++++++++++++++++++++++++
12
target/riscv/cpu_bits.h | 8 ++++----
17
target/riscv/cpu.c | 1 +
13
target/riscv/cpu.c | 4 ++--
18
target/riscv/cpu_helper.c | 34 ++--------------------------------
14
target/riscv/cpu_helper.c | 6 +++---
19
target/riscv/csr.c | 2 ++
15
target/riscv/csr.c | 12 ++++++------
20
target/riscv/machine.c | 10 ++++++++++
16
4 files changed, 15 insertions(+), 15 deletions(-)
21
5 files changed, 46 insertions(+), 32 deletions(-)
22
17
23
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
18
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
24
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
25
--- a/target/riscv/cpu.h
20
--- a/target/riscv/cpu_bits.h
26
+++ b/target/riscv/cpu.h
21
+++ b/target/riscv/cpu_bits.h
27
@@ -XXX,XX +XXX,XX @@ struct CPURISCVState {
22
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
28
uint32_t misa_mxl_max; /* max mxl for this cpu */
23
#define MENVCFG_CBIE (3UL << 4)
29
uint32_t misa_ext; /* current extensions */
24
#define MENVCFG_CBCFE BIT(6)
30
uint32_t misa_ext_mask; /* max ext for this cpu */
25
#define MENVCFG_CBZE BIT(7)
31
+ uint32_t xl; /* current xlen */
26
-#define MENVCFG_HADE (1ULL << 61)
32
27
+#define MENVCFG_ADUE (1ULL << 61)
33
/* 128-bit helpers upper part return value */
28
#define MENVCFG_PBMTE (1ULL << 62)
34
target_ulong retxh;
29
#define MENVCFG_STCE (1ULL << 63)
35
@@ -XXX,XX +XXX,XX @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
30
36
}
31
/* For RV32 */
37
#endif
32
-#define MENVCFGH_HADE BIT(29)
38
33
+#define MENVCFGH_ADUE BIT(29)
39
+#if defined(TARGET_RISCV32)
34
#define MENVCFGH_PBMTE BIT(30)
40
+#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
35
#define MENVCFGH_STCE BIT(31)
41
+#else
36
42
+static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
37
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
43
+{
38
#define HENVCFG_CBIE MENVCFG_CBIE
44
+ RISCVMXL xl = env->misa_mxl;
39
#define HENVCFG_CBCFE MENVCFG_CBCFE
45
+#if !defined(CONFIG_USER_ONLY)
40
#define HENVCFG_CBZE MENVCFG_CBZE
46
+ /*
41
-#define HENVCFG_HADE MENVCFG_HADE
47
+ * When emulating a 32-bit-only cpu, use RV32.
42
+#define HENVCFG_ADUE MENVCFG_ADUE
48
+ * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
43
#define HENVCFG_PBMTE MENVCFG_PBMTE
49
+ * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
44
#define HENVCFG_STCE MENVCFG_STCE
50
+ * back to RV64 for lower privs.
45
51
+ */
46
/* For RV32 */
52
+ if (xl != MXL_RV32) {
47
-#define HENVCFGH_HADE MENVCFGH_HADE
53
+ switch (env->priv) {
48
+#define HENVCFGH_ADUE MENVCFGH_ADUE
54
+ case PRV_M:
49
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
55
+ break;
50
#define HENVCFGH_STCE MENVCFGH_STCE
56
+ case PRV_U:
51
57
+ xl = get_field(env->mstatus, MSTATUS64_UXL);
58
+ break;
59
+ default: /* PRV_S | PRV_H */
60
+ xl = get_field(env->mstatus, MSTATUS64_SXL);
61
+ break;
62
+ }
63
+ }
64
+#endif
65
+ return xl;
66
+}
67
+#endif
68
+
69
/*
70
* Encode LMUL to lmul as follows:
71
* LMUL vlmul lmul
72
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
52
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
73
index XXXXXXX..XXXXXXX 100644
53
index XXXXXXX..XXXXXXX 100644
74
--- a/target/riscv/cpu.c
54
--- a/target/riscv/cpu.c
75
+++ b/target/riscv/cpu.c
55
+++ b/target/riscv/cpu.c
76
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset(DeviceState *dev)
56
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)
77
/* mmte is supposed to have pm.current hardwired to 1 */
57
env->two_stage_lookup = false;
78
env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
58
79
#endif
59
env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
80
+ env->xl = riscv_cpu_mxl(env);
60
- (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
81
cs->exception_index = RISCV_EXCP_NONE;
61
+ (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
82
env->load_res = -1;
62
env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
83
set_default_nan_mode(1, &env->fp_status);
63
- (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
64
+ (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
65
66
/* Initialized default priorities of local interrupts. */
67
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
84
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
68
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
85
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
86
--- a/target/riscv/cpu_helper.c
70
--- a/target/riscv/cpu_helper.c
87
+++ b/target/riscv/cpu_helper.c
71
+++ b/target/riscv/cpu_helper.c
88
@@ -XXX,XX +XXX,XX @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
72
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
89
#endif
90
}
91
92
-static RISCVMXL cpu_get_xl(CPURISCVState *env)
93
-{
94
-#if defined(TARGET_RISCV32)
95
- return MXL_RV32;
96
-#elif defined(CONFIG_USER_ONLY)
97
- return MXL_RV64;
98
-#else
99
- RISCVMXL xl = riscv_cpu_mxl(env);
100
-
101
- /*
102
- * When emulating a 32-bit-only cpu, use RV32.
103
- * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
104
- * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
105
- * back to RV64 for lower privs.
106
- */
107
- if (xl != MXL_RV32) {
108
- switch (env->priv) {
109
- case PRV_M:
110
- break;
111
- case PRV_U:
112
- xl = get_field(env->mstatus, MSTATUS64_UXL);
113
- break;
114
- default: /* PRV_S | PRV_H */
115
- xl = get_field(env->mstatus, MSTATUS64_SXL);
116
- break;
117
- }
118
- }
119
- return xl;
120
-#endif
121
-}
122
-
123
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
124
target_ulong *cs_base, uint32_t *pflags)
125
{
126
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
127
}
73
}
128
#endif
74
129
75
bool pbmte = env->menvcfg & MENVCFG_PBMTE;
130
- flags = FIELD_DP32(flags, TB_FLAGS, XL, cpu_get_xl(env));
76
- bool hade = env->menvcfg & MENVCFG_HADE;
131
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
77
+ bool adue = env->menvcfg & MENVCFG_ADUE;
132
78
133
*pflags = flags;
79
if (first_stage && two_stage && env->virt_enabled) {
134
}
80
pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
135
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
81
- hade = hade && (env->henvcfg & HENVCFG_HADE);
82
+ adue = adue && (env->henvcfg & HENVCFG_ADUE);
136
}
83
}
137
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
84
138
env->priv = newpriv;
85
int ptshift = (levels - 1) * ptidxbits;
139
+ env->xl = cpu_recompute_xl(env);
86
@@ -XXX,XX +XXX,XX @@ restart:
140
87
141
/*
88
/* Page table updates need to be atomic with MTTCG enabled */
142
* Clear the load reservation - otherwise a reservation placed in one
89
if (updated_pte != pte && !is_debug) {
90
- if (!hade) {
91
+ if (!adue) {
92
return TRANSLATE_FAIL;
93
}
94
143
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
95
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
144
index XXXXXXX..XXXXXXX 100644
96
index XXXXXXX..XXXXXXX 100644
145
--- a/target/riscv/csr.c
97
--- a/target/riscv/csr.c
146
+++ b/target/riscv/csr.c
98
+++ b/target/riscv/csr.c
147
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
99
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
148
mstatus = set_field(mstatus, MSTATUS64_UXL, xl);
100
if (riscv_cpu_mxl(env) == MXL_RV64) {
101
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
102
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
103
- (cfg->ext_svadu ? MENVCFG_HADE : 0);
104
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0);
149
}
105
}
150
env->mstatus = mstatus;
106
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
151
+ env->xl = cpu_recompute_xl(env);
107
152
108
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
109
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
110
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
111
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
112
- (cfg->ext_svadu ? MENVCFG_HADE : 0);
113
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0);
114
uint64_t valh = (uint64_t)val << 32;
115
116
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
117
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
118
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
119
* henvcfg.hade is read_only 0 when menvcfg.hade = 0
120
*/
121
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
122
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
123
env->menvcfg);
153
return RISCV_EXCP_NONE;
124
return RISCV_EXCP_NONE;
154
}
125
}
155
@@ -XXX,XX +XXX,XX @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
126
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
156
/* flush translation cache */
127
}
157
tb_flush(env_cpu(env));
128
158
env->misa_ext = val;
129
if (riscv_cpu_mxl(env) == MXL_RV64) {
159
+ env->xl = riscv_cpu_mxl(env);
130
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE);
131
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
132
}
133
134
env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
135
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
136
return ret;
137
}
138
139
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
140
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
141
env->menvcfg)) >> 32;
160
return RISCV_EXCP_NONE;
142
return RISCV_EXCP_NONE;
161
}
143
}
162
144
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
163
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
145
target_ulong val)
164
index XXXXXXX..XXXXXXX 100644
146
{
165
--- a/target/riscv/machine.c
147
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
166
+++ b/target/riscv/machine.c
148
- HENVCFG_HADE);
167
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_kvmtimer = {
149
+ HENVCFG_ADUE);
168
}
150
uint64_t valh = (uint64_t)val << 32;
169
};
151
RISCVException ret;
170
152
171
+static int riscv_cpu_post_load(void *opaque, int version_id)
172
+{
173
+ RISCVCPU *cpu = opaque;
174
+ CPURISCVState *env = &cpu->env;
175
+
176
+ env->xl = cpu_recompute_xl(env);
177
+ return 0;
178
+}
179
+
180
const VMStateDescription vmstate_riscv_cpu = {
181
.name = "cpu",
182
.version_id = 3,
183
.minimum_version_id = 3,
184
+ .post_load = riscv_cpu_post_load,
185
.fields = (VMStateField[]) {
186
VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
187
VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
188
--
153
--
189
2.31.1
154
2.41.0
190
191
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
When swapping regs for the hypervisor, the value of vsstatus or mstatus_hs
3
In the same emulated RISC-V host, the 'host' KVM CPU takes 4 times
4
should have the right XLEN. Otherwise, it will propagate to mstatus.
4
longer to boot than the 'rv64' KVM CPU.
5
5
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
6
The reason is an unintended behavior of riscv_cpu_satp_mode_finalize()
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
when satp_mode.supported = 0, i.e. when cpu_init() does not set
8
Message-id: 20220120122050.41546-22-zhiwei_liu@c-sky.com
8
satp_mode_max_supported(). satp_mode_max_from_map(map) does:
9
10
31 - __builtin_clz(map)
11
12
This means that, if satp_mode.supported = 0, satp_mode_supported_max
13
will be '31 - 32'. But this is C, so satp_mode_supported_max will gladly
14
set it to UINT_MAX (4294967295). After that, if the user didn't set a
15
satp_mode, set_satp_mode_default_map(cpu) will make
16
17
cfg.satp_mode.map = cfg.satp_mode.supported
18
19
So satp_mode.map = 0. And then satp_mode_map_max will be set to
20
satp_mode_max_from_map(cpu->cfg.satp_mode.map), i.e. also UINT_MAX. The
21
guard "satp_mode_map_max > satp_mode_supported_max" doesn't protect us
22
here since both are UINT_MAX.
23
24
And finally we have 2 loops:
25
26
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
27
28
Which are, in fact, 2 loops from UINT_MAX - 1 down to -1. This is where the
29
extra delay when booting the 'host' CPU is coming from.
30
31
Commit 43d1de32f8 already set a precedent for satp_mode.supported = 0
32
in a different manner. We're doing the same here. If supported == 0,
33
interpret as 'the CPU wants the OS to handle satp mode alone' and skip
34
satp_mode_finalize().
35
36
We'll also put a guard in satp_mode_max_from_map() to assert out if map
37
is 0 since the function is not ready to deal with it.
38
39
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
40
Fixes: 6f23aaeb9b ("riscv: Allow user to set the satp mode")
41
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
42
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
43
Message-ID: <20230817152903.694926-1-dbarboza@ventanamicro.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
44
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
45
---
11
target/riscv/cpu.c | 10 ++++++++++
46
target/riscv/cpu.c | 23 ++++++++++++++++++++---
12
1 file changed, 10 insertions(+)
47
1 file changed, 20 insertions(+), 3 deletions(-)
13
48
14
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
49
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
15
index XXXXXXX..XXXXXXX 100644
50
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu.c
51
--- a/target/riscv/cpu.c
17
+++ b/target/riscv/cpu.c
52
+++ b/target/riscv/cpu.c
18
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset(DeviceState *dev)
53
@@ -XXX,XX +XXX,XX @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
19
*/
54
20
env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
55
uint8_t satp_mode_max_from_map(uint32_t map)
21
env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
56
{
22
+ if (riscv_has_ext(env, RVH)) {
57
+ /*
23
+ env->vsstatus = set_field(env->vsstatus,
58
+ * 'map = 0' will make us return (31 - 32), which C will
24
+ MSTATUS64_SXL, env->misa_mxl);
59
+ * happily overflow to UINT_MAX. There's no good result to
25
+ env->vsstatus = set_field(env->vsstatus,
60
+ * return if 'map = 0' (e.g. returning 0 will be ambiguous
26
+ MSTATUS64_UXL, env->misa_mxl);
61
+ * with the result for 'map = 1').
27
+ env->mstatus_hs = set_field(env->mstatus_hs,
62
+ *
28
+ MSTATUS64_SXL, env->misa_mxl);
63
+ * Assert out if map = 0. Callers will have to deal with
29
+ env->mstatus_hs = set_field(env->mstatus_hs,
64
+ * it outside of this function.
30
+ MSTATUS64_UXL, env->misa_mxl);
65
+ */
31
+ }
66
+ g_assert(map > 0);
32
}
67
+
33
env->mcause = 0;
68
/* map here has at least one bit set, so no problem with clz */
34
env->pc = env->resetvec;
69
return 31 - __builtin_clz(map);
70
}
71
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
72
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
73
{
74
bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
75
- uint8_t satp_mode_map_max;
76
- uint8_t satp_mode_supported_max =
77
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
78
+ uint8_t satp_mode_map_max, satp_mode_supported_max;
79
+
80
+ /* The CPU wants the OS to decide which satp mode to use */
81
+ if (cpu->cfg.satp_mode.supported == 0) {
82
+ return;
83
+ }
84
+
85
+ satp_mode_supported_max =
86
+ satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
87
88
if (cpu->cfg.satp_mode.map == 0) {
89
if (cpu->cfg.satp_mode.init == 0) {
35
--
90
--
36
2.31.1
91
2.41.0
37
38
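As a standalone illustration of the guard discussed in the satp_mode fix above (GCC/Clang __builtin_clz assumed; not part of the series):

    /* Minimal sketch mirroring satp_mode_max_from_map() with its new guard. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t satp_mode_max_from_map(uint32_t map)
    {
        /*
         * __builtin_clz(0) is undefined and '31 - 32' would go negative,
         * which is what produced the bogus *_max values described above,
         * so an empty map has to be rejected before the subtraction.
         */
        assert(map > 0);
        return 31 - __builtin_clz(map);
    }

    int main(void)
    {
        printf("%d\n", satp_mode_max_from_map(1u << 10)); /* prints 10 */
        return 0;
    }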
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Vineet Gupta <vineetg@rivosinc.com>
2
2
3
In some cases, we must restore the guest PC to the address of the start of
3
zicond is now codegen supported in both llvm and gcc.
4
the TB, such as when the instruction counter hits zero. So extend the pc register
5
according to the current xlen in these cases.
6
4
7
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
5
This change allows seamless enabling/testing of zicond in downstream
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
projects. e.g. currently riscv-gnu-toolchain parses elf attributes
7
to create a cmdline for qemu but falls short of enabling it because of
8
the "x-" prefix.
9
10
Signed-off-by: Vineet Gupta <vineetg@rivosinc.com>
11
Message-ID: <20230808181715.436395-1-vineetg@rivosinc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20220120122050.41546-8-zhiwei_liu@c-sky.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
14
---
13
target/riscv/cpu.c | 22 +++++++++++++++++++---
15
target/riscv/cpu.c | 2 +-
14
1 file changed, 19 insertions(+), 3 deletions(-)
16
1 file changed, 1 insertion(+), 1 deletion(-)
15
17
16
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
18
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu.c
20
--- a/target/riscv/cpu.c
19
+++ b/target/riscv/cpu.c
21
+++ b/target/riscv/cpu.c
20
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
22
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
21
{
23
DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
22
RISCVCPU *cpu = RISCV_CPU(cs);
24
DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
23
CPURISCVState *env = &cpu->env;
25
DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
24
- env->pc = value;
26
+ DEFINE_PROP_BOOL("zicond", RISCVCPU, cfg.ext_zicond, false),
25
+
27
26
+ if (env->xl == MXL_RV32) {
28
/* Vendor-specific custom extensions */
27
+ env->pc = (int32_t)value;
29
DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
28
+ } else {
30
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
29
+ env->pc = value;
31
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
30
+ }
32
31
}
33
/* These are experimental so mark with 'x-' */
32
34
- DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
33
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
35
34
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
36
/* ePMP 0.9.3 */
35
{
37
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
36
RISCVCPU *cpu = RISCV_CPU(cs);
37
CPURISCVState *env = &cpu->env;
38
- env->pc = tb->pc;
39
+ RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
40
+
41
+ if (xl == MXL_RV32) {
42
+ env->pc = (int32_t)tb->pc;
43
+ } else {
44
+ env->pc = tb->pc;
45
+ }
46
}
47
48
static bool riscv_cpu_has_work(CPUState *cs)
49
@@ -XXX,XX +XXX,XX @@ static bool riscv_cpu_has_work(CPUState *cs)
50
void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
51
target_ulong *data)
52
{
53
- env->pc = data[0];
54
+ RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
55
+ if (xl == MXL_RV32) {
56
+ env->pc = (int32_t)data[0];
57
+ } else {
58
+ env->pc = data[0];
59
+ }
60
}
61
62
static void riscv_cpu_reset(DeviceState *dev)
63
--
38
--
64
2.31.1
39
2.41.0
65
66
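As a standalone illustration of the sign extension that the riscv_cpu_set_pc()/restore_state_to_opc() changes above rely on when the effective XLEN is RV32 (plain uint64_t standing in for target_ulong; not part of the series):

    /* Casting through int32_t truncates to 32 bits and sign-extends the result. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t value = 0x80001000;        /* a 32-bit PC with bit 31 set */
        uint64_t pc_rv64 = value;           /* stored as-is when XLEN is 64 */
        uint64_t pc_rv32 = (int32_t)value;  /* sign-extended when XLEN is 32 */

        printf("rv64: %016llx\n", (unsigned long long)pc_rv64); /* 0000000080001000 */
        printf("rv32: %016llx\n", (unsigned long long)pc_rv32); /* ffffffff80001000 */
        return 0;
    }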
1
From: Yanan Wang <wangyanan55@huawei.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
The pointer assignment "const char *p = path;" in function
3
A build with --enable-debug and without KVM will fail as follows:
4
qemu_fdt_add_path is unnecessary. Let's remove it and just
4
5
use the "path" passed in. No functional change.
5
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_riscv_virt.c.o: in function `virt_machine_init':
6
./qemu/build/../hw/riscv/virt.c:1465: undefined reference to `kvm_riscv_aia_create'
7
8
This happens because the code block with "if virt_use_kvm_aia(s)" isn't
9
being ignored by the debug build, resulting in an undefined reference to
10
a KVM only function.
11
12
Adding a 'kvm_enabled()' conditional together with virt_use_kvm_aia() will
13
make the compiler crop the kvm_riscv_aia_create() call entirely from a
14
non-KVM build. Note that adding the 'kvm_enabled()' conditional inside
15
virt_use_kvm_aia() won't fix the build because this function would need
16
to be inlined multiple times to make the compiler zero out the entire
17
block.
18
19
While we're at it, use kvm_enabled() in all instances where
20
virt_use_kvm_aia() is checked to allow the compiler to elide these other
21
kvm-only instances as well.
6
22
7
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
23
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
24
Fixes: dbdb99948e ("target/riscv: select KVM AIA in riscv virt machine")
9
Reviewed-by: Andrew Jones <drjones@redhat.com>
25
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
26
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Reviewed-by: Thomas Huth <thuth@redhat.com>
27
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
12
Message-id: 20220111032758.27804-1-wangyanan55@huawei.com
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Message-ID: <20230830133503.711138-2-dbarboza@ventanamicro.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
31
---
15
softmmu/device_tree.c | 9 ++++-----
32
hw/riscv/virt.c | 6 +++---
16
1 file changed, 4 insertions(+), 5 deletions(-)
33
1 file changed, 3 insertions(+), 3 deletions(-)
17
34
18
diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c
35
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
19
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
20
--- a/softmmu/device_tree.c
37
--- a/hw/riscv/virt.c
21
+++ b/softmmu/device_tree.c
38
+++ b/hw/riscv/virt.c
22
@@ -XXX,XX +XXX,XX @@ int qemu_fdt_add_subnode(void *fdt, const char *name)
39
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
23
int qemu_fdt_add_path(void *fdt, const char *path)
24
{
25
const char *name;
26
- const char *p = path;
27
int namelen, retval;
28
int parent = 0;
29
30
@@ -XXX,XX +XXX,XX @@ int qemu_fdt_add_path(void *fdt, const char *path)
31
}
40
}
32
41
33
do {
42
/* KVM AIA only has one APLIC instance */
34
- name = p + 1;
43
- if (virt_use_kvm_aia(s)) {
35
- p = strchr(name, '/');
44
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
36
- namelen = p != NULL ? p - name : strlen(name);
45
create_fdt_socket_aplic(s, memmap, 0,
37
+ name = path + 1;
46
msi_m_phandle, msi_s_phandle, phandle,
38
+ path = strchr(name, '/');
47
&intc_phandles[0], xplic_phandles,
39
+ namelen = path != NULL ? path - name : strlen(name);
48
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
40
49
41
retval = fdt_subnode_offset_namelen(fdt, parent, name, namelen);
50
g_free(intc_phandles);
42
if (retval < 0 && retval != -FDT_ERR_NOTFOUND) {
51
43
@@ -XXX,XX +XXX,XX @@ int qemu_fdt_add_path(void *fdt, const char *path)
52
- if (virt_use_kvm_aia(s)) {
53
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
54
*irq_mmio_phandle = xplic_phandles[0];
55
*irq_virtio_phandle = xplic_phandles[0];
56
*irq_pcie_phandle = xplic_phandles[0];
57
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
44
}
58
}
45
59
}
46
parent = retval;
60
47
- } while (p);
61
- if (virt_use_kvm_aia(s)) {
48
+ } while (path);
62
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
49
63
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
50
return retval;
64
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
51
}
65
memmap[VIRT_APLIC_S].base,
52
--
66
--
53
2.31.1
67
2.41.0
54
68
55
69
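As a compile-time sketch of why the kvm_enabled() term matters in the fix above and in the next patch (stand-in names; kvm_only_create() models a function whose definition only exists in KVM-enabled builds; not part of the series):

    /*
     * In a non-KVM build QEMU's kvm_enabled() expands to a constant 0, so the
     * branch below is folded away even in an unoptimized build and the call to
     * the never-defined kvm_only_create() never reaches the linker, which is
     * what the fix above relies on. Burying the constant inside use_kvm_aia()
     * alone would not help, because the helper is not inlined at -O0 and the
     * condition would no longer be a visible constant at the call site.
     */
    #define kvm_enabled() 0          /* stand-in for the non-KVM stub */

    void kvm_only_create(void);      /* declared, but defined only with KVM */

    static int use_kvm_aia(void)     /* stand-in for virt_use_kvm_aia() */
    {
        return kvm_enabled();
    }

    int main(void)
    {
        if (kvm_enabled() && use_kvm_aia()) {
            kvm_only_create();       /* dropped together with the dead branch */
        }
        return 0;
    }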
1
From: Yifei Jiang <jiangyifei@huawei.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
Virtual time should adjust as the VM state changes. When a VM
3
Commit 6df0b37e2ab breaks a --enable-debug build in a non-KVM
4
is stopped, guest virtual time should stop counting and kvm_timer
4
environment with the following error:
5
should be stopped. When the vm is resumed, guest virtual time should
6
continue to count and kvm_timer should be restored.
7
5
8
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
6
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_intc_riscv_aplic.c.o: in function `riscv_kvm_aplic_request':
9
Signed-off-by: Mingwang Li <limingwang@huawei.com>
7
./qemu/build/../hw/intc/riscv_aplic.c:486: undefined reference to `kvm_set_irq'
10
Reviewed-by: Anup Patel <anup.patel@wdc.com>
8
collect2: error: ld returned 1 exit status
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
12
Message-id: 20220112081329.1835-12-jiangyifei@huawei.com
10
This happens because the debug build will poke into the
11
'if (is_kvm_aia(aplic->msimode))' block and fail to find a reference to
12
the KVM only function riscv_kvm_aplic_request().
13
14
There are multiple solutions to fix this. We'll go with the same
15
solution from the previous patch, i.e. add a kvm_enabled() conditional
16
to filter out the block. But there's a catch: riscv_kvm_aplic_request()
17
is a local function that would end up being used if the compiler crops
18
the block, and this won't work. Quoting Richard Henderson's explanation
19
in [1]:
20
21
"(...) the compiler won't eliminate entire unused functions with -O0"
22
23
We'll solve it by moving riscv_kvm_aplic_request() to kvm.c and add its
24
declaration in kvm_riscv.h, where all other KVM specific public
25
functions are already declared. Other archs handles KVM specific code in
26
this manner and we expect to do the same from now on.
27
28
[1] https://lore.kernel.org/qemu-riscv/d2f1ad02-eb03-138f-9d08-db676deeed05@linaro.org/
29
30
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
32
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
33
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
34
Message-ID: <20230830133503.711138-3-dbarboza@ventanamicro.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
35
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
36
---
15
target/riscv/kvm.c | 15 +++++++++++++++
37
target/riscv/kvm_riscv.h | 1 +
16
1 file changed, 15 insertions(+)
38
hw/intc/riscv_aplic.c | 8 ++------
39
target/riscv/kvm.c | 5 +++++
40
3 files changed, 8 insertions(+), 6 deletions(-)
17
41
42
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/riscv/kvm_riscv.h
45
+++ b/target/riscv/kvm_riscv.h
46
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
47
uint64_t aia_irq_num, uint64_t aia_msi_num,
48
uint64_t aplic_base, uint64_t imsic_base,
49
uint64_t guest_num);
50
+void riscv_kvm_aplic_request(void *opaque, int irq, int level);
51
52
#endif
53
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/intc/riscv_aplic.c
56
+++ b/hw/intc/riscv_aplic.c
57
@@ -XXX,XX +XXX,XX @@
58
#include "target/riscv/cpu.h"
59
#include "sysemu/sysemu.h"
60
#include "sysemu/kvm.h"
61
+#include "kvm_riscv.h"
62
#include "migration/vmstate.h"
63
64
#define APLIC_MAX_IDC (1UL << 14)
65
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
66
return topi;
67
}
68
69
-static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
70
-{
71
- kvm_set_irq(kvm_state, irq, !!level);
72
-}
73
-
74
static void riscv_aplic_request(void *opaque, int irq, int level)
75
{
76
bool update = false;
77
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
78
* have IRQ lines delegated by their parent APLIC.
79
*/
80
if (!aplic->parent) {
81
- if (is_kvm_aia(aplic->msimode)) {
82
+ if (kvm_enabled() && is_kvm_aia(aplic->msimode)) {
83
qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
84
} else {
85
qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
18
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
86
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
19
index XXXXXXX..XXXXXXX 100644
87
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/kvm.c
88
--- a/target/riscv/kvm.c
21
+++ b/target/riscv/kvm.c
89
+++ b/target/riscv/kvm.c
22
@@ -XXX,XX +XXX,XX @@
90
@@ -XXX,XX +XXX,XX @@
23
#include "sbi_ecall_interface.h"
91
#include "sysemu/runstate.h"
24
#include "chardev/char-fe.h"
92
#include "hw/riscv/numa.h"
25
#include "migration/migration.h"
93
26
+#include "sysemu/runstate.h"
94
+void riscv_kvm_aplic_request(void *opaque, int irq, int level)
27
95
+{
96
+ kvm_set_irq(kvm_state, irq, !!level);
97
+}
98
+
28
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
99
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
29
uint64_t idx)
100
uint64_t idx)
30
@@ -XXX,XX +XXX,XX @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
31
return cpu->cpu_index;
32
}
33
34
+static void kvm_riscv_vm_state_change(void *opaque, bool running,
35
+ RunState state)
36
+{
37
+ CPUState *cs = opaque;
38
+
39
+ if (running) {
40
+ kvm_riscv_put_regs_timer(cs);
41
+ } else {
42
+ kvm_riscv_get_regs_timer(cs);
43
+ }
44
+}
45
+
46
void kvm_arch_init_irq_routing(KVMState *s)
47
{
101
{
48
}
49
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
50
CPURISCVState *env = &cpu->env;
51
uint64_t id;
52
53
+ qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
54
+
55
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
56
KVM_REG_RISCV_CONFIG_REG(isa));
57
ret = kvm_get_one_reg(cs, id, &isa);
58
--
102
--
59
2.31.1
103
2.41.0
60
104
61
105
Deleted patch
1
From: Yifei Jiang <jiangyifei@huawei.com>
2
1
3
Add a virtual time context description to vmstate_kvmtimer. After the cpu is
4
loaded, the virtual time context is updated to KVM.
5
6
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
7
Signed-off-by: Mingwang Li <limingwang@huawei.com>
8
Reviewed-by: Anup Patel <anup.patel@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20220112081329.1835-13-jiangyifei@huawei.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/machine.c | 30 ++++++++++++++++++++++++++++++
14
1 file changed, 30 insertions(+)
15
16
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/machine.c
19
+++ b/target/riscv/machine.c
20
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_rv128 = {
21
}
22
};
23
24
+static bool kvmtimer_needed(void *opaque)
25
+{
26
+ return kvm_enabled();
27
+}
28
+
29
+static int cpu_post_load(void *opaque, int version_id)
30
+{
31
+ RISCVCPU *cpu = opaque;
32
+ CPURISCVState *env = &cpu->env;
33
+
34
+ env->kvm_timer_dirty = true;
35
+ return 0;
36
+}
37
+
38
+static const VMStateDescription vmstate_kvmtimer = {
39
+ .name = "cpu/kvmtimer",
40
+ .version_id = 1,
41
+ .minimum_version_id = 1,
42
+ .needed = kvmtimer_needed,
43
+ .post_load = cpu_post_load,
44
+ .fields = (VMStateField[]) {
45
+ VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
46
+ VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
47
+ VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
48
+
49
+ VMSTATE_END_OF_LIST()
50
+ }
51
+};
52
+
53
const VMStateDescription vmstate_riscv_cpu = {
54
.name = "cpu",
55
.version_id = 3,
56
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
57
&vmstate_vector,
58
&vmstate_pointermasking,
59
&vmstate_rv128,
60
+ &vmstate_kvmtimer,
61
NULL
62
}
63
};
64
--
65
2.31.1
66
67
Deleted patch
1
From: Yifei Jiang <jiangyifei@huawei.com>
2
1
3
Add riscv kvm support in meson.build file.
4
5
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
6
Signed-off-by: Mingwang Li <limingwang@huawei.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Anup Patel <anup@brainfault.org>
9
Message-id: 20220112081329.1835-14-jiangyifei@huawei.com
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
meson.build | 2 ++
13
1 file changed, 2 insertions(+)
14
15
diff --git a/meson.build b/meson.build
16
index XXXXXXX..XXXXXXX 100644
17
--- a/meson.build
18
+++ b/meson.build
19
@@ -XXX,XX +XXX,XX @@ elif cpu in ['ppc', 'ppc64']
20
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
21
elif cpu in ['mips', 'mips64']
22
kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
23
+elif cpu in ['riscv']
24
+ kvm_targets = ['riscv32-softmmu', 'riscv64-softmmu']
25
else
26
kvm_targets = []
27
endif
28
--
29
2.31.1
30
31
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
All Zve* extensions support all vector integer instructions,
4
except that the vmulh integer multiply variants that return the
5
high word of the product (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx,
6
vmulhsu.vv, vmulhsu.vx) are not included for EEW=64 in Zve64*.
7
8
Signed-off-by: Frank Chang <frank.chang@sifive.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20220118014522.13613-5-frank.chang@sifive.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/insn_trans/trans_rvv.c.inc | 39 +++++++++++++++++++++----
14
1 file changed, 33 insertions(+), 6 deletions(-)
15
16
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
20
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
21
GEN_OPIVX_TRANS(vmax_vx, opivx_check)
22
23
/* Vector Single-Width Integer Multiply Instructions */
24
+
25
+static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
26
+{
27
+ /*
28
+ * All Zve* extensions support all vector integer instructions,
29
+ * except that the vmulh integer multiply variants
30
+ * that return the high word of the product
31
+ * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
32
+ * are not included for EEW=64 in Zve64*. (Section 18.2)
33
+ */
34
+ return opivv_check(s, a) &&
35
+ (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
36
+}
37
+
38
+static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
39
+{
40
+ /*
41
+ * All Zve* extensions support all vector integer instructions,
42
+ * except that the vmulh integer multiply variants
43
+ * that return the high word of the product
44
+ * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
45
+ * are not included for EEW=64 in Zve64*. (Section 18.2)
46
+ */
47
+ return opivx_check(s, a) &&
48
+ (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
49
+}
50
+
51
GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
52
-GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
53
-GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
54
-GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
55
+GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
56
+GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
57
+GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
58
GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
59
-GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
60
-GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
61
-GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
62
+GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
63
+GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
64
+GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)
65
66
/* Vector Integer Divide Instructions */
67
GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
68
--
69
2.31.1
70
71
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
All Zve* extensions support all vector fixed-point arithmetic
4
instructions, except that vsmul.vv and vsmul.vx are not supported
5
for EEW=64 in Zve64*.
6
7
Signed-off-by: Frank Chang <frank.chang@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20220118014522.13613-6-frank.chang@sifive.com
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/insn_trans/trans_rvv.c.inc | 27 +++++++++++++++++++++++--
13
1 file changed, 25 insertions(+), 2 deletions(-)
14
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vasub_vx, opivx_check)
20
GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
21
22
/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
23
-GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
24
-GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
25
+
26
+static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
27
+{
28
+ /*
29
+ * All Zve* extensions support all vector fixed-point arithmetic
30
+ * instructions, except that vsmul.vv and vsmul.vx are not supported
31
+ * for EEW=64 in Zve64*. (Section 18.2)
32
+ */
33
+ return opivv_check(s, a) &&
34
+ (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
35
+}
36
+
37
+static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
38
+{
39
+ /*
40
+ * All Zve* extensions support all vector fixed-point arithmetic
41
+ * instructions, except that vsmul.vv and vsmul.vx are not supported
42
+ * for EEW=64 in Zve64*. (Section 18.2)
43
+ */
44
+ return opivx_check(s, a) &&
45
+ (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true);
46
+}
47
+
48
+GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
49
+GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check)
50
51
/* Vector Single-Width Scaling Shift Instructions */
52
GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
53
--
54
2.31.1
55
56
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
Zve64f extension requires the scalar processor to implement the F
4
extension and implement all vector floating-point instructions for
5
floating-point operands with EEW=32 (i.e., no widening floating-point
6
operations).
7
8
Signed-off-by: Frank Chang <frank.chang@sifive.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-id: 20220118014522.13613-7-frank.chang@sifive.com
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/insn_trans/trans_rvv.c.inc | 41 +++++++++++++++++++------
14
1 file changed, 31 insertions(+), 10 deletions(-)
15
16
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
20
@@ -XXX,XX +XXX,XX @@ static bool require_scale_rvf(DisasContext *s)
21
}
22
}
23
24
+static bool require_zve64f(DisasContext *s)
25
+{
26
+ /* RVV + Zve64f = RVV. */
27
+ if (has_ext(s, RVV)) {
28
+ return true;
29
+ }
30
+
31
+ /* Zve64f doesn't support FP64. (Section 18.2) */
32
+ return s->ext_zve64f ? s->sew <= MO_32 : true;
33
+}
34
+
35
/* Destination vector register group cannot overlap source mask register. */
36
static bool require_vm(int vm, int vd)
37
{
38
@@ -XXX,XX +XXX,XX @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
39
return require_rvv(s) &&
40
require_rvf(s) &&
41
vext_check_isa_ill(s) &&
42
- vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
43
+ vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
44
+ require_zve64f(s);
45
}
46
47
/* OPFVV without GVEC IR */
48
@@ -XXX,XX +XXX,XX @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
49
return require_rvv(s) &&
50
require_rvf(s) &&
51
vext_check_isa_ill(s) &&
52
- vext_check_ss(s, a->rd, a->rs2, a->vm);
53
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
54
+ require_zve64f(s);
55
}
56
57
/* OPFVF without GVEC IR */
58
@@ -XXX,XX +XXX,XX @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
59
require_rvf(s) &&
60
vext_check_isa_ill(s) &&
61
/* OPFV instructions ignore vs1 check */
62
- vext_check_ss(s, a->rd, a->rs2, a->vm);
63
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
64
+ require_zve64f(s);
65
}
66
67
static bool do_opfv(DisasContext *s, arg_rmr *a,
68
@@ -XXX,XX +XXX,XX @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
69
return require_rvv(s) &&
70
require_rvf(s) &&
71
vext_check_isa_ill(s) &&
72
- vext_check_mss(s, a->rd, a->rs1, a->rs2);
73
+ vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
74
+ require_zve64f(s);
75
}
76
77
GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
78
@@ -XXX,XX +XXX,XX @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
79
return require_rvv(s) &&
80
require_rvf(s) &&
81
vext_check_isa_ill(s) &&
82
- vext_check_ms(s, a->rd, a->rs2);
83
+ vext_check_ms(s, a->rd, a->rs2) &&
84
+ require_zve64f(s);
85
}
86
87
GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
88
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
89
if (require_rvv(s) &&
90
require_rvf(s) &&
91
vext_check_isa_ill(s) &&
92
- require_align(a->rd, s->lmul)) {
93
+ require_align(a->rd, s->lmul) &&
94
+ require_zve64f(s)) {
95
gen_set_rm(s, RISCV_FRM_DYN);
96
97
TCGv_i64 t1;
98
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
99
{
100
if (require_rvv(s) &&
101
require_rvf(s) &&
102
- vext_check_isa_ill(s)) {
103
+ vext_check_isa_ill(s) &&
104
+ require_zve64f(s)) {
105
gen_set_rm(s, RISCV_FRM_DYN);
106
107
unsigned int ofs = (8 << s->sew);
108
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
109
{
110
if (require_rvv(s) &&
111
require_rvf(s) &&
112
- vext_check_isa_ill(s)) {
113
+ vext_check_isa_ill(s) &&
114
+ require_zve64f(s)) {
115
gen_set_rm(s, RISCV_FRM_DYN);
116
117
/* The instructions ignore LMUL and vector register group. */
118
@@ -XXX,XX +XXX,XX @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
119
static bool fslideup_check(DisasContext *s, arg_rmrr *a)
120
{
121
return slideup_check(s, a) &&
122
- require_rvf(s);
123
+ require_rvf(s) &&
124
+ require_zve64f(s);
125
}
126
127
static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
128
{
129
return slidedown_check(s, a) &&
130
- require_rvf(s);
131
+ require_rvf(s) &&
132
+ require_zve64f(s);
133
}
134
135
GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
136
--
137
2.31.1
138
139
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
Vector single-width floating-point reduction operations for EEW=32 are
4
supported for Zve64f extension.
5
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20220118014522.13613-8-frank.chang@sifive.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/insn_trans/trans_rvv.c.inc | 3 ++-
12
1 file changed, 2 insertions(+), 1 deletion(-)
13
14
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
@@ -XXX,XX +XXX,XX @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
19
static bool freduction_check(DisasContext *s, arg_rmrr *a)
20
{
21
return reduction_check(s, a) &&
22
- require_rvf(s);
23
+ require_rvf(s) &&
24
+ require_zve64f(s);
25
}
26
27
GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
28
--
29
2.31.1
30
31
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
Vector narrowing conversion instructions are provided to and from all
4
supported integer EEWs for the Zve64f extension.
5
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20220118014522.13613-10-frank.chang@sifive.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/insn_trans/trans_rvv.c.inc | 9 ++++++---
12
1 file changed, 6 insertions(+), 3 deletions(-)
13
14
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
19
{
20
return opfv_narrow_check(s, a) &&
21
require_rvf(s) &&
22
- (s->sew != MO_64);
23
+ (s->sew != MO_64) &&
24
+ require_zve64f(s);
25
}
26
27
static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
28
{
29
return opfv_narrow_check(s, a) &&
30
require_scale_rvf(s) &&
31
- (s->sew != MO_8);
32
+ (s->sew != MO_8) &&
33
+ require_scale_zve64f(s);
34
}
35
36
#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
37
@@ -XXX,XX +XXX,XX @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
38
require_scale_rvf(s) &&
39
vext_check_isa_ill(s) &&
40
/* OPFV narrowing instructions ignore vs1 check */
41
- vext_check_sd(s, a->rd, a->rs2, a->vm);
42
+ vext_check_sd(s, a->rd, a->rs2, a->vm) &&
43
+ require_scale_zve64f(s);
44
}
45
46
#define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
47
--
48
2.31.1
49
50
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
Vector single-width floating-point reduction operations for EEW=32 are
4
supported for the Zve32f extension.
5
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20220118014522.13613-15-frank.chang@sifive.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/insn_trans/trans_rvv.c.inc | 1 +
12
1 file changed, 1 insertion(+)
13
14
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/insn_trans/trans_rvv.c.inc
17
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool freduction_check(DisasContext *s, arg_rmrr *a)
19
{
20
return reduction_check(s, a) &&
21
require_rvf(s) &&
22
+ require_zve32f(s) &&
23
require_zve64f(s);
24
}
25
26
--
27
2.31.1
28
29
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Robbin Ehn <rehn@rivosinc.com>
2
2
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
3
This patch adds the new extensions introduced in
4
Linux 6.5 to the hwprobe syscall.
5

6
It also fixes the RVC check to OR in the correct value:
7
the value previously ORed in (pair->value) happened to be 0,
8
so the check still worked by accident.
10
Signed-off-by: Robbin Ehn <rehn@rivosinc.com>
4
Acked-by: Richard Henderson <richard.henderson@linaro.org>
11
Acked-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20220120122050.41546-18-zhiwei_liu@c-sky.com
13
Message-ID: <bc82203b72d7efb30f1b4a8f9eb3d94699799dc8.camel@rivosinc.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
15
---
9
target/riscv/cpu.h | 1 -
16
linux-user/syscall.c | 14 +++++++++++++-
10
1 file changed, 1 deletion(-)
17
1 file changed, 13 insertions(+), 1 deletion(-)
11
18
12
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
19
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
13
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/cpu.h
21
--- a/linux-user/syscall.c
15
+++ b/target/riscv/cpu.h
22
+++ b/linux-user/syscall.c
16
@@ -XXX,XX +XXX,XX @@ FIELD(VTYPE, VTA, 6, 1)
23
@@ -XXX,XX +XXX,XX @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
17
FIELD(VTYPE, VMA, 7, 1)
24
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
18
FIELD(VTYPE, VEDIV, 8, 2)
25
#define RISCV_HWPROBE_IMA_FD (1 << 0)
19
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
26
#define RISCV_HWPROBE_IMA_C (1 << 1)
20
-FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 1, 1)
27
+#define RISCV_HWPROBE_IMA_V (1 << 2)
21
28
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
22
struct CPURISCVState {
29
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
23
target_ulong gpr[32];
30
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
31
32
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
33
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
34
@@ -XXX,XX +XXX,XX @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
35
riscv_has_ext(env, RVD) ?
36
RISCV_HWPROBE_IMA_FD : 0;
37
value |= riscv_has_ext(env, RVC) ?
38
- RISCV_HWPROBE_IMA_C : pair->value;
39
+ RISCV_HWPROBE_IMA_C : 0;
40
+ value |= riscv_has_ext(env, RVV) ?
41
+ RISCV_HWPROBE_IMA_V : 0;
42
+ value |= cfg->ext_zba ?
43
+ RISCV_HWPROBE_EXT_ZBA : 0;
44
+ value |= cfg->ext_zbb ?
45
+ RISCV_HWPROBE_EXT_ZBB : 0;
46
+ value |= cfg->ext_zbs ?
47
+ RISCV_HWPROBE_EXT_ZBS : 0;
48
__put_user(value, &pair->value);
49
break;
50
case RISCV_HWPROBE_KEY_CPUPERF_0:
24
--
51
--
25
2.31.1
52
2.41.0
26
27
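
For reference, the new bits can be probed from user space along these
lines. This is a hedged sketch: it assumes the Linux >= 6.5 uapi header
<asm/hwprobe.h> and the raw syscall(__NR_riscv_hwprobe, ...) interface,
neither of which is part of this patch.

/* Hypothetical user-space check for the bits wired up above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>   /* struct riscv_hwprobe, RISCV_HWPROBE_* */

int main(void)
{
    struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

    /* One key/value pair, all CPUs (cpusetsize 0, cpus NULL), no flags. */
    if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
        perror("riscv_hwprobe");
        return 1;
    }
    printf("V:%d Zba:%d Zbb:%d Zbs:%d\n",
           !!(pair.value & RISCV_HWPROBE_IMA_V),
           !!(pair.value & RISCV_HWPROBE_EXT_ZBA),
           !!(pair.value & RISCV_HWPROBE_EXT_ZBB),
           !!(pair.value & RISCV_HWPROBE_EXT_ZBS));
    return 0;
}
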
1
From: Anup Patel <apatel@ventanamicro.com>
1
From: Ard Biesheuvel <ardb@kernel.org>
2
2
3
Now that the RISC-V Spike machine can use BIN BIOS images, we remove
3
Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
4
the macros used for ELF BIOS image names.
4
implement the first half of the key schedule derivation. This does not
5
actually involve shifting rows, so clone the same value into all four
6
columns of the AES vector to counter that operation.
5
7
6
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
8
Cc: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
10
Cc: Palmer Dabbelt <palmer@dabbelt.com>
11
Cc: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-ID: <20230831154118.138727-1-ardb@kernel.org>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
17
---
11
include/hw/riscv/boot.h | 2 --
18
target/riscv/crypto_helper.c | 17 +++++------------
12
hw/riscv/spike.c | 4 ++--
19
1 file changed, 5 insertions(+), 12 deletions(-)
13
2 files changed, 2 insertions(+), 4 deletions(-)
14
20
15
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
21
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/riscv/boot.h
23
--- a/target/riscv/crypto_helper.c
18
+++ b/include/hw/riscv/boot.h
24
+++ b/target/riscv/crypto_helper.c
19
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
20
#include "hw/riscv/riscv_hart.h"
26
21
27
uint8_t enc_rnum = rnum;
22
#define RISCV32_BIOS_BIN "opensbi-riscv32-generic-fw_dynamic.bin"
28
uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
23
-#define RISCV32_BIOS_ELF "opensbi-riscv32-generic-fw_dynamic.elf"
29
- uint8_t rcon_ = 0;
24
#define RISCV64_BIOS_BIN "opensbi-riscv64-generic-fw_dynamic.bin"
30
- target_ulong result;
25
-#define RISCV64_BIOS_ELF "opensbi-riscv64-generic-fw_dynamic.elf"
31
+ AESState t, rc = {};
26
32
27
bool riscv_is_32bit(RISCVHartArrayState *harts);
33
if (enc_rnum != 0xA) {
28
34
temp = ror32(temp, 8); /* Rotate right by 8 */
29
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
35
- rcon_ = round_consts[enc_rnum];
30
index XXXXXXX..XXXXXXX 100644
36
+ rc.w[0] = rc.w[1] = round_consts[enc_rnum];
31
--- a/hw/riscv/spike.c
32
+++ b/hw/riscv/spike.c
33
@@ -XXX,XX +XXX,XX @@ static void spike_board_init(MachineState *machine)
34
*/
35
if (riscv_is_32bit(&s->soc[0])) {
36
firmware_end_addr = riscv_find_and_load_firmware(machine,
37
- RISCV32_BIOS_ELF, memmap[SPIKE_DRAM].base,
38
+ RISCV32_BIOS_BIN, memmap[SPIKE_DRAM].base,
39
htif_symbol_callback);
40
} else {
41
firmware_end_addr = riscv_find_and_load_firmware(machine,
42
- RISCV64_BIOS_ELF, memmap[SPIKE_DRAM].base,
43
+ RISCV64_BIOS_BIN, memmap[SPIKE_DRAM].base,
44
htif_symbol_callback);
45
}
37
}
46
38
39
- temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
40
- ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
41
- ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
42
- ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
43
+ t.w[0] = t.w[1] = t.w[2] = t.w[3] = temp;
44
+ aesenc_SB_SR_AK(&t, &t, &rc, false);
45
46
- temp ^= rcon_;
47
-
48
- result = ((uint64_t)temp << 32) | temp;
49
-
50
- return result;
51
+ return t.d[0];
52
}
53
54
target_ulong HELPER(aes64im)(target_ulong rs1)
47
--
55
--
48
2.31.1
56
2.41.0
49
57
50
58
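
The "clone the same value into all four columns" trick works because
ShiftRows only rotates bytes across columns within each row, so it is a
no-op when every column holds the same word. That property is easy to
check in isolation; the toy ShiftRows below (column-major state, row r
rotated by r columns) is a stand-alone illustration, not QEMU's AES code.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* st[c][r]: column-major AES state; ShiftRows rotates row r left by r. */
static void shift_rows(uint8_t out[4][4], const uint8_t in[4][4])
{
    for (int r = 0; r < 4; r++) {
        for (int c = 0; c < 4; c++) {
            out[c][r] = in[(c + r) % 4][r];
        }
    }
}

int main(void)
{
    uint32_t temp = 0xdeadbeef;   /* stand-in for the rotated/substituted word */
    uint8_t st[4][4], shifted[4][4];

    for (int c = 0; c < 4; c++) {
        memcpy(st[c], &temp, 4);  /* replicate the word into every column */
    }
    shift_rows(shifted, st);
    /* Prints "no": the replicated state is invariant under ShiftRows. */
    printf("changed: %s\n", memcmp(st, shifted, sizeof(st)) ? "yes" : "no");
    return 0;
}
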
Deleted patch
1
From: Anup Patel <apatel@ventanamicro.com>
2
1
3
Now that all RISC-V machines can use OpenSBI BIN images, we remove
4
OpenSBI ELF images and also exclude these images from BIOS build.
5
6
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
.gitlab-ci.d/opensbi.yml | 2 --
12
pc-bios/meson.build | 2 --
13
pc-bios/opensbi-riscv32-generic-fw_dynamic.elf | Bin 838904 -> 0 bytes
14
pc-bios/opensbi-riscv64-generic-fw_dynamic.elf | Bin 934696 -> 0 bytes
15
roms/Makefile | 2 --
16
5 files changed, 6 deletions(-)
17
delete mode 100644 pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
18
delete mode 100644 pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
19
20
diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml
21
index XXXXXXX..XXXXXXX 100644
22
--- a/.gitlab-ci.d/opensbi.yml
23
+++ b/.gitlab-ci.d/opensbi.yml
24
@@ -XXX,XX +XXX,XX @@ build-opensbi:
25
artifacts:
26
paths: # 'artifacts.zip' will contains the following files:
27
- pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
28
- - pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
29
- pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
30
- - pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
31
- opensbi32-generic-stdout.log
32
- opensbi32-generic-stderr.log
33
- opensbi64-generic-stdout.log
34
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
35
index XXXXXXX..XXXXXXX 100644
36
--- a/pc-bios/meson.build
37
+++ b/pc-bios/meson.build
38
@@ -XXX,XX +XXX,XX @@ blobs = files(
39
'hppa-firmware.img',
40
'opensbi-riscv32-generic-fw_dynamic.bin',
41
'opensbi-riscv64-generic-fw_dynamic.bin',
42
- 'opensbi-riscv32-generic-fw_dynamic.elf',
43
- 'opensbi-riscv64-generic-fw_dynamic.elf',
44
'npcm7xx_bootrom.bin',
45
)
46
47
diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
48
deleted file mode 100644
49
index XXXXXXX..XXXXXXX
50
Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.elf and /dev/null differ
51
diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf b/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
52
deleted file mode 100644
53
index XXXXXXX..XXXXXXX
54
Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.elf and /dev/null differ
55
diff --git a/roms/Makefile b/roms/Makefile
56
index XXXXXXX..XXXXXXX 100644
57
--- a/roms/Makefile
58
+++ b/roms/Makefile
59
@@ -XXX,XX +XXX,XX @@ opensbi32-generic:
60
        CROSS_COMPILE=$(riscv32_cross_prefix) \
61
        PLATFORM="generic"
62
    cp opensbi/build/platform/generic/firmware/fw_dynamic.bin ../pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
63
-    cp opensbi/build/platform/generic/firmware/fw_dynamic.elf ../pc-bios/opensbi-riscv32-generic-fw_dynamic.elf
64
65
opensbi64-generic:
66
    $(MAKE) -C opensbi \
67
        CROSS_COMPILE=$(riscv64_cross_prefix) \
68
        PLATFORM="generic"
69
    cp opensbi/build/platform/generic/firmware/fw_dynamic.bin ../pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
70
-    cp opensbi/build/platform/generic/firmware/fw_dynamic.elf ../pc-bios/opensbi-riscv64-generic-fw_dynamic.elf
71
72
MESON = meson
73
NINJA = ninja
74
--
75
2.31.1
76
77
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Akihiko Odaki <akihiko.odaki@daynix.com>
2
2
3
When pc is written, it is sign-extended to fill the widest supported XLEN.
3
riscv_trigger_init() was called on reset events, which can happen
4
several times for a CPU, and it allocated the itrigger timers each
5
time. Any old timers were simply overwritten by the new ones,
6
resulting in a memory leak.
4
7
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
8
Divide riscv_trigger_init() into two functions, namely
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
riscv_trigger_realize() and riscv_trigger_reset_hold(), and call them at
10
the appropriate times. The timer allocation then happens only once per
11
CPU, in riscv_trigger_realize().
12
13
Fixes: 5a4ae64cac ("target/riscv: Add itrigger support when icount is enabled")
14
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
15
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
17
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20220120122050.41546-5-zhiwei_liu@c-sky.com
18
Message-ID: <20230818034059.9146-1-akihiko.odaki@daynix.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
19
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
20
---
11
target/riscv/translate.c | 25 ++++++++++++++++---
21
target/riscv/debug.h | 3 ++-
12
.../riscv/insn_trans/trans_privileged.c.inc | 2 +-
22
target/riscv/cpu.c | 8 +++++++-
13
target/riscv/insn_trans/trans_rvi.c.inc | 5 ++--
23
target/riscv/debug.c | 15 ++++++++++++---
14
target/riscv/insn_trans/trans_rvv.c.inc | 4 +--
24
3 files changed, 21 insertions(+), 5 deletions(-)
15
4 files changed, 27 insertions(+), 9 deletions(-)
16
25
17
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
26
diff --git a/target/riscv/debug.h b/target/riscv/debug.h
18
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/translate.c
28
--- a/target/riscv/debug.h
20
+++ b/target/riscv/translate.c
29
+++ b/target/riscv/debug.h
21
@@ -XXX,XX +XXX,XX @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
30
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_debug_excp_handler(CPUState *cs);
22
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
31
bool riscv_cpu_debug_check_breakpoint(CPUState *cs);
32
bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
33
34
-void riscv_trigger_init(CPURISCVState *env);
35
+void riscv_trigger_realize(CPURISCVState *env);
36
+void riscv_trigger_reset_hold(CPURISCVState *env);
37
38
bool riscv_itrigger_enabled(CPURISCVState *env);
39
void riscv_itrigger_update_priv(CPURISCVState *env);
40
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/riscv/cpu.c
43
+++ b/target/riscv/cpu.c
44
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)
45
46
#ifndef CONFIG_USER_ONLY
47
if (cpu->cfg.debug) {
48
- riscv_trigger_init(env);
49
+ riscv_trigger_reset_hold(env);
50
}
51
52
if (kvm_enabled()) {
53
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
54
55
riscv_cpu_register_gdb_regs_for_features(cs);
56
57
+#ifndef CONFIG_USER_ONLY
58
+ if (cpu->cfg.debug) {
59
+ riscv_trigger_realize(&cpu->env);
60
+ }
61
+#endif
62
+
63
qemu_init_vcpu(cs);
64
cpu_reset(cs);
65
66
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/riscv/debug.c
69
+++ b/target/riscv/debug.c
70
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
71
return false;
23
}
72
}
24
73
25
+static void gen_set_pc_imm(DisasContext *ctx, target_ulong dest)
74
-void riscv_trigger_init(CPURISCVState *env)
75
+void riscv_trigger_realize(CPURISCVState *env)
26
+{
76
+{
27
+ if (get_xl(ctx) == MXL_RV32) {
77
+ int i;
28
+ dest = (int32_t)dest;
29
+ }
30
+ tcg_gen_movi_tl(cpu_pc, dest);
31
+}
32
+
78
+
33
+static void gen_set_pc(DisasContext *ctx, TCGv dest)
79
+ for (i = 0; i < RV_MAX_TRIGGERS; i++) {
34
+{
80
+ env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
35
+ if (get_xl(ctx) == MXL_RV32) {
81
+ riscv_itrigger_timer_cb, env);
36
+ tcg_gen_ext32s_tl(cpu_pc, dest);
37
+ } else {
38
+ tcg_gen_mov_tl(cpu_pc, dest);
39
+ }
82
+ }
40
+}
83
+}
41
+
84
+
42
static void generate_exception(DisasContext *ctx, int excp)
85
+void riscv_trigger_reset_hold(CPURISCVState *env)
43
{
86
{
44
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
87
target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0);
45
+ gen_set_pc_imm(ctx, ctx->base.pc_next);
88
int i;
46
gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
89
@@ -XXX,XX +XXX,XX @@ void riscv_trigger_init(CPURISCVState *env)
47
ctx->base.is_jmp = DISAS_NORETURN;
90
env->tdata3[i] = 0;
48
}
91
env->cpu_breakpoint[i] = NULL;
49
92
env->cpu_watchpoint[i] = NULL;
50
static void generate_exception_mtval(DisasContext *ctx, int excp)
93
- env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
51
{
94
- riscv_itrigger_timer_cb, env);
52
- tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
95
+ timer_del(env->itrigger_timer[i]);
53
+ gen_set_pc_imm(ctx, ctx->base.pc_next);
54
tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
55
gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
56
ctx->base.is_jmp = DISAS_NORETURN;
57
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
58
{
59
if (translator_use_goto_tb(&ctx->base, dest)) {
60
tcg_gen_goto_tb(n);
61
- tcg_gen_movi_tl(cpu_pc, dest);
62
+ gen_set_pc_imm(ctx, dest);
63
tcg_gen_exit_tb(ctx->base.tb, n);
64
} else {
65
- tcg_gen_movi_tl(cpu_pc, dest);
66
+ gen_set_pc_imm(ctx, dest);
67
tcg_gen_lookup_and_goto_ptr();
68
}
96
}
69
}
97
}
70
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
71
index XXXXXXX..XXXXXXX 100644
72
--- a/target/riscv/insn_trans/trans_privileged.c.inc
73
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
74
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
75
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
76
{
77
#ifndef CONFIG_USER_ONLY
78
- tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
79
+ gen_set_pc_imm(ctx, ctx->pc_succ_insn);
80
gen_helper_wfi(cpu_env);
81
return true;
82
#else
83
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
84
index XXXXXXX..XXXXXXX 100644
85
--- a/target/riscv/insn_trans/trans_rvi.c.inc
86
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
87
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
88
tcg_gen_addi_tl(cpu_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
89
tcg_gen_andi_tl(cpu_pc, cpu_pc, (target_ulong)-2);
90
91
+ gen_set_pc(ctx, cpu_pc);
92
if (!has_ext(ctx, RVC)) {
93
TCGv t0 = tcg_temp_new();
94
95
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
96
* FENCE_I is a no-op in QEMU,
97
* however we need to end the translation block
98
*/
99
- tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
100
+ gen_set_pc_imm(ctx, ctx->pc_succ_insn);
101
tcg_gen_exit_tb(NULL, 0);
102
ctx->base.is_jmp = DISAS_NORETURN;
103
return true;
104
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
105
static bool do_csr_post(DisasContext *ctx)
106
{
107
/* We may have changed important cpu state -- exit to main loop. */
108
- tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
109
+ gen_set_pc_imm(ctx, ctx->pc_succ_insn);
110
tcg_gen_exit_tb(NULL, 0);
111
ctx->base.is_jmp = DISAS_NORETURN;
112
return true;
113
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/riscv/insn_trans/trans_rvv.c.inc
116
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
117
@@ -XXX,XX +XXX,XX @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
118
gen_set_gpr(s, rd, dst);
119
mark_vs_dirty(s);
120
121
- tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
122
+ gen_set_pc_imm(s, s->pc_succ_insn);
123
tcg_gen_lookup_and_goto_ptr();
124
s->base.is_jmp = DISAS_NORETURN;
125
126
@@ -XXX,XX +XXX,XX @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
127
gen_helper_vsetvl(dst, cpu_env, s1, s2);
128
gen_set_gpr(s, rd, dst);
129
mark_vs_dirty(s);
130
- tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
131
+ gen_set_pc_imm(s, s->pc_succ_insn);
132
tcg_gen_lookup_and_goto_ptr();
133
s->base.is_jmp = DISAS_NORETURN;
134
135
--
98
--
136
2.31.1
99
2.41.0
137
100
138
101
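
The gen_set_pc_imm()/gen_set_pc() helpers added above exist so that, when
the effective XLEN is 32 on a 64-bit target_ulong, the written pc is kept
sign-extended. The effect is the plain C sign-extension idiom; a
stand-alone illustration with an arbitrary address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t dest = 0x80001234;      /* RV32 address with bit 31 set */
    uint64_t pc = (int32_t)dest;     /* what the MXL_RV32 path does */

    /* Prints 0xffffffff80001234: the upper half mirrors bit 31. */
    printf("pc = 0x%016llx\n", (unsigned long long)pc);
    return 0;
}
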
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Leon Schuermann <leons@opentitan.org>
2
2
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
3
When the rule-lock bypass (RLB) bit is set in the mseccfg CSR, the PMP
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
configuration lock bits must not apply. While this behavior is
5
implemented for the pmpcfgX CSRs, this bit is not respected for
6
changes to the pmpaddrX CSRs. This patch ensures that pmpaddrX CSR
7
writes work even on locked regions when the global rule-lock bypass is
8
enabled.
9
10
Signed-off-by: Leon Schuermann <leons@opentitan.org>
11
Reviewed-by: Mayuresh Chitale <mchitale@ventanamicro.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20220120122050.41546-2-zhiwei_liu@c-sky.com
13
Message-ID: <20230829215046.1430463-1-leon@is.currently.online>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
15
---
9
target/riscv/csr.c | 19 +++++++++++++++++++
16
target/riscv/pmp.c | 4 ++++
10
target/riscv/pmp.c | 12 ++++--------
17
1 file changed, 4 insertions(+)
11
2 files changed, 23 insertions(+), 8 deletions(-)
12
18
13
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/csr.c
16
+++ b/target/riscv/csr.c
17
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
18
return RISCV_EXCP_NONE;
19
}
20
21
+static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
22
+{
23
+ /* TODO: RV128 restriction check */
24
+ if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
25
+ return false;
26
+ }
27
+ return true;
28
+}
29
+
30
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
31
target_ulong *val)
32
{
33
+ uint32_t reg_index = csrno - CSR_PMPCFG0;
34
+
35
+ if (!check_pmp_reg_index(env, reg_index)) {
36
+ return RISCV_EXCP_ILLEGAL_INST;
37
+ }
38
*val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
39
return RISCV_EXCP_NONE;
40
}
41
@@ -XXX,XX +XXX,XX @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
42
static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
43
target_ulong val)
44
{
45
+ uint32_t reg_index = csrno - CSR_PMPCFG0;
46
+
47
+ if (!check_pmp_reg_index(env, reg_index)) {
48
+ return RISCV_EXCP_ILLEGAL_INST;
49
+ }
50
pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
51
return RISCV_EXCP_NONE;
52
}
53
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
19
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
54
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
55
--- a/target/riscv/pmp.c
21
--- a/target/riscv/pmp.c
56
+++ b/target/riscv/pmp.c
22
+++ b/target/riscv/pmp.c
57
@@ -XXX,XX +XXX,XX @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
23
@@ -XXX,XX +XXX,XX @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
24
*/
25
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
58
{
26
{
59
int i;
27
+ /* mseccfg.RLB is set */
60
uint8_t cfg_val;
28
+ if (MSECCFG_RLB_ISSET(env)) {
61
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
29
+ return 0;
62
30
+ }
63
trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
31
64
32
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
65
- if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
33
return 1;
66
- qemu_log_mask(LOG_GUEST_ERROR,
67
- "ignoring pmpcfg write - incorrect address\n");
68
- return;
69
- }
70
-
71
- for (i = 0; i < sizeof(target_ulong); i++) {
72
+ for (i = 0; i < pmpcfg_nums; i++) {
73
cfg_val = (val >> 8 * i) & 0xff;
74
pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
75
}
76
@@ -XXX,XX +XXX,XX @@ target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
77
int i;
78
target_ulong cfg_val = 0;
79
target_ulong val = 0;
80
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
81
82
- for (i = 0; i < sizeof(target_ulong); i++) {
83
+ for (i = 0; i < pmpcfg_nums; i++) {
84
val = pmp_read_cfg(env, (reg_index * 4) + i);
85
cfg_val |= (val << (i * 8));
86
}
87
--
34
--
88
2.31.1
35
2.41.0
89
90
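
The "pmpcfg_nums = 2 << riscv_cpu_mxl(env)" expression above leans on the
misa.MXL encoding (1 for RV32, 2 for RV64, the values QEMU's
MXL_RV32/MXL_RV64 follow), giving the number of 8-bit PMP configuration
fields packed into one pmpcfgX CSR. A quick stand-alone check of the
arithmetic:

#include <stdio.h>

int main(void)
{
    /* Prints 4 for MXL=1 (RV32) and 8 for MXL=2 (RV64). */
    for (int mxl = 1; mxl <= 2; mxl++) {
        printf("MXL=%d -> %d config bytes per pmpcfg CSR\n", mxl, 2 << mxl);
    }
    return 0;
}
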
Deleted patch
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
1
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20220120122050.41546-10-zhiwei_liu@c-sky.com
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/csr.c | 3 +++
10
1 file changed, 3 insertions(+)
11
12
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/csr.c
15
+++ b/target/riscv/csr.c
16
@@ -XXX,XX +XXX,XX @@ static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
17
int csr_priv = get_field(csrno, 0x300);
18
int pm_current;
19
20
+ if (env->debugger) {
21
+ return false;
22
+ }
23
/*
24
* If priv lvls differ that means we're accessing csr from higher priv lvl,
25
* so allow the access
26
--
27
2.31.1
28
29
Deleted patch
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
1
3
The write mask represents the bits we care about.
4
5
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-id: 20220120122050.41546-11-zhiwei_liu@c-sky.com
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/op_helper.c | 3 ++-
12
target/riscv/insn_trans/trans_rvi.c.inc | 12 ++++++++----
13
2 files changed, 10 insertions(+), 5 deletions(-)
14
15
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/op_helper.c
18
+++ b/target/riscv/op_helper.c
19
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
20
21
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
22
{
23
- RISCVException ret = riscv_csrrw(env, csr, NULL, src, -1);
24
+ target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
25
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);
26
27
if (ret != RISCV_EXCP_NONE) {
28
riscv_raise_exception(env, ret, GETPC());
29
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/insn_trans/trans_rvi.c.inc
32
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
33
@@ -XXX,XX +XXX,XX @@ static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
34
35
static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
36
{
37
- if (get_xl(ctx) < MXL_RV128) {
38
+ RISCVMXL xl = get_xl(ctx);
39
+ if (xl < MXL_RV128) {
40
TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
41
42
/*
43
@@ -XXX,XX +XXX,XX @@ static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
44
return do_csrw(ctx, a->csr, src);
45
}
46
47
- TCGv mask = tcg_constant_tl(-1);
48
+ TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
49
+ (target_ulong)-1);
50
return do_csrrw(ctx, a->rd, a->csr, src, mask);
51
} else {
52
TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
53
@@ -XXX,XX +XXX,XX @@ static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
54
55
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
56
{
57
- if (get_xl(ctx) < MXL_RV128) {
58
+ RISCVMXL xl = get_xl(ctx);
59
+ if (xl < MXL_RV128) {
60
TCGv src = tcg_constant_tl(a->rs1);
61
62
/*
63
@@ -XXX,XX +XXX,XX @@ static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
64
return do_csrw(ctx, a->csr, src);
65
}
66
67
- TCGv mask = tcg_constant_tl(-1);
68
+ TCGv mask = tcg_constant_tl(xl == MXL_RV32 ? UINT32_MAX :
69
+ (target_ulong)-1);
70
return do_csrrw(ctx, a->rd, a->csr, src, mask);
71
} else {
72
TCGv src = tcg_constant_tl(a->rs1);
73
--
74
2.31.1
75
76
Deleted patch
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
2
1
3
Replace the array of pm_mask/pm_base with scalar variables.
4
Remove the cached array value in DisasContext.
5
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-id: 20220120122050.41546-13-zhiwei_liu@c-sky.com
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/translate.c | 32 ++++++++------------------------
13
1 file changed, 8 insertions(+), 24 deletions(-)
14
15
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/translate.c
18
+++ b/target/riscv/translate.c
19
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
20
static TCGv load_res;
21
static TCGv load_val;
22
/* globals for PM CSRs */
23
-static TCGv pm_mask[4];
24
-static TCGv pm_base[4];
25
+static TCGv pm_mask;
26
+static TCGv pm_base;
27
28
#include "exec/gen-icount.h"
29
30
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
31
TCGv temp[4];
32
/* PointerMasking extension */
33
bool pm_enabled;
34
- TCGv pm_mask;
35
- TCGv pm_base;
36
} DisasContext;
37
38
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
39
@@ -XXX,XX +XXX,XX @@ static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src)
40
return src;
41
} else {
42
temp = temp_new(s);
43
- tcg_gen_andc_tl(temp, src, s->pm_mask);
44
- tcg_gen_or_tl(temp, temp, s->pm_base);
45
+ tcg_gen_andc_tl(temp, src, pm_mask);
46
+ tcg_gen_or_tl(temp, temp, pm_base);
47
return temp;
48
}
49
}
50
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
51
ctx->ntemp = 0;
52
memset(ctx->temp, 0, sizeof(ctx->temp));
53
ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED);
54
- int priv = tb_flags & TB_FLAGS_PRIV_MMU_MASK;
55
- ctx->pm_mask = pm_mask[priv];
56
- ctx->pm_base = pm_base[priv];
57
-
58
ctx->zero = tcg_constant_tl(0);
59
}
60
61
@@ -XXX,XX +XXX,XX @@ void riscv_translate_init(void)
62
"load_res");
63
load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val),
64
"load_val");
65
-#ifndef CONFIG_USER_ONLY
66
/* Assign PM CSRs to tcg globals */
67
- pm_mask[PRV_U] =
68
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmmask), "upmmask");
69
- pm_base[PRV_U] =
70
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmbase), "upmbase");
71
- pm_mask[PRV_S] =
72
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmmask), "spmmask");
73
- pm_base[PRV_S] =
74
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmbase), "spmbase");
75
- pm_mask[PRV_M] =
76
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmmask), "mpmmask");
77
- pm_base[PRV_M] =
78
- tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmbase), "mpmbase");
79
-#endif
80
+ pm_mask = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmmask),
81
+ "pmmask");
82
+ pm_base = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, cur_pmbase),
83
+ "pmbase");
84
}
85
--
86
2.31.1
87
88
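
gen_pm_adjust_address() above now pulls its operands from the single
cur_pmmask/cur_pmbase globals, but the transformation itself is
unchanged: clear the bits selected by the mask, then OR in the base. A
stand-alone illustration with made-up mask/base values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pm_mask = 0xffff000000000000ULL;  /* top 16 bits are masked */
    uint64_t pm_base = 0x1234000000000000ULL;  /* replacement for those bits */
    uint64_t vaddr   = 0xdead00000000beefULL;

    uint64_t adjusted = (vaddr & ~pm_mask) | pm_base;
    /* Prints 0x123400000000beef. */
    printf("adjusted = 0x%016llx\n", (unsigned long long)adjusted);
    return 0;
}
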
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Tommy Wu <tommy.wu@sifive.com>
2
2
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
3
According to the ratified AIA v1.0 spec, when vsiselect has a reserved value, attempts
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
from M-mode or HS-mode to access vsireg, or from VS-mode to access
5
Message-id: 20220120122050.41546-24-zhiwei_liu@c-sky.com
5
sireg, should preferably raise an illegal instruction exception.
6
7
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
8
Reviewed-by: Frank Chang <frank.chang@sifive.com>
9
Message-ID: <20230816061647.600672-1-tommy.wu@sifive.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
11
---
8
target/riscv/csr.c | 8 ++++----
12
target/riscv/csr.c | 7 +++++--
9
1 file changed, 4 insertions(+), 4 deletions(-)
13
1 file changed, 5 insertions(+), 2 deletions(-)
10
14
11
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
15
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/target/riscv/csr.c
17
--- a/target/riscv/csr.c
14
+++ b/target/riscv/csr.c
18
+++ b/target/riscv/csr.c
15
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
19
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
16
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
20
static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
17
MSTATUS_TW | MSTATUS_VS;
21
target_ulong new_val, target_ulong wr_mask)
18
19
- if (xl != MXL_RV32) {
20
+ if (xl != MXL_RV32 || env->debugger) {
21
/*
22
* RV32: MPV and GVA are not in mstatus. The current plan is to
23
* add them to mstatush. For now, we just don't support it.
24
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
25
{
22
{
26
uint64_t mask = sstatus_v1_10_mask;
23
- bool virt;
27
uint64_t sstatus = env->mstatus & mask;
24
+ bool virt, isel_reserved;
28
- if (env->xl != MXL_RV32) {
25
uint8_t *iprio;
29
+ if (env->xl != MXL_RV32 || env->debugger) {
26
int ret = -EINVAL;
30
mask |= SSTATUS64_UXL;
27
target_ulong priv, isel, vgein;
28
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
29
30
/* Decode register details from CSR number */
31
virt = false;
32
+ isel_reserved = false;
33
switch (csrno) {
34
case CSR_MIREG:
35
iprio = env->miprio;
36
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
37
riscv_cpu_mxl_bits(env)),
38
val, new_val, wr_mask);
39
}
40
+ } else {
41
+ isel_reserved = true;
31
}
42
}
32
43
33
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
44
done:
34
target_ulong *val)
45
if (ret) {
35
{
46
- return (env->virt_enabled && virt) ?
36
target_ulong mask = (sstatus_v1_10_mask);
47
+ return (env->virt_enabled && virt && !isel_reserved) ?
37
- if (env->xl != MXL_RV32) {
48
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
38
+ if (env->xl != MXL_RV32 || env->debugger) {
39
mask |= SSTATUS64_UXL;
40
}
49
}
41
/* TODO: Use SXL not MXL. */
50
return RISCV_EXCP_NONE;
42
@@ -XXX,XX +XXX,XX @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
43
{
44
target_ulong mask = (sstatus_v1_10_mask);
45
46
- if (env->xl != MXL_RV32) {
47
+ if (env->xl != MXL_RV32 || env->debugger) {
48
if ((val & SSTATUS64_UXL) != 0) {
49
mask |= SSTATUS64_UXL;
50
}
51
--
51
--
52
2.31.1
52
2.41.0
53
54
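
Flattening the ternary in the hunk above, the exception selection after
this change reads roughly as the fragment below (illustrative only; the
surrounding rmw_xireg() context is omitted). The point is that a reserved
vsiselect value always yields an illegal instruction exception, even for
accesses that would otherwise have trapped as virtual instruction faults.

    /* Fragment, not a complete function. */
    if (ret) {
        if (env->virt_enabled && virt && !isel_reserved) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
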
1
From: LIU Zhiwei <zhiwei_liu@c-sky.com>
1
From: Nikita Shubin <n.shubin@yadro.com>
2
2
3
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
3
As per ISA:
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
5
"For CSRRWI, if rd=x0, then the instruction shall not read the CSR and
6
shall not cause any of the side effects that might occur on a CSR read."
7
8
trans_csrrwi() and trans_csrrw() call do_csrw() if rd=x0, do_csrw() calls
9
riscv_csrrw_do64(), via helper_csrw() passing NULL as *ret_value.
10
11
Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-id: 20220120122050.41546-23-zhiwei_liu@c-sky.com
13
Message-ID: <20230808090914.17634-1-nikita.shubin@maquefel.me>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
15
---
9
target/riscv/cpu_bits.h | 3 +++
16
target/riscv/csr.c | 24 +++++++++++++++---------
10
target/riscv/csr.c | 28 ++++++++++++++++++++++------
17
1 file changed, 15 insertions(+), 9 deletions(-)
11
2 files changed, 25 insertions(+), 6 deletions(-)
12
18
13
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/cpu_bits.h
16
+++ b/target/riscv/cpu_bits.h
17
@@ -XXX,XX +XXX,XX @@ typedef enum {
18
#define COUNTEREN_IR (1 << 2)
19
#define COUNTEREN_HPM3 (1 << 3)
20
21
+/* vsstatus CSR bits */
22
+#define VSSTATUS64_UXL 0x0000000300000000ULL
23
+
24
/* Privilege modes */
25
#define PRV_U 0
26
#define PRV_S 1
27
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
19
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
28
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
29
--- a/target/riscv/csr.c
21
--- a/target/riscv/csr.c
30
+++ b/target/riscv/csr.c
22
+++ b/target/riscv/csr.c
31
@@ -XXX,XX +XXX,XX @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
23
@@ -XXX,XX +XXX,XX @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
32
(1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
24
target_ulong write_mask)
33
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
34
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
35
- SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS | (target_ulong)SSTATUS64_UXL;
36
+ SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
37
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
38
static const target_ulong hip_writable_mask = MIP_VSSIP;
39
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
40
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
41
{
25
{
42
uint64_t mstatus = env->mstatus;
26
RISCVException ret;
43
uint64_t mask = 0;
27
- target_ulong old_value;
44
+ RISCVMXL xl = riscv_cpu_mxl(env);
28
+ target_ulong old_value = 0;
45
29
46
/* flush tlb on mstatus fields that affect VM */
30
/* execute combined read/write operation if it exists */
47
if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
31
if (csr_ops[csrno].op) {
48
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
32
return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
49
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
33
}
50
MSTATUS_TW | MSTATUS_VS;
34
51
35
- /* if no accessor exists then return failure */
52
- if (riscv_cpu_mxl(env) != MXL_RV32) {
36
- if (!csr_ops[csrno].read) {
53
+ if (xl != MXL_RV32) {
37
- return RISCV_EXCP_ILLEGAL_INST;
54
/*
38
- }
55
* RV32: MPV and GVA are not in mstatus. The current plan is to
39
- /* read old value */
56
* add them to mstatush. For now, we just don't support it.
40
- ret = csr_ops[csrno].read(env, csrno, &old_value);
57
*/
41
- if (ret != RISCV_EXCP_NONE) {
58
mask |= MSTATUS_MPV | MSTATUS_GVA;
42
- return ret;
59
+ if ((val & MSTATUS64_UXL) != 0) {
43
+ /*
60
+ mask |= MSTATUS64_UXL;
44
+ * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
45
+ * and we can't throw side effects caused by CSR reads.
46
+ */
47
+ if (ret_value) {
48
+ /* if no accessor exists then return failure */
49
+ if (!csr_ops[csrno].read) {
50
+ return RISCV_EXCP_ILLEGAL_INST;
51
+ }
52
+ /* read old value */
53
+ ret = csr_ops[csrno].read(env, csrno, &old_value);
54
+ if (ret != RISCV_EXCP_NONE) {
55
+ return ret;
61
+ }
56
+ }
62
}
57
}
63
58
64
mstatus = (mstatus & ~mask) | (val & mask);
59
/* write value if writable and write mask set, otherwise drop writes */
65
66
- RISCVMXL xl = riscv_cpu_mxl(env);
67
if (xl > MXL_RV32) {
68
- /* SXL and UXL fields are for now read only */
69
+ /* SXL field is for now read only */
70
mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
71
- mstatus = set_field(mstatus, MSTATUS64_UXL, xl);
72
}
73
env->mstatus = mstatus;
74
env->xl = cpu_recompute_xl(env);
75
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
76
{
77
uint64_t mask = sstatus_v1_10_mask;
78
uint64_t sstatus = env->mstatus & mask;
79
+ if (env->xl != MXL_RV32) {
80
+ mask |= SSTATUS64_UXL;
81
+ }
82
83
*val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
84
return RISCV_EXCP_NONE;
85
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
86
target_ulong *val)
87
{
88
target_ulong mask = (sstatus_v1_10_mask);
89
-
90
+ if (env->xl != MXL_RV32) {
91
+ mask |= SSTATUS64_UXL;
92
+ }
93
/* TODO: Use SXL not MXL. */
94
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
95
return RISCV_EXCP_NONE;
96
@@ -XXX,XX +XXX,XX @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
97
target_ulong val)
98
{
99
target_ulong mask = (sstatus_v1_10_mask);
100
+
101
+ if (env->xl != MXL_RV32) {
102
+ if ((val & SSTATUS64_UXL) != 0) {
103
+ mask |= SSTATUS64_UXL;
104
+ }
105
+ }
106
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
107
return write_mstatus(env, CSR_MSTATUS, newval);
108
}
109
@@ -XXX,XX +XXX,XX @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
110
target_ulong val)
111
{
112
uint64_t mask = (target_ulong)-1;
113
+ if ((val & VSSTATUS64_UXL) == 0) {
114
+ mask &= ~VSSTATUS64_UXL;
115
+ }
116
env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
117
return RISCV_EXCP_NONE;
118
}
119
--
60
--
120
2.31.1
61
2.41.0
121
122
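
Put together, the read half of riscv_csrrw_do64() now behaves roughly
like the fragment below (illustrative only; the predicate checks and the
write half are omitted). A NULL ret_value, i.e. rd == x0 arriving via
helper_csrw(), skips the CSR read and therefore any read side effects, as
the quoted ISA text requires.

    /* Fragment, not a complete function. */
    target_ulong old_value = 0;

    if (ret_value) {
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }
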