From: Alistair Francis <alistair.francis@wdc.com>

The following changes since commit d495e432c04a6394126c35cf96517749708b410f:

  Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging (2023-09-08 10:06:25 -0400)

are available in the Git repository at:

  https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20230911

for you to fetch changes up to e7a03409f29e2da59297d55afbaec98c96e43e3a:

  target/riscv: don't read CSR in riscv_csrrw_do64 (2023-09-11 11:45:55 +1000)

----------------------------------------------------------------
First RISC-V PR for 8.2

* Remove 'host' CPU from TCG
* riscv_htif Fixup printing on big endian hosts
* Add zmmul isa string
* Add smepmp isa string
* Fix page_check_range use in fault-only-first
* Use existing lookup tables for MixColumns
* Add RISC-V vector cryptographic instruction set support
* Implement WARL behaviour for mcountinhibit/mcounteren
* Add Zihintntl extension ISA string to DTS
* Fix zfa fleq.d and fltq.d
* Fix upper/lower mtime write calculation
* Make rtc variable names consistent
* Use abi type for linux-user target_ucontext
* Add RISC-V KVM AIA Support
* Fix riscv,pmu DT node path in the virt machine
* Update CSR bits name for svadu extension
* Mark zicond non-experimental
* Fix satp_mode_finalize() when satp_mode.supported = 0
* Fix non-KVM --enable-debug build
* Add new extensions to hwprobe
* Use accelerated helper for AES64KS1I
* Allocate itrigger timers only once
* Respect mseccfg.RLB for pmpaddrX changes
* Align the AIA model to v1.0 ratified spec
* Don't read the CSR in riscv_csrrw_do64

----------------------------------------------------------------
Akihiko Odaki (1):
      target/riscv: Allocate itrigger timers only once

Ard Biesheuvel (2):
      target/riscv: Use existing lookup tables for MixColumns
      target/riscv: Use accelerated helper for AES64KS1I

Conor Dooley (1):
      hw/riscv: virt: Fix riscv,pmu DT node path

Daniel Henrique Barboza (6):
      target/riscv/cpu.c: do not run 'host' CPU with TCG
      target/riscv/cpu.c: add zmmul isa string
      target/riscv/cpu.c: add smepmp isa string
      target/riscv: fix satp_mode_finalize() when satp_mode.supported = 0
      hw/riscv/virt.c: fix non-KVM --enable-debug build
      hw/intc/riscv_aplic.c fix non-KVM --enable-debug build

Dickon Hood (2):
      target/riscv: Refactor translation of vector-widening instruction
      target/riscv: Add Zvbb ISA extension support

Jason Chien (3):
      target/riscv: Add Zihintntl extension ISA string to DTS
      hw/intc: Fix upper/lower mtime write calculation
      hw/intc: Make rtc variable names consistent

Kiran Ostrolenk (4):
      target/riscv: Refactor some of the generic vector functionality
      target/riscv: Refactor vector-vector translation macro
      target/riscv: Refactor some of the generic vector functionality
      target/riscv: Add Zvknh ISA extension support

LIU Zhiwei (3):
      target/riscv: Fix page_check_range use in fault-only-first
      target/riscv: Fix zfa fleq.d and fltq.d
      linux-user/riscv: Use abi type for target_ucontext

Lawrence Hunter (2):
      target/riscv: Add Zvbc ISA extension support
      target/riscv: Add Zvksh ISA extension support

Leon Schuermann (1):
      target/riscv/pmp.c: respect mseccfg.RLB for pmpaddrX changes

Max Chou (3):
      crypto: Create sm4_subword
      crypto: Add SM4 constant parameter CK
      target/riscv: Add Zvksed ISA extension support

Nazar Kazakov (4):
      target/riscv: Remove redundant "cpu_vl == 0" checks
      target/riscv: Move vector translation checks
      target/riscv: Add Zvkned ISA extension support
      target/riscv: Add Zvkg ISA extension support

Nikita Shubin (1):
      target/riscv: don't read CSR in riscv_csrrw_do64

Rob Bradford (1):
      target/riscv: Implement WARL behaviour for mcountinhibit/mcounteren

Robbin Ehn (1):
      linux-user/riscv: Add new extensions to hwprobe

Thomas Huth (2):
      hw/char/riscv_htif: Fix printing of console characters on big endian hosts
      hw/char/riscv_htif: Fix the console syscall on big endian hosts

Tommy Wu (1):
      target/riscv: Align the AIA model to v1.0 ratified spec

Vineet Gupta (1):
      riscv: zicond: make non-experimental

Weiwei Li (1):
      target/riscv: Update CSR bits name for svadu extension

Yong-Xuan Wang (5):
      target/riscv: support the AIA device emulation with KVM enabled
      target/riscv: check the in-kernel irqchip support
      target/riscv: Create an KVM AIA irqchip
      target/riscv: update APLIC and IMSIC to support KVM AIA
      target/riscv: select KVM AIA in riscv virt machine

 include/crypto/aes.h | 7 +
 include/crypto/sm4.h | 9 +
 target/riscv/cpu_bits.h | 8 +-
 target/riscv/cpu_cfg.h | 9 +
 target/riscv/debug.h | 3 +-
 target/riscv/helper.h | 98 +++
 target/riscv/kvm_riscv.h | 5 +
 target/riscv/vector_internals.h | 228 +++++++
 target/riscv/insn32.decode | 58 ++
 crypto/aes.c | 4 +-
 crypto/sm4.c | 10 +
 hw/char/riscv_htif.c | 12 +-
 hw/intc/riscv_aclint.c | 11 +-
 hw/intc/riscv_aplic.c | 52 +-
 hw/intc/riscv_imsic.c | 25 +-
 hw/riscv/virt.c | 374 ++++++------
 linux-user/riscv/signal.c | 4 +-
 linux-user/syscall.c | 14 +-
 target/arm/tcg/crypto_helper.c | 10 +-
 target/riscv/cpu.c | 83 ++-
 target/riscv/cpu_helper.c | 6 +-
 target/riscv/crypto_helper.c | 51 +-
 target/riscv/csr.c | 54 +-
 target/riscv/debug.c | 15 +-
 target/riscv/kvm.c | 201 ++++++-
 target/riscv/pmp.c | 4 +
 target/riscv/translate.c | 1 +
 target/riscv/vcrypto_helper.c | 970 ++++++++++++++++++++++++++++++
 target/riscv/vector_helper.c | 245 +-------
 target/riscv/vector_internals.c | 81 +++
 target/riscv/insn_trans/trans_rvv.c.inc | 171 +++---
 target/riscv/insn_trans/trans_rvvk.c.inc | 606 +++++++++++++++++++
 target/riscv/insn_trans/trans_rvzfa.c.inc | 4 +-
 target/riscv/meson.build | 4 +-
 34 files changed, 2785 insertions(+), 652 deletions(-)
 create mode 100644 target/riscv/vector_internals.h
 create mode 100644 target/riscv/vcrypto_helper.c
 create mode 100644 target/riscv/vector_internals.c
 create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

The 'host' CPU is available in a CONFIG_KVM build and it's currently
available for all accels, but is a KVM only CPU. This means that in a
RISC-V KVM capable host we can do things like this:

$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
qemu-system-riscv64: H extension requires priv spec 1.12.0

This CPU does not have a priv spec because we don't filter its extensions
via priv spec. We shouldn't be reaching riscv_cpu_realize_tcg() at all
with the 'host' CPU.

We don't have a way to filter the 'host' CPU out of the available CPU
options (-cpu help) if the build includes both KVM and TCG. What we can
do is to error out during riscv_cpu_realize_tcg() if the user chooses
the 'host' CPU with accel=tcg:

$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
qemu-system-riscv64: 'host' CPU is not compatible with TCG acceleration

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230721133411.474105-1-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
     CPURISCVState *env = &cpu->env;
     Error *local_err = NULL;
 
+    if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
+        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
+        return;
+    }
+
     riscv_cpu_validate_misa_mxl(cpu, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
--
2.41.0

From: Thomas Huth <thuth@redhat.com>

The character that should be printed is stored in the 64 bit "payload"
variable. The code currently tries to print it by taking the address
of the variable and passing this pointer to qemu_chr_fe_write(). However,
this only works on little endian hosts where the least significant bits
are stored on the lowest address. To do this in a portable way, we have
to store the value in an uint8_t variable instead.

Fixes: 5033606780 ("RISC-V HTIF Console")
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230721094720.902454-2-thuth@redhat.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/char/riscv_htif.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/char/riscv_htif.c
+++ b/hw/char/riscv_htif.c
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
             s->tohost = 0; /* clear to indicate we read */
             return;
         } else if (cmd == HTIF_CONSOLE_CMD_PUTC) {
-            qemu_chr_fe_write(&s->chr, (uint8_t *)&payload, 1);
+            uint8_t ch = (uint8_t)payload;
+            qemu_chr_fe_write(&s->chr, &ch, 1);
             resp = 0x100 | (uint8_t)payload;
         } else {
             qemu_log("HTIF device %d: unknown command\n", device);
--
2.41.0

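A minimal, self-contained sketch of the endianness pitfall the patch above fixes (illustrative only; send_byte() below is a stand-in for qemu_chr_fe_write(), not a QEMU API). On a big-endian host the first byte of a uint64_t holds the most significant bits, so passing &payload hands over 0x00 instead of the intended character, while narrowing to a uint8_t first works on either kind of host:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for qemu_chr_fe_write(): consumes one byte from a buffer. */
    static void send_byte(const uint8_t *buf)
    {
        printf("0x%02x\n", buf[0]);
    }

    int main(void)
    {
        uint64_t payload = 0x41;               /* guest wrote 'A' */

        /* Broken on big-endian hosts: buf[0] is the MSB there, i.e. 0x00. */
        send_byte((const uint8_t *)&payload);

        /* Portable: narrow the value first, then pass a byte-sized buffer. */
        uint8_t ch = (uint8_t)payload;
        send_byte(&ch);

        return 0;
    }
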
From: Thomas Huth <thuth@redhat.com>

Values that have been read via cpu_physical_memory_read() from the
guest's memory have to be swapped in case the host endianess differs
from the guest.

Fixes: a6e13e31d5 ("riscv_htif: Support console output via proxy syscall")
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-Id: <20230721094720.902454-3-thuth@redhat.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/char/riscv_htif.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/char/riscv_htif.c
+++ b/hw/char/riscv_htif.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/timer.h"
 #include "qemu/error-report.h"
 #include "exec/address-spaces.h"
+#include "exec/tswap.h"
 #include "sysemu/dma.h"
 
 #define RISCV_DEBUG_HTIF 0
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
         } else {
             uint64_t syscall[8];
             cpu_physical_memory_read(payload, syscall, sizeof(syscall));
-            if (syscall[0] == PK_SYS_WRITE &&
-                syscall[1] == HTIF_DEV_CONSOLE &&
-                syscall[3] == HTIF_CONSOLE_CMD_PUTC) {
+            if (tswap64(syscall[0]) == PK_SYS_WRITE &&
+                tswap64(syscall[1]) == HTIF_DEV_CONSOLE &&
+                tswap64(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) {
                 uint8_t ch;
-                cpu_physical_memory_read(syscall[2], &ch, 1);
+                cpu_physical_memory_read(tswap64(syscall[2]), &ch, 1);
                 qemu_chr_fe_write(&s->chr, &ch, 1);
                 resp = 0x100 | (uint8_t)payload;
             } else {
--
2.41.0

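For context, a rough sketch of what a tswap64()-style helper does (the HOST_IS_BIG_ENDIAN / GUEST_IS_BIG_ENDIAN macros and bswap64() below are local to this example, not QEMU symbols): multi-byte values copied out of guest memory are byte-swapped only when guest and host byte order differ, which is why the comparisons above have to go through tswap64() before being checked against host-order constants.

    #include <assert.h>
    #include <stdint.h>

    /* Assumed for this sketch: a little-endian host running a big-endian guest. */
    #define HOST_IS_BIG_ENDIAN  0
    #define GUEST_IS_BIG_ENDIAN 1

    /* Reverse the byte order of a 64-bit value. */
    static uint64_t bswap64(uint64_t v)
    {
        return ((v & 0x00000000000000ffULL) << 56) |
               ((v & 0x000000000000ff00ULL) << 40) |
               ((v & 0x0000000000ff0000ULL) << 24) |
               ((v & 0x00000000ff000000ULL) <<  8) |
               ((v & 0x000000ff00000000ULL) >>  8) |
               ((v & 0x0000ff0000000000ULL) >> 24) |
               ((v & 0x00ff000000000000ULL) >> 40) |
               ((v & 0xff00000000000000ULL) >> 56);
    }

    /* Swap only when guest and host disagree, as a tswap64()-like helper would. */
    static uint64_t tswap64_sketch(uint64_t v)
    {
        return (HOST_IS_BIG_ENDIAN != GUEST_IS_BIG_ENDIAN) ? bswap64(v) : v;
    }

    int main(void)
    {
        assert(tswap64_sketch(0x0102030405060708ULL) == 0x0807060504030201ULL);
        return 0;
    }
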
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

zmmul was promoted from experimental to ratified in commit 6d00ffad4e95.
Add a riscv,isa string for it.

Fixes: 6d00ffad4e95 ("target/riscv: move zmmul out of the experimental properties")
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230720132424.371132-2-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
     ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
     ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
+    ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
     ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
--
2.41.0

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

The cpu->cfg.epmp extension is still experimental, but it already has a
'smepmp' riscv,isa string. Add it.

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20230720132424.371132-3-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
     ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
     ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+    ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
     ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
     ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
     ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
--
2.41.0

From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

Commit bef6f008b98(accel/tcg: Return bool from page_check_range) converts
integer return value to bool type. However, it wrongly converted the use
of the API in riscv fault-only-first, where page_check_range < = 0, should
be converted to !page_check_range.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20230729031618.821-1-zhiwei_liu@linux.alibaba.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/vector_helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base,
                                          cpu_mmu_index(env, false));
         if (host) {
 #ifdef CONFIG_USER_ONLY
-            if (page_check_range(addr, offset, PAGE_READ)) {
+            if (!page_check_range(addr, offset, PAGE_READ)) {
                 vl = i;
                 goto ProbeSuccess;
             }
--
2.41.0

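A small illustration of the call-site inversion the patch above restores (check_range_old() and check_range_new() are hypothetical helpers for this sketch, not the real page_check_range() signatures): when an API moves from returning 0/negative to returning a bool that is true on success, a fault check written as "ret < 0" has to become "!ok", not "ok".

    #include <stdbool.h>
    #include <stdio.h>

    /* Old-style API: returns 0 on success, -1 on failure. */
    static int check_range_old(int accessible) { return accessible ? 0 : -1; }

    /* New-style API: returns true on success, false on failure. */
    static bool check_range_new(int accessible) { return accessible != 0; }

    int main(void)
    {
        int accessible = 0;   /* pretend the page is NOT accessible */

        /* Old call site: the fault branch fires on a negative result. */
        if (check_range_old(accessible) < 0) {
            printf("old API: fault detected\n");
        }

        /* After the bool conversion the test must be inverted: the fault
         * branch now corresponds to the function returning false. */
        if (!check_range_new(accessible)) {
            printf("new API: fault detected\n");
        }
        return 0;
    }
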
From: Ard Biesheuvel <ardb@kernel.org>

The AES MixColumns and InvMixColumns operations are relatively
expensive 4x4 matrix multiplications in GF(2^8), which is why C
implementations usually rely on precomputed lookup tables rather than
performing the calculations on demand.

Given that we already carry those tables in QEMU, we can just grab the
right value in the implementation of the RISC-V AES32 instructions. Note
that the tables in question are permuted according to the respective
Sbox, so we can omit the Sbox lookup as well in this case.

Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
Cc: Zewen Ye <lustrew@foxmail.com>
Cc: Weiwei Li <liweiwei@iscas.ac.cn>
Cc: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20230731084043.1791984-1-ardb@kernel.org>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 include/crypto/aes.h | 7 +++++++
 crypto/aes.c | 4 ++--
 target/riscv/crypto_helper.c | 34 ++++------------------------------
 3 files changed, 13 insertions(+), 32 deletions(-)

diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index XXXXXXX..XXXXXXX 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -XXX,XX +XXX,XX @@ void AES_decrypt(const unsigned char *in, unsigned char *out,
 extern const uint8_t AES_sbox[256];
 extern const uint8_t AES_isbox[256];
 
+/*
+AES_Te0[x] = S [x].[02, 01, 01, 03];
+AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
+*/
+
+extern const uint32_t AES_Te0[256], AES_Td0[256];
+
 #endif
diff --git a/crypto/aes.c b/crypto/aes.c
index XXXXXXX..XXXXXXX 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -XXX,XX +XXX,XX @@ AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
 AES_Td4[x] = Si[x].[01, 01, 01, 01];
 */
 
-static const uint32_t AES_Te0[256] = {
+const uint32_t AES_Te0[256] = {
     0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
     0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
     0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
@@ -XXX,XX +XXX,XX @@ static const uint32_t AES_Te4[256] = {
     0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
 };
 
-static const uint32_t AES_Td0[256] = {
+const uint32_t AES_Td0[256] = {
     0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
     0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
     0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "crypto/aes-round.h"
 #include "crypto/sm4.h"
 
-#define AES_XTIME(a) \
-    ((a << 1) ^ ((a & 0x80) ? 0x1b : 0))
-
-#define AES_GFMUL(a, b) (( \
-    (((b) & 0x1) ? (a) : 0) ^ \
-    (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
-    (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
-    (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)
-
-static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
-{
-    uint32_t u;
-
-    if (fwd) {
-        u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) |
-            (AES_GFMUL(x, 2) << 0);
-    } else {
-        u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) |
-            (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0);
-    }
-    return u;
-}
-
 #define sext32_xlen(x) (target_ulong)(int32_t)(x)
 
 static inline target_ulong aes32_operation(target_ulong shamt,
@@ -XXX,XX +XXX,XX @@ static inline target_ulong aes32_operation(target_ulong shamt,
                                            bool enc, bool mix)
 {
     uint8_t si = rs2 >> shamt;
-    uint8_t so;
     uint32_t mixed;
     target_ulong res;
 
     if (enc) {
-        so = AES_sbox[si];
         if (mix) {
-            mixed = aes_mixcolumn_byte(so, true);
+            mixed = be32_to_cpu(AES_Te0[si]);
         } else {
-            mixed = so;
+            mixed = AES_sbox[si];
         }
     } else {
-        so = AES_isbox[si];
         if (mix) {
-            mixed = aes_mixcolumn_byte(so, false);
+            mixed = be32_to_cpu(AES_Td0[si]);
         } else {
-            mixed = so;
+            mixed = AES_isbox[si];
         }
     }
     mixed = rol32(mixed, shamt);
--
2.41.0

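As a sketch of the table-versus-on-demand trade-off described above (standalone example; xtime() and gfmul() mirror the removed AES_XTIME/AES_GFMUL macros, and mul2[]/mul3[] are hypothetical tables, not QEMU's AES tables): precomputing the GF(2^8) products once and indexing by byte gives the same results as recomputing them on every call, which is what reusing the existing AES_Te0/AES_Td0 tables buys here.

    #include <assert.h>
    #include <stdint.h>

    /* Multiply by x (i.e. 2) in GF(2^8) with the AES reduction polynomial. */
    static uint8_t xtime(uint8_t a)
    {
        return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0x00));
    }

    /* GF(2^8) multiply by a small constant, as the removed AES_GFMUL() did. */
    static uint8_t gfmul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        if (b & 1) r ^= a;
        if (b & 2) r ^= xtime(a);
        if (b & 4) r ^= xtime(xtime(a));
        if (b & 8) r ^= xtime(xtime(xtime(a)));
        return r;
    }

    int main(void)
    {
        static uint8_t mul2[256], mul3[256];

        /* Build the tables once; lookups then replace per-byte multiplications. */
        for (int i = 0; i < 256; i++) {
            mul2[i] = gfmul((uint8_t)i, 2);
            mul3[i] = gfmul((uint8_t)i, 3);
        }
        /* Spot checks: {57}*{02} = {ae}, {57}*{03} = {57} ^ {ae} = {f9}. */
        assert(mul2[0x57] == 0xae);
        assert(mul3[0x57] == (uint8_t)(0x57 ^ xtime(0x57)));
        return 0;
    }
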
From: Atish Patra <atish.patra@wdc.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
mcycle/minstret are actually WARL registers and can be written with any
3
Take some functions/macros out of `vector_helper` and put them in a new
4
given value. With SBI PMU extension, it will be used to store a initial
4
module called `vector_internals`. This ensures they can be used by both
5
value provided from supervisor OS. The Qemu also need prohibit the counter
5
vector and vector-crypto helpers (latter implemented in proceeding
6
increment if mcountinhibit is set.
6
commits).
7
7
8
Support mcycle/minstret through generic counter infrastructure.
8
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
9
9
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Max Chou <max.chou@sifive.com>
11
Signed-off-by: Atish Patra <atish.patra@wdc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Atish Patra <atishp@rivosinc.com>
12
Message-ID: <20230711165917.2629866-2-max.chou@sifive.com>
13
Message-Id: <20220620231603.2547260-8-atishp@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
14
---
16
target/riscv/cpu.h | 23 ++++--
15
target/riscv/vector_internals.h | 182 +++++++++++++++++++++++++++++
17
target/riscv/pmu.h | 28 +++++++
16
target/riscv/vector_helper.c | 201 +-------------------------------
18
target/riscv/csr.c | 155 ++++++++++++++++++++++++++++-----------
17
target/riscv/vector_internals.c | 81 +++++++++++++
19
target/riscv/machine.c | 25 ++++++-
18
target/riscv/meson.build | 1 +
20
target/riscv/pmu.c | 32 ++++++++
19
4 files changed, 265 insertions(+), 200 deletions(-)
21
target/riscv/meson.build | 3 +-
20
create mode 100644 target/riscv/vector_internals.h
22
6 files changed, 213 insertions(+), 53 deletions(-)
21
create mode 100644 target/riscv/vector_internals.c
23
create mode 100644 target/riscv/pmu.h
24
create mode 100644 target/riscv/pmu.c
25
22
26
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
23
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/riscv/cpu.h
29
+++ b/target/riscv/cpu.h
30
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState CPURISCVState;
31
#endif
32
33
#define RV_VLEN_MAX 1024
34
-#define RV_MAX_MHPMEVENTS 29
35
+#define RV_MAX_MHPMEVENTS 32
36
#define RV_MAX_MHPMCOUNTERS 32
37
38
FIELD(VTYPE, VLMUL, 0, 3)
39
@@ -XXX,XX +XXX,XX @@ FIELD(VTYPE, VMA, 7, 1)
40
FIELD(VTYPE, VEDIV, 8, 2)
41
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
42
43
+typedef struct PMUCTRState {
44
+ /* Current value of a counter */
45
+ target_ulong mhpmcounter_val;
46
+ /* Current value of a counter in RV32*/
47
+ target_ulong mhpmcounterh_val;
48
+ /* Snapshot values of counter */
49
+ target_ulong mhpmcounter_prev;
50
+ /* Snapshort value of a counter in RV32 */
51
+ target_ulong mhpmcounterh_prev;
52
+ bool started;
53
+} PMUCTRState;
54
+
55
struct CPUArchState {
56
target_ulong gpr[32];
57
target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
58
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
59
60
target_ulong mcountinhibit;
61
62
- /* PMU counter configured values */
63
- target_ulong mhpmcounter_val[RV_MAX_MHPMCOUNTERS];
64
-
65
- /* for RV32 */
66
- target_ulong mhpmcounterh_val[RV_MAX_MHPMCOUNTERS];
67
+ /* PMU counter state */
68
+ PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
69
70
- /* PMU event selector configured values */
71
+ /* PMU event selector configured values. First three are unused*/
72
target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];
73
74
target_ulong sscratch;
75
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
76
new file mode 100644
24
new file mode 100644
77
index XXXXXXX..XXXXXXX
25
index XXXXXXX..XXXXXXX
78
--- /dev/null
26
--- /dev/null
79
+++ b/target/riscv/pmu.h
27
+++ b/target/riscv/vector_internals.h
80
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@
81
+/*
29
+/*
82
+ * RISC-V PMU header file.
30
+ * RISC-V Vector Extension Internals
83
+ *
31
+ *
84
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
32
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
85
+ *
33
+ *
86
+ * This program is free software; you can redistribute it and/or modify it
34
+ * This program is free software; you can redistribute it and/or modify it
87
+ * under the terms and conditions of the GNU General Public License,
35
+ * under the terms and conditions of the GNU General Public License,
88
+ * version 2 or later, as published by the Free Software Foundation.
36
+ * version 2 or later, as published by the Free Software Foundation.
89
+ *
37
+ *
...
...
94
+ *
42
+ *
95
+ * You should have received a copy of the GNU General Public License along with
43
+ * You should have received a copy of the GNU General Public License along with
96
+ * this program. If not, see <http://www.gnu.org/licenses/>.
44
+ * this program. If not, see <http://www.gnu.org/licenses/>.
97
+ */
45
+ */
98
+
46
+
47
+#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
48
+#define TARGET_RISCV_VECTOR_INTERNALS_H
49
+
99
+#include "qemu/osdep.h"
50
+#include "qemu/osdep.h"
100
+#include "qemu/log.h"
51
+#include "qemu/bitops.h"
101
+#include "cpu.h"
52
+#include "cpu.h"
102
+#include "qemu/main-loop.h"
53
+#include "tcg/tcg-gvec-desc.h"
103
+#include "exec/exec-all.h"
54
+#include "internals.h"
104
+
55
+
105
+bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
56
+static inline uint32_t vext_nf(uint32_t desc)
106
+ uint32_t target_ctr);
57
+{
107
+bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env,
58
+ return FIELD_EX32(simd_data(desc), VDATA, NF);
108
+ uint32_t target_ctr);
59
+}
109
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
60
+
61
+/*
62
+ * Note that vector data is stored in host-endian 64-bit chunks,
63
+ * so addressing units smaller than that needs a host-endian fixup.
64
+ */
65
+#if HOST_BIG_ENDIAN
66
+#define H1(x) ((x) ^ 7)
67
+#define H1_2(x) ((x) ^ 6)
68
+#define H1_4(x) ((x) ^ 4)
69
+#define H2(x) ((x) ^ 3)
70
+#define H4(x) ((x) ^ 1)
71
+#define H8(x) ((x))
72
+#else
73
+#define H1(x) (x)
74
+#define H1_2(x) (x)
75
+#define H1_4(x) (x)
76
+#define H2(x) (x)
77
+#define H4(x) (x)
78
+#define H8(x) (x)
79
+#endif
80
+
81
+/*
82
+ * Encode LMUL to lmul as following:
83
+ * LMUL vlmul lmul
84
+ * 1 000 0
85
+ * 2 001 1
86
+ * 4 010 2
87
+ * 8 011 3
88
+ * - 100 -
89
+ * 1/8 101 -3
90
+ * 1/4 110 -2
91
+ * 1/2 111 -1
92
+ */
93
+static inline int32_t vext_lmul(uint32_t desc)
94
+{
95
+ return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
96
+}
97
+
98
+static inline uint32_t vext_vm(uint32_t desc)
99
+{
100
+ return FIELD_EX32(simd_data(desc), VDATA, VM);
101
+}
102
+
103
+static inline uint32_t vext_vma(uint32_t desc)
104
+{
105
+ return FIELD_EX32(simd_data(desc), VDATA, VMA);
106
+}
107
+
108
+static inline uint32_t vext_vta(uint32_t desc)
109
+{
110
+ return FIELD_EX32(simd_data(desc), VDATA, VTA);
111
+}
112
+
113
+static inline uint32_t vext_vta_all_1s(uint32_t desc)
114
+{
115
+ return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
116
+}
117
+
118
+/*
119
+ * Earlier designs (pre-0.9) had a varying number of bits
120
+ * per mask value (MLEN). In the 0.9 design, MLEN=1.
121
+ * (Section 4.5)
122
+ */
123
+static inline int vext_elem_mask(void *v0, int index)
124
+{
125
+ int idx = index / 64;
126
+ int pos = index % 64;
127
+ return (((uint64_t *)v0)[idx] >> pos) & 1;
128
+}
129
+
130
+/*
131
+ * Get number of total elements, including prestart, body and tail elements.
132
+ * Note that when LMUL < 1, the tail includes the elements past VLMAX that
133
+ * are held in the same vector register.
134
+ */
135
+static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
136
+ uint32_t esz)
137
+{
138
+ uint32_t vlenb = simd_maxsz(desc);
139
+ uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
140
+ int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
141
+ ctzl(esz) - ctzl(sew) + vext_lmul(desc);
142
+ return (vlenb << emul) / esz;
143
+}
144
+
145
+/* set agnostic elements to 1s */
146
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
147
+ uint32_t tot);
148
+
149
+/* expand macro args before macro */
150
+#define RVVCALL(macro, ...) macro(__VA_ARGS__)
151
+
152
+/* (TD, T1, T2, TX1, TX2) */
153
+#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
154
+#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
155
+#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
156
+#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
157
+
158
+/* operation of two vector elements */
159
+typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
160
+
161
+#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
162
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
163
+{ \
164
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
165
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
166
+ *((TD *)vd + HD(i)) = OP(s2, s1); \
167
+}
168
+
169
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
170
+ CPURISCVState *env, uint32_t desc,
171
+ opivv2_fn *fn, uint32_t esz);
172
+
173
+/* generate the helpers for OPIVV */
174
+#define GEN_VEXT_VV(NAME, ESZ) \
175
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
176
+ void *vs2, CPURISCVState *env, \
177
+ uint32_t desc) \
178
+{ \
179
+ do_vext_vv(vd, v0, vs1, vs2, env, desc, \
180
+ do_##NAME, ESZ); \
181
+}
182
+
183
+typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
184
+
185
+/*
186
+ * (T1)s1 gives the real operator type.
187
+ * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
188
+ */
189
+#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
190
+static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
191
+{ \
192
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
193
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
194
+}
195
+
196
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
197
+ CPURISCVState *env, uint32_t desc,
198
+ opivx2_fn fn, uint32_t esz);
199
+
200
+/* generate the helpers for OPIVX */
201
+#define GEN_VEXT_VX(NAME, ESZ) \
202
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
203
+ void *vs2, CPURISCVState *env, \
204
+ uint32_t desc) \
205
+{ \
206
+ do_vext_vx(vd, v0, s1, vs2, env, desc, \
207
+ do_##NAME, ESZ); \
208
+}
209
+
210
+#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
211
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
110
index XXXXXXX..XXXXXXX 100644
212
index XXXXXXX..XXXXXXX 100644
111
--- a/target/riscv/csr.c
213
--- a/target/riscv/vector_helper.c
112
+++ b/target/riscv/csr.c
214
+++ b/target/riscv/vector_helper.c
113
@@ -XXX,XX +XXX,XX @@
215
@@ -XXX,XX +XXX,XX @@
114
#include "qemu/log.h"
216
#include "fpu/softfloat.h"
115
#include "qemu/timer.h"
217
#include "tcg/tcg-gvec-desc.h"
116
#include "cpu.h"
218
#include "internals.h"
117
+#include "pmu.h"
219
+#include "vector_internals.h"
118
#include "qemu/main-loop.h"
220
#include <math.h>
119
#include "exec/exec-all.h"
221
120
#include "sysemu/cpu-timers.h"
222
target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
121
@@ -XXX,XX +XXX,XX @@ static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
223
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
224
return vl;
122
}
225
}
123
226
124
/* User Timers and Counters */
227
-/*
125
-static RISCVException read_instret(CPURISCVState *env, int csrno,
228
- * Note that vector data is stored in host-endian 64-bit chunks,
126
- target_ulong *val)
229
- * so addressing units smaller than that needs a host-endian fixup.
127
+static target_ulong get_ticks(bool shift)
230
- */
231
-#if HOST_BIG_ENDIAN
232
-#define H1(x) ((x) ^ 7)
233
-#define H1_2(x) ((x) ^ 6)
234
-#define H1_4(x) ((x) ^ 4)
235
-#define H2(x) ((x) ^ 3)
236
-#define H4(x) ((x) ^ 1)
237
-#define H8(x) ((x))
238
-#else
239
-#define H1(x) (x)
240
-#define H1_2(x) (x)
241
-#define H1_4(x) (x)
242
-#define H2(x) (x)
243
-#define H4(x) (x)
244
-#define H8(x) (x)
245
-#endif
246
-
247
-static inline uint32_t vext_nf(uint32_t desc)
248
-{
249
- return FIELD_EX32(simd_data(desc), VDATA, NF);
250
-}
251
-
252
-static inline uint32_t vext_vm(uint32_t desc)
253
-{
254
- return FIELD_EX32(simd_data(desc), VDATA, VM);
255
-}
256
-
257
-/*
258
- * Encode LMUL to lmul as following:
259
- * LMUL vlmul lmul
260
- * 1 000 0
261
- * 2 001 1
262
- * 4 010 2
263
- * 8 011 3
264
- * - 100 -
265
- * 1/8 101 -3
266
- * 1/4 110 -2
267
- * 1/2 111 -1
268
- */
269
-static inline int32_t vext_lmul(uint32_t desc)
270
-{
271
- return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
272
-}
273
-
274
-static inline uint32_t vext_vta(uint32_t desc)
275
-{
276
- return FIELD_EX32(simd_data(desc), VDATA, VTA);
277
-}
278
-
279
-static inline uint32_t vext_vma(uint32_t desc)
280
-{
281
- return FIELD_EX32(simd_data(desc), VDATA, VMA);
282
-}
283
-
284
-static inline uint32_t vext_vta_all_1s(uint32_t desc)
285
-{
286
- return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
287
-}
288
-
289
/*
290
* Get the maximum number of elements can be operated.
291
*
292
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
293
return scale < 0 ? vlenb >> -scale : vlenb << scale;
294
}
295
296
-/*
297
- * Get number of total elements, including prestart, body and tail elements.
298
- * Note that when LMUL < 1, the tail includes the elements past VLMAX that
299
- * are held in the same vector register.
300
- */
301
-static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
302
- uint32_t esz)
303
-{
304
- uint32_t vlenb = simd_maxsz(desc);
305
- uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
306
- int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
307
- ctzl(esz) - ctzl(sew) + vext_lmul(desc);
308
- return (vlenb << emul) / esz;
309
-}
310
-
311
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
128
{
312
{
129
+ int64_t val;
313
return (addr & ~env->cur_pmmask) | env->cur_pmbase;
130
+ target_ulong result;
314
@@ -XXX,XX +XXX,XX @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
131
+
132
#if !defined(CONFIG_USER_ONLY)
133
if (icount_enabled()) {
134
- *val = icount_get();
135
+ val = icount_get();
136
} else {
137
- *val = cpu_get_host_ticks();
138
+ val = cpu_get_host_ticks();
139
}
315
}
140
#else
141
- *val = cpu_get_host_ticks();
142
+ val = cpu_get_host_ticks();
143
#endif
144
- return RISCV_EXCP_NONE;
145
-}
146
147
-static RISCVException read_instreth(CPURISCVState *env, int csrno,
148
- target_ulong *val)
149
-{
150
-#if !defined(CONFIG_USER_ONLY)
151
- if (icount_enabled()) {
152
- *val = icount_get() >> 32;
153
+ if (shift) {
154
+ result = val >> 32;
155
} else {
156
- *val = cpu_get_host_ticks() >> 32;
157
+ result = val;
158
}
159
-#else
160
- *val = cpu_get_host_ticks() >> 32;
161
-#endif
162
- return RISCV_EXCP_NONE;
163
+
164
+ return result;
165
}
316
}
166
317
167
#if defined(CONFIG_USER_ONLY)
318
-/* set agnostic elements to 1s */
168
@@ -XXX,XX +XXX,XX @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
319
-static void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
169
return RISCV_EXCP_NONE;
320
- uint32_t tot)
321
-{
322
- if (is_agnostic == 0) {
323
- /* policy undisturbed */
324
- return;
325
- }
326
- if (tot - cnt == 0) {
327
- return;
328
- }
329
- memset(base + cnt, -1, tot - cnt);
330
-}
331
-
332
static inline void vext_set_elem_mask(void *v0, int index,
333
uint8_t value)
334
{
335
@@ -XXX,XX +XXX,XX @@ static inline void vext_set_elem_mask(void *v0, int index,
336
((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
170
}
337
}
171
338
172
+static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
339
-/*
173
+{
340
- * Earlier designs (pre-0.9) had a varying number of bits
174
+ *val = get_ticks(false);
341
- * per mask value (MLEN). In the 0.9 design, MLEN=1.
175
+ return RISCV_EXCP_NONE;
342
- * (Section 4.5)
176
+}
343
- */
177
+
344
-static inline int vext_elem_mask(void *v0, int index)
178
+static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
345
-{
179
+{
346
- int idx = index / 64;
180
+ *val = get_ticks(true);
347
- int pos = index % 64;
181
+ return RISCV_EXCP_NONE;
348
- return (((uint64_t *)v0)[idx] >> pos) & 1;
182
+}
349
-}
183
+
350
-
184
#else /* CONFIG_USER_ONLY */
351
/* elements operations for load and store */
185
352
typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
186
static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
353
uint32_t idx, void *vd, uintptr_t retaddr);
187
{
354
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
188
- int evt_index = csrno - CSR_MHPMEVENT3;
355
* Vector Integer Arithmetic Instructions
189
+ int evt_index = csrno - CSR_MCOUNTINHIBIT;
356
*/
190
357
191
*val = env->mhpmevent_val[evt_index];
358
-/* expand macro args before macro */
192
359
-#define RVVCALL(macro, ...) macro(__VA_ARGS__)
193
@@ -XXX,XX +XXX,XX @@ static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
360
-
194
361
/* (TD, T1, T2, TX1, TX2) */
195
static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
362
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
196
{
363
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
197
- int evt_index = csrno - CSR_MHPMEVENT3;
364
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
198
+ int evt_index = csrno - CSR_MCOUNTINHIBIT;
365
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
199
366
-#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
200
env->mhpmevent_val[evt_index] = val;
367
-#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
201
368
-#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
202
@@ -XXX,XX +XXX,XX @@ static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
369
-#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
203
370
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
204
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
371
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
205
{
372
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
206
- int ctr_index = csrno - CSR_MHPMCOUNTER3 + 3;
373
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
207
+ int ctr_idx = csrno - CSR_MCYCLE;
374
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
208
+ PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
375
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
209
376
210
- env->mhpmcounter_val[ctr_index] = val;
377
-/* operation of two vector elements */
211
+ counter->mhpmcounter_val = val;
378
-typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
212
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
379
-
213
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
380
-#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
214
+ counter->mhpmcounter_prev = get_ticks(false);
381
-static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
215
+ } else {
382
-{ \
216
+ /* Other counters can keep incrementing from the given value */
383
- TX1 s1 = *((T1 *)vs1 + HS1(i)); \
217
+ counter->mhpmcounter_prev = val;
384
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
218
+ }
385
- *((TD *)vd + HD(i)) = OP(s2, s1); \
219
386
-}
220
return RISCV_EXCP_NONE;
387
#define DO_SUB(N, M) (N - M)
221
}
388
#define DO_RSUB(N, M) (M - N)
222
389
223
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
390
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
224
{
391
RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
225
- int ctr_index = csrno - CSR_MHPMCOUNTER3H + 3;
392
RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
226
+ int ctr_idx = csrno - CSR_MCYCLEH;
393
227
+ PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
394
-static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
228
395
- CPURISCVState *env, uint32_t desc,
229
- env->mhpmcounterh_val[ctr_index] = val;
396
- opivv2_fn *fn, uint32_t esz)
230
+ counter->mhpmcounterh_val = val;
397
-{
231
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
398
- uint32_t vm = vext_vm(desc);
232
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
399
- uint32_t vl = env->vl;
233
+ counter->mhpmcounterh_prev = get_ticks(true);
400
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
234
+ } else {
401
- uint32_t vta = vext_vta(desc);
235
+ counter->mhpmcounterh_prev = val;
402
- uint32_t vma = vext_vma(desc);
236
+ }
403
- uint32_t i;
237
+
404
-
238
+ return RISCV_EXCP_NONE;
405
- for (i = env->vstart; i < vl; i++) {
239
+}
406
- if (!vm && !vext_elem_mask(v0, i)) {
240
+
407
- /* set masked-off elements to 1s */
241
+static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
408
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
242
+ bool upper_half, uint32_t ctr_idx)
409
- continue;
243
+{
410
- }
244
+ PMUCTRState counter = env->pmu_ctrs[ctr_idx];
411
- fn(vd, vs1, vs2, i);
245
+ target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
412
- }
246
+ counter.mhpmcounter_prev;
413
- env->vstart = 0;
247
+ target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
414
- /* set tail elements to 1s */
248
+ counter.mhpmcounter_val;
415
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
249
+
416
-}
250
+ if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
417
-
251
+ /**
418
-/* generate the helpers for OPIVV */
252
+ * Counter should not increment if inhibit bit is set. We can't really
419
-#define GEN_VEXT_VV(NAME, ESZ) \
253
+ * stop the icount counting. Just return the counter value written by
420
-void HELPER(NAME)(void *vd, void *v0, void *vs1, \
254
+ * the supervisor to indicate that counter was not incremented.
421
- void *vs2, CPURISCVState *env, \
255
+ */
422
- uint32_t desc) \
256
+ if (!counter.started) {
423
-{ \
257
+ *val = ctr_val;
424
- do_vext_vv(vd, v0, vs1, vs2, env, desc, \
258
+ return RISCV_EXCP_NONE;
425
- do_##NAME, ESZ); \
259
+ } else {
426
-}
260
+ /* Mark that the counter has been stopped */
427
-
261
+ counter.started = false;
428
GEN_VEXT_VV(vadd_vv_b, 1)
262
+ }
429
GEN_VEXT_VV(vadd_vv_h, 2)
263
+ }
430
GEN_VEXT_VV(vadd_vv_w, 4)
264
+
431
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VV(vsub_vv_h, 2)
265
+ /**
432
GEN_VEXT_VV(vsub_vv_w, 4)
266
+ * The kernel computes the perf delta by subtracting the current value from
433
GEN_VEXT_VV(vsub_vv_d, 8)
267
+ * the value it initialized previously (ctr_val).
434
268
+ */
435
-typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
269
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
436
-
270
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
437
-/*
271
+ *val = get_ticks(upper_half) - ctr_prev + ctr_val;
438
- * (T1)s1 gives the real operator type.
272
+ } else {
439
- * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
273
+ *val = ctr_val;
440
- */
274
+ }
441
-#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
275
442
-static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
276
return RISCV_EXCP_NONE;
443
-{ \
277
}
444
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
278
445
- *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
279
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
446
-}
280
{
447
281
- int ctr_index;
448
RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
282
+ uint16_t ctr_index;
449
RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
283
450
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
284
if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
451
RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
285
- ctr_index = csrno - CSR_MHPMCOUNTER3 + 3;
452
RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
286
+ ctr_index = csrno - CSR_MCYCLE;
453
287
} else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
454
-static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
288
- ctr_index = csrno - CSR_HPMCOUNTER3 + 3;
455
- CPURISCVState *env, uint32_t desc,
289
+ ctr_index = csrno - CSR_CYCLE;
456
- opivx2_fn fn, uint32_t esz)
290
} else {
457
-{
291
return RISCV_EXCP_ILLEGAL_INST;
458
- uint32_t vm = vext_vm(desc);
292
}
459
- uint32_t vl = env->vl;
293
- *val = env->mhpmcounter_val[ctr_index];
460
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
294
461
- uint32_t vta = vext_vta(desc);
295
- return RISCV_EXCP_NONE;
462
- uint32_t vma = vext_vma(desc);
296
+ return riscv_pmu_read_ctr(env, val, false, ctr_index);
463
- uint32_t i;
297
}
464
-
298
465
- for (i = env->vstart; i < vl; i++) {
299
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
466
- if (!vm && !vext_elem_mask(v0, i)) {
300
{
467
- /* set masked-off elements to 1s */
301
- int ctr_index;
468
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
302
+ uint16_t ctr_index;
469
- continue;
303
470
- }
304
if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
471
- fn(vd, s1, vs2, i);
305
- ctr_index = csrno - CSR_MHPMCOUNTER3H + 3;
472
- }
306
+ ctr_index = csrno - CSR_MCYCLEH;
473
- env->vstart = 0;
307
} else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
474
- /* set tail elements to 1s */
308
- ctr_index = csrno - CSR_HPMCOUNTER3H + 3;
475
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
309
+ ctr_index = csrno - CSR_CYCLEH;
476
-}
310
} else {
477
-
311
return RISCV_EXCP_ILLEGAL_INST;
478
-/* generate the helpers for OPIVX */
312
}
479
-#define GEN_VEXT_VX(NAME, ESZ) \
313
- *val = env->mhpmcounterh_val[ctr_index];
480
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
314
481
- void *vs2, CPURISCVState *env, \
315
- return RISCV_EXCP_NONE;
482
- uint32_t desc) \
316
+ return riscv_pmu_read_ctr(env, val, true, ctr_index);
483
-{ \
317
}
484
- do_vext_vx(vd, v0, s1, vs2, env, desc, \
318
485
- do_##NAME, ESZ); \
319
-
486
-}
320
static RISCVException read_time(CPURISCVState *env, int csrno,
487
-
321
target_ulong *val)
488
GEN_VEXT_VX(vadd_vx_b, 1)
322
{
489
GEN_VEXT_VX(vadd_vx_h, 2)
323
@@ -XXX,XX +XXX,XX @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
490
GEN_VEXT_VX(vadd_vx_w, 4)
324
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
491
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
325
target_ulong val)
326
{
327
+ int cidx;
328
+ PMUCTRState *counter;
329
+
330
if (env->priv_ver < PRIV_VERSION_1_11_0) {
331
return RISCV_EXCP_ILLEGAL_INST;
332
}
333
334
env->mcountinhibit = val;
335
+
336
+ /* Check if any other counter is also monitoring cycles/instructions */
337
+ for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
338
+ if (!get_field(env->mcountinhibit, BIT(cidx))) {
339
+ counter = &env->pmu_ctrs[cidx];
340
+ counter->started = true;
341
+ }
342
+ }
343
+
344
return RISCV_EXCP_NONE;
345
}
346
347
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
348
[CSR_VLENB] = { "vlenb", vs, read_vlenb,
349
.min_priv_ver = PRIV_VERSION_1_12_0 },
350
/* User Timers and Counters */
351
- [CSR_CYCLE] = { "cycle", ctr, read_instret },
352
- [CSR_INSTRET] = { "instret", ctr, read_instret },
353
- [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
354
- [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },
355
+ [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
356
+ [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
357
+ [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
358
+ [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
359
360
/*
361
* In privileged mode, the monitor will have to emulate TIME CSRs only if
362
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
363
364
#if !defined(CONFIG_USER_ONLY)
365
/* Machine Timers and Counters */
366
- [CSR_MCYCLE] = { "mcycle", any, read_instret },
367
- [CSR_MINSTRET] = { "minstret", any, read_instret },
368
- [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
369
- [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },
370
+ [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter, write_mhpmcounter},
371
+ [CSR_MINSTRET] = { "minstret", any, read_hpmcounter, write_mhpmcounter},
372
+ [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh, write_mhpmcounterh},
373
+ [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh, write_mhpmcounterh},
374
375
/* Machine Information Registers */
376
[CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
377
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
378
index XXXXXXX..XXXXXXX 100644
379
--- a/target/riscv/machine.c
380
+++ b/target/riscv/machine.c
381
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_envcfg = {
382
VMSTATE_UINT64(env.menvcfg, RISCVCPU),
383
VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
384
VMSTATE_UINT64(env.henvcfg, RISCVCPU),
385
+ VMSTATE_END_OF_LIST()
386
+ }
387
+};
388
+
389
+static bool pmu_needed(void *opaque)
390
+{
391
+ RISCVCPU *cpu = opaque;
392
393
+ return cpu->cfg.pmu_num;
394
+}
395
+
396
+static const VMStateDescription vmstate_pmu_ctr_state = {
397
+ .name = "cpu/pmu",
398
+ .version_id = 1,
399
+ .minimum_version_id = 1,
400
+ .needed = pmu_needed,
401
+ .fields = (VMStateField[]) {
402
+ VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
403
+ VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
404
+ VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
405
+ VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
406
+ VMSTATE_BOOL(started, PMUCTRState),
407
VMSTATE_END_OF_LIST()
408
}
409
};
410
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
411
VMSTATE_UINTTL(env.scounteren, RISCVCPU),
412
VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
413
VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
414
- VMSTATE_UINTTL_ARRAY(env.mhpmcounter_val, RISCVCPU, RV_MAX_MHPMCOUNTERS),
415
- VMSTATE_UINTTL_ARRAY(env.mhpmcounterh_val, RISCVCPU, RV_MAX_MHPMCOUNTERS),
416
+ VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
417
+ vmstate_pmu_ctr_state, PMUCTRState),
418
VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
419
VMSTATE_UINTTL(env.sscratch, RISCVCPU),
420
VMSTATE_UINTTL(env.mscratch, RISCVCPU),
421
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
422
new file mode 100644
492
new file mode 100644
423
index XXXXXXX..XXXXXXX
493
index XXXXXXX..XXXXXXX
424
--- /dev/null
494
--- /dev/null
425
+++ b/target/riscv/pmu.c
495
+++ b/target/riscv/vector_internals.c
426
@@ -XXX,XX +XXX,XX @@
496
@@ -XXX,XX +XXX,XX @@
427
+/*
497
+/*
428
+ * RISC-V PMU file.
498
+ * RISC-V Vector Extension Internals
429
+ *
499
+ *
430
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
500
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
431
+ *
501
+ *
432
+ * This program is free software; you can redistribute it and/or modify it
502
+ * This program is free software; you can redistribute it and/or modify it
433
+ * under the terms and conditions of the GNU General Public License,
503
+ * under the terms and conditions of the GNU General Public License,
434
+ * version 2 or later, as published by the Free Software Foundation.
504
+ * version 2 or later, as published by the Free Software Foundation.
435
+ *
505
+ *
...
...
440
+ *
510
+ *
441
+ * You should have received a copy of the GNU General Public License along with
511
+ * You should have received a copy of the GNU General Public License along with
442
+ * this program. If not, see <http://www.gnu.org/licenses/>.
512
+ * this program. If not, see <http://www.gnu.org/licenses/>.
443
+ */
513
+ */
444
+
514
+
445
+#include "qemu/osdep.h"
515
+#include "vector_internals.h"
446
+#include "cpu.h"
516
+
447
+#include "pmu.h"
517
+/* set agnostic elements to 1s */
448
+
518
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
449
+bool riscv_pmu_ctr_monitor_instructions(CPURISCVState *env,
519
+ uint32_t tot)
450
+ uint32_t target_ctr)
520
+{
451
+{
521
+ if (is_agnostic == 0) {
452
+ return (target_ctr == 0) ? true : false;
522
+ /* policy undisturbed */
453
+}
523
+ return;
454
+
524
+ }
455
+bool riscv_pmu_ctr_monitor_cycles(CPURISCVState *env, uint32_t target_ctr)
525
+ if (tot - cnt == 0) {
456
+{
526
+ return ;
457
+ return (target_ctr == 2) ? true : false;
527
+ }
528
+ memset(base + cnt, -1, tot - cnt);
529
+}
530
+
531
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
532
+ CPURISCVState *env, uint32_t desc,
533
+ opivv2_fn *fn, uint32_t esz)
534
+{
535
+ uint32_t vm = vext_vm(desc);
536
+ uint32_t vl = env->vl;
537
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
538
+ uint32_t vta = vext_vta(desc);
539
+ uint32_t vma = vext_vma(desc);
540
+ uint32_t i;
541
+
542
+ for (i = env->vstart; i < vl; i++) {
543
+ if (!vm && !vext_elem_mask(v0, i)) {
544
+ /* set masked-off elements to 1s */
545
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
546
+ continue;
547
+ }
548
+ fn(vd, vs1, vs2, i);
549
+ }
550
+ env->vstart = 0;
551
+ /* set tail elements to 1s */
552
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
553
+}
554
+
555
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
556
+ CPURISCVState *env, uint32_t desc,
557
+ opivx2_fn fn, uint32_t esz)
558
+{
559
+ uint32_t vm = vext_vm(desc);
560
+ uint32_t vl = env->vl;
561
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
562
+ uint32_t vta = vext_vta(desc);
563
+ uint32_t vma = vext_vma(desc);
564
+ uint32_t i;
565
+
566
+ for (i = env->vstart; i < vl; i++) {
567
+ if (!vm && !vext_elem_mask(v0, i)) {
568
+ /* set masked-off elements to 1s */
569
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
570
+ continue;
571
+ }
572
+ fn(vd, s1, vs2, i);
573
+ }
574
+ env->vstart = 0;
575
+ /* set tail elements to 1s */
576
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
458
+}
577
+}
459
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
578
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
460
index XXXXXXX..XXXXXXX 100644
579
index XXXXXXX..XXXXXXX 100644
461
--- a/target/riscv/meson.build
580
--- a/target/riscv/meson.build
462
+++ b/target/riscv/meson.build
581
+++ b/target/riscv/meson.build
463
@@ -XXX,XX +XXX,XX @@ riscv_softmmu_ss.add(files(
582
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
464
'pmp.c',
583
'gdbstub.c',
465
'debug.c',
584
'op_helper.c',
466
'monitor.c',
585
'vector_helper.c',
467
- 'machine.c'
586
+ 'vector_internals.c',
468
+ 'machine.c',
587
'bitmanip_helper.c',
469
+ 'pmu.c'
588
'translate.c',
470
))
589
'm128_helper.c',
471
472
target_arch += {'riscv': riscv_ss}
473
--
590
--
474
2.36.1
591
2.41.0
New patch

From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>

Refactor the non SEW-specific logic out of `GEN_OPIVV_TRANS` into the
function `opivv_trans` (similar to `opivi_trans`). `opivv_trans` will be
used in subsequent vector-crypto commits.

Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-ID: <20230711165917.2629866-3-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 62 +++++++++++++------------
 1 file changed, 32 insertions(+), 30 deletions(-)

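For context, once `opivv_trans` is a standalone function, later translation
macros can call it directly instead of open-coding the gvec setup. A minimal
sketch of such a caller (the macro name here is illustrative only; the real
users appear in the vector-crypto patches later in this series):

    #define GEN_OPIVV_TRANS_SKETCH(NAME, CHECK)                         \
    static bool trans_##NAME(DisasContext *s, arg_rmrr *a)              \
    {                                                                   \
        if (CHECK(s, a)) {                                              \
            /* a real user would pick one helper per SEW here */        \
            return opivv_trans(a->rd, a->rs1, a->rs2, a->vm,            \
                               gen_helper_##NAME, s);                   \
        }                                                               \
        return false;                                                   \
    }
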
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
22
@@ -XXX,XX +XXX,XX @@ GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
23
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
24
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
25
26
+static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
27
+ gen_helper_gvec_4_ptr *fn, DisasContext *s)
28
+{
29
+ uint32_t data = 0;
30
+ TCGLabel *over = gen_new_label();
31
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
33
+
34
+ data = FIELD_DP32(data, VDATA, VM, vm);
35
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
36
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
37
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
38
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
39
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
40
+ vreg_ofs(s, vs2), cpu_env, s->cfg_ptr->vlen / 8,
41
+ s->cfg_ptr->vlen / 8, data, fn);
42
+ mark_vs_dirty(s);
43
+ gen_set_label(over);
44
+ return true;
45
+}
46
+
47
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
48
/* OPIVV without GVEC IR */
49
-#define GEN_OPIVV_TRANS(NAME, CHECK) \
50
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
51
-{ \
52
- if (CHECK(s, a)) { \
53
- uint32_t data = 0; \
54
- static gen_helper_gvec_4_ptr * const fns[4] = { \
55
- gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
56
- gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
57
- }; \
58
- TCGLabel *over = gen_new_label(); \
59
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
60
- tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
61
- \
62
- data = FIELD_DP32(data, VDATA, VM, a->vm); \
63
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
64
- data = FIELD_DP32(data, VDATA, VTA, s->vta); \
65
- data = \
66
- FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
67
- data = FIELD_DP32(data, VDATA, VMA, s->vma); \
68
- tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
69
- vreg_ofs(s, a->rs1), \
70
- vreg_ofs(s, a->rs2), cpu_env, \
71
- s->cfg_ptr->vlen / 8, \
72
- s->cfg_ptr->vlen / 8, data, \
73
- fns[s->sew]); \
74
- mark_vs_dirty(s); \
75
- gen_set_label(over); \
76
- return true; \
77
- } \
78
- return false; \
79
+#define GEN_OPIVV_TRANS(NAME, CHECK) \
80
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
81
+{ \
82
+ if (CHECK(s, a)) { \
83
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
84
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
85
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
86
+ }; \
87
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
88
+ } \
89
+ return false; \
90
}
91
92
/*
93
--
94
2.41.0
New patch

From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>

Remove the redundant "vl == 0" check: when vl == 0, the existing
"vstart >= vl" check already covers it.

Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20230711165917.2629866-4-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 31 +------------------------
 1 file changed, 1 insertion(+), 30 deletions(-)

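The reasoning can be seen directly on the two generated branches: `cpu_vl`
and `cpu_vstart` are unsigned, so vl == 0 already implies vstart >= vl and
the first branch adds nothing (sketch, not part of the patch):

    /* Before: two guards in front of every vector body. */
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          /* vl == 0      */
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); /* vstart >= vl */

    /* After: the unsigned comparison alone is enough; with vl == 0,
     * any vstart satisfies vstart >= 0 == vl, so we still skip to 'over'. */
    tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
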
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
20
TCGv_i32 desc;
21
22
TCGLabel *over = gen_new_label();
23
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
24
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
25
26
dest = tcg_temp_new_ptr();
27
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
28
TCGv_i32 desc;
29
30
TCGLabel *over = gen_new_label();
31
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
33
34
dest = tcg_temp_new_ptr();
35
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
36
TCGv_i32 desc;
37
38
TCGLabel *over = gen_new_label();
39
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
40
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
41
42
dest = tcg_temp_new_ptr();
43
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
44
TCGv_i32 desc;
45
46
TCGLabel *over = gen_new_label();
47
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
48
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
49
50
dest = tcg_temp_new_ptr();
51
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
52
return false;
53
}
54
55
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
56
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
57
58
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
59
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
60
uint32_t data = 0;
61
62
TCGLabel *over = gen_new_label();
63
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
64
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
65
66
dest = tcg_temp_new_ptr();
67
@@ -XXX,XX +XXX,XX @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
68
uint32_t data = 0;
69
70
TCGLabel *over = gen_new_label();
71
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
72
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
73
74
dest = tcg_temp_new_ptr();
75
@@ -XXX,XX +XXX,XX @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
76
if (checkfn(s, a)) {
77
uint32_t data = 0;
78
TCGLabel *over = gen_new_label();
79
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
80
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
81
82
data = FIELD_DP32(data, VDATA, VM, a->vm);
83
@@ -XXX,XX +XXX,XX @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
84
if (opiwv_widen_check(s, a)) {
85
uint32_t data = 0;
86
TCGLabel *over = gen_new_label();
87
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
88
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
89
90
data = FIELD_DP32(data, VDATA, VM, a->vm);
91
@@ -XXX,XX +XXX,XX @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
92
{
93
uint32_t data = 0;
94
TCGLabel *over = gen_new_label();
95
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
96
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
97
98
data = FIELD_DP32(data, VDATA, VM, vm);
99
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
100
gen_helper_##NAME##_w, \
101
}; \
102
TCGLabel *over = gen_new_label(); \
103
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
104
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
105
\
106
data = FIELD_DP32(data, VDATA, VM, a->vm); \
107
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
108
gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
109
};
110
TCGLabel *over = gen_new_label();
111
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
112
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
113
114
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
115
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
116
vext_check_ss(s, a->rd, 0, 1)) {
117
TCGv s1;
118
TCGLabel *over = gen_new_label();
119
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
120
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
121
122
s1 = get_gpr(s, a->rs1, EXT_SIGN);
123
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
124
gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
125
};
126
TCGLabel *over = gen_new_label();
127
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
128
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
129
130
s1 = tcg_constant_i64(simm);
131
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
132
}; \
133
TCGLabel *over = gen_new_label(); \
134
gen_set_rm(s, RISCV_FRM_DYN); \
135
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
136
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
137
\
138
data = FIELD_DP32(data, VDATA, VM, a->vm); \
139
@@ -XXX,XX +XXX,XX @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
140
TCGv_i64 t1;
141
142
TCGLabel *over = gen_new_label();
143
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
144
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
145
146
dest = tcg_temp_new_ptr();
147
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
148
}; \
149
TCGLabel *over = gen_new_label(); \
150
gen_set_rm(s, RISCV_FRM_DYN); \
151
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
152
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\
153
\
154
data = FIELD_DP32(data, VDATA, VM, a->vm); \
155
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
156
}; \
157
TCGLabel *over = gen_new_label(); \
158
gen_set_rm(s, RISCV_FRM_DYN); \
159
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
160
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
161
\
162
data = FIELD_DP32(data, VDATA, VM, a->vm); \
163
@@ -XXX,XX +XXX,XX @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
164
uint32_t data = 0;
165
TCGLabel *over = gen_new_label();
166
gen_set_rm_chkfrm(s, rm);
167
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
168
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
169
170
data = FIELD_DP32(data, VDATA, VM, a->vm);
171
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
172
gen_helper_vmv_v_x_d,
173
};
174
TCGLabel *over = gen_new_label();
175
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
176
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
177
178
t1 = tcg_temp_new_i64();
179
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
180
}; \
181
TCGLabel *over = gen_new_label(); \
182
gen_set_rm_chkfrm(s, FRM); \
183
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
184
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
185
\
186
data = FIELD_DP32(data, VDATA, VM, a->vm); \
187
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
188
}; \
189
TCGLabel *over = gen_new_label(); \
190
gen_set_rm(s, RISCV_FRM_DYN); \
191
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
192
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
193
\
194
data = FIELD_DP32(data, VDATA, VM, a->vm); \
195
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
196
}; \
197
TCGLabel *over = gen_new_label(); \
198
gen_set_rm_chkfrm(s, FRM); \
199
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
200
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
201
\
202
data = FIELD_DP32(data, VDATA, VM, a->vm); \
203
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
204
}; \
205
TCGLabel *over = gen_new_label(); \
206
gen_set_rm_chkfrm(s, FRM); \
207
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
208
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
209
\
210
data = FIELD_DP32(data, VDATA, VM, a->vm); \
211
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
212
uint32_t data = 0; \
213
gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
214
TCGLabel *over = gen_new_label(); \
215
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
216
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
217
\
218
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
219
@@ -XXX,XX +XXX,XX @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
220
require_vm(a->vm, a->rd)) {
221
uint32_t data = 0;
222
TCGLabel *over = gen_new_label();
223
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
224
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
225
226
data = FIELD_DP32(data, VDATA, VM, a->vm);
227
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
228
TCGv s1;
229
TCGLabel *over = gen_new_label();
230
231
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
232
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
233
234
t1 = tcg_temp_new_i64();
235
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
236
TCGv_i64 t1;
237
TCGLabel *over = gen_new_label();
238
239
- /* if vl == 0 or vstart >= vl, skip vector register write back */
240
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
241
+ /* if vstart >= vl, skip vector register write back */
242
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
243
244
/* NaN-box f[rs1] */
245
@@ -XXX,XX +XXX,XX @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
246
uint32_t data = 0;
247
gen_helper_gvec_3_ptr *fn;
248
TCGLabel *over = gen_new_label();
249
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
250
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
251
252
static gen_helper_gvec_3_ptr * const fns[6][4] = {
253
--
254
2.41.0
From: Richard Henderson <richard.henderson@linaro.org>

While we set env->bins when unwinding for ILLEGAL_INST,
from e.g. csrrw, we weren't setting it for immediately
illegal instructions.

Add a testcase for mtval via both exception paths.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1060
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20220604231004.49990-2-richard.henderson@linaro.org>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/translate.c | 2 +
 tests/tcg/riscv64/Makefile.softmmu-target | 21 +++++++++
 tests/tcg/riscv64/issue1060.S | 53 +++++++++++++++++++++++
 tests/tcg/riscv64/semihost.ld | 21 +++++++++
 4 files changed, 97 insertions(+)
 create mode 100644 tests/tcg/riscv64/Makefile.softmmu-target
 create mode 100644 tests/tcg/riscv64/issue1060.S
 create mode 100644 tests/tcg/riscv64/semihost.ld

From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>

This commit adds support for the Zvbc vector-crypto extension, which
consists of the following instructions:

* vclmulh.[vx,vv]
* vclmul.[vx,vv]

Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.

Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Co-authored-by: Max Chou <max.chou@sifive.com>
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
[max.chou@sifive.com: Exposed x-zvbc property]
Message-ID: <20230711165917.2629866-5-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_cfg.h | 1 +
 target/riscv/helper.h | 6 +++
 target/riscv/insn32.decode | 6 +++
 target/riscv/cpu.c | 9 ++++
 target/riscv/translate.c | 1 +
 target/riscv/vcrypto_helper.c | 59 ++++++++++++++++++++++
 target/riscv/insn_trans/trans_rvvk.c.inc | 62 ++++++++++++++++++++++++
 target/riscv/meson.build | 3 +-
 8 files changed, 146 insertions(+), 1 deletion(-)
 create mode 100644 target/riscv/vcrypto_helper.c
 create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc
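For reference, the carry-less multiply semantics behind vclmul/vclmulh can be
sketched as plain C over one 64-bit element pair (illustrative names; the
patch's clmul64/clmulh64 helpers below implement the same operation):

    /* Low 64 bits of the 128-bit carry-less (XOR) product of x and y. */
    static uint64_t clmul_lo(uint64_t x, uint64_t y)
    {
        uint64_t r = 0;
        for (int j = 0; j < 64; j++) {
            if ((y >> j) & 1) {
                r ^= x << j;          /* partial products combine with XOR */
            }
        }
        return r;
    }

    /* High 64 bits of the same 128-bit carry-less product. */
    static uint64_t clmul_hi(uint64_t x, uint64_t y)
    {
        uint64_t r = 0;
        for (int j = 1; j < 64; j++) {
            if ((y >> j) & 1) {
                r ^= x >> (64 - j);   /* bits shifted past bit 63 */
            }
        }
        return r;
    }
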
33
34
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/riscv/cpu_cfg.h
37
+++ b/target/riscv/cpu_cfg.h
38
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
39
bool ext_zve32f;
40
bool ext_zve64f;
41
bool ext_zve64d;
42
+ bool ext_zvbc;
43
bool ext_zmmul;
44
bool ext_zvfbfmin;
45
bool ext_zvfbfwma;
46
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/target/riscv/helper.h
49
+++ b/target/riscv/helper.h
50
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vfwcvtbf16_f_f_v, void, ptr, ptr, ptr, env, i32)
51
52
DEF_HELPER_6(vfwmaccbf16_vv, void, ptr, ptr, ptr, ptr, env, i32)
53
DEF_HELPER_6(vfwmaccbf16_vf, void, ptr, ptr, i64, ptr, env, i32)
54
+
55
+/* Vector crypto functions */
56
+DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
57
+DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
58
+DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
59
+DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
60
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/riscv/insn32.decode
63
+++ b/target/riscv/insn32.decode
64
@@ -XXX,XX +XXX,XX @@ vfwcvtbf16_f_f_v 010010 . ..... 01101 001 ..... 1010111 @r2_vm
65
# *** Zvfbfwma Standard Extension ***
66
vfwmaccbf16_vv 111011 . ..... ..... 001 ..... 1010111 @r_vm
67
vfwmaccbf16_vf 111011 . ..... ..... 101 ..... 1010111 @r_vm
68
+
69
+# *** Zvbc vector crypto extension ***
70
+vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
71
+vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
72
+vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
73
+vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
74
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/riscv/cpu.c
77
+++ b/target/riscv/cpu.c
78
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
79
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
80
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
81
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
82
+ ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
83
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
84
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
85
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
86
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
87
return;
88
}
89
90
+ if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
91
+ error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
92
+ return;
93
+ }
94
+
95
if (cpu->cfg.ext_zk) {
96
cpu->cfg.ext_zkn = true;
97
cpu->cfg.ext_zkr = true;
98
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
99
DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
100
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
101
102
+ /* Vector cryptography extensions */
103
+ DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
104
+
105
DEFINE_PROP_END_OF_LIST(),
106
};
107
24
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
108
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
25
index XXXXXXX..XXXXXXX 100644
109
index XXXXXXX..XXXXXXX 100644
26
--- a/target/riscv/translate.c
110
--- a/target/riscv/translate.c
27
+++ b/target/riscv/translate.c
111
+++ b/target/riscv/translate.c
28
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
112
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
29
113
#include "insn_trans/trans_rvzfa.c.inc"
30
static void gen_exception_illegal(DisasContext *ctx)
114
#include "insn_trans/trans_rvzfh.c.inc"
31
{
115
#include "insn_trans/trans_rvk.c.inc"
32
+ tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env,
116
+#include "insn_trans/trans_rvvk.c.inc"
33
+ offsetof(CPURISCVState, bins));
117
#include "insn_trans/trans_privileged.c.inc"
34
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
118
#include "insn_trans/trans_svinval.c.inc"
35
}
119
#include "insn_trans/trans_rvbf16.c.inc"
36
120
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
37
diff --git a/tests/tcg/riscv64/Makefile.softmmu-target b/tests/tcg/riscv64/Makefile.softmmu-target
38
new file mode 100644
121
new file mode 100644
39
index XXXXXXX..XXXXXXX
122
index XXXXXXX..XXXXXXX
40
--- /dev/null
123
--- /dev/null
41
+++ b/tests/tcg/riscv64/Makefile.softmmu-target
124
+++ b/target/riscv/vcrypto_helper.c
42
@@ -XXX,XX +XXX,XX @@
125
@@ -XXX,XX +XXX,XX @@
43
+#
126
+/*
44
+# RISC-V system tests
127
+ * RISC-V Vector Crypto Extension Helpers for QEMU.
45
+#
128
+ *
46
+
129
+ * Copyright (C) 2023 SiFive, Inc.
47
+TEST_SRC = $(SRC_PATH)/tests/tcg/riscv64
130
+ * Written by Codethink Ltd and SiFive.
48
+VPATH += $(TEST_SRC)
131
+ *
49
+
132
+ * This program is free software; you can redistribute it and/or modify it
50
+LINK_SCRIPT = $(TEST_SRC)/semihost.ld
133
+ * under the terms and conditions of the GNU General Public License,
51
+LDFLAGS = -T $(LINK_SCRIPT)
134
+ * version 2 or later, as published by the Free Software Foundation.
52
+CFLAGS += -g -Og
135
+ *
53
+
136
+ * This program is distributed in the hope it will be useful, but WITHOUT
54
+%.o: %.S
137
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
55
+    $(CC) $(CFLAGS) $< -c -o $@
138
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
56
+%: %.o $(LINK_SCRIPT)
139
+ * more details.
57
+    $(LD) $(LDFLAGS) $< -o $@
140
+ *
58
+
141
+ * You should have received a copy of the GNU General Public License along with
59
+QEMU_OPTS += -M virt -display none -semihosting -device loader,file=
142
+ * this program. If not, see <http://www.gnu.org/licenses/>.
60
+
143
+ */
61
+EXTRA_RUNS += run-issue1060
144
+
62
+run-issue1060: issue1060
145
+#include "qemu/osdep.h"
63
+    $(call run-test, $<, $(QEMU) $(QEMU_OPTS)$<)
146
+#include "qemu/host-utils.h"
64
diff --git a/tests/tcg/riscv64/issue1060.S b/tests/tcg/riscv64/issue1060.S
147
+#include "qemu/bitops.h"
148
+#include "cpu.h"
149
+#include "exec/memop.h"
150
+#include "exec/exec-all.h"
151
+#include "exec/helper-proto.h"
152
+#include "internals.h"
153
+#include "vector_internals.h"
154
+
155
+static uint64_t clmul64(uint64_t y, uint64_t x)
156
+{
157
+ uint64_t result = 0;
158
+ for (int j = 63; j >= 0; j--) {
159
+ if ((y >> j) & 1) {
160
+ result ^= (x << j);
161
+ }
162
+ }
163
+ return result;
164
+}
165
+
166
+static uint64_t clmulh64(uint64_t y, uint64_t x)
167
+{
168
+ uint64_t result = 0;
169
+ for (int j = 63; j >= 1; j--) {
170
+ if ((y >> j) & 1) {
171
+ result ^= (x >> (64 - j));
172
+ }
173
+ }
174
+ return result;
175
+}
176
+
177
+RVVCALL(OPIVV2, vclmul_vv, OP_UUU_D, H8, H8, H8, clmul64)
178
+GEN_VEXT_VV(vclmul_vv, 8)
179
+RVVCALL(OPIVX2, vclmul_vx, OP_UUU_D, H8, H8, clmul64)
180
+GEN_VEXT_VX(vclmul_vx, 8)
181
+RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
182
+GEN_VEXT_VV(vclmulh_vv, 8)
183
+RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
184
+GEN_VEXT_VX(vclmulh_vx, 8)
185
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
65
new file mode 100644
186
new file mode 100644
66
index XXXXXXX..XXXXXXX
187
index XXXXXXX..XXXXXXX
67
--- /dev/null
188
--- /dev/null
68
+++ b/tests/tcg/riscv64/issue1060.S
189
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
69
@@ -XXX,XX +XXX,XX @@
190
@@ -XXX,XX +XXX,XX @@
70
+    .option    norvc
191
+/*
71
+
192
+ * RISC-V translation routines for the vector crypto extension.
72
+    .text
193
+ *
73
+    .global _start
194
+ * Copyright (C) 2023 SiFive, Inc.
74
+_start:
195
+ * Written by Codethink Ltd and SiFive.
75
+    lla    t0, trap
196
+ *
76
+    csrw    mtvec, t0
197
+ * This program is free software; you can redistribute it and/or modify it
77
+
198
+ * under the terms and conditions of the GNU General Public License,
78
+    # These are all illegal instructions
199
+ * version 2 or later, as published by the Free Software Foundation.
79
+    csrw    time, x0
200
+ *
80
+    .insn    i CUSTOM_0, 0, x0, x0, 0x321
201
+ * This program is distributed in the hope it will be useful, but WITHOUT
81
+    csrw    time, x0
202
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
82
+    .insn    i CUSTOM_0, 0, x0, x0, 0x123
203
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
83
+    csrw    cycle, x0
204
+ * more details.
84
+
205
+ *
85
+    # Success!
206
+ * You should have received a copy of the GNU General Public License along with
86
+    li    a0, 0
207
+ * this program. If not, see <http://www.gnu.org/licenses/>.
87
+    j    _exit
208
+ */
88
+
209
+
89
+trap:
210
+/*
90
+    # When an instruction traps, compare it to the insn in memory.
211
+ * Zvbc
91
+    csrr    t0, mepc
212
+ */
92
+    csrr    t1, mtval
213
+
93
+    lwu    t2, 0(t0)
214
+#define GEN_VV_MASKED_TRANS(NAME, CHECK) \
94
+    bne    t1, t2, fail
215
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
95
+
216
+ { \
96
+    # Skip the insn and continue.
217
+ if (CHECK(s, a)) { \
97
+    addi    t0, t0, 4
218
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, \
98
+    csrw    mepc, t0
219
+ gen_helper_##NAME, s); \
99
+    mret
220
+ } \
100
+
221
+ return false; \
101
+fail:
222
+ }
102
+    li    a0, 1
223
+
103
+
224
+static bool vclmul_vv_check(DisasContext *s, arg_rmrr *a)
104
+# Exit code in a0
225
+{
105
+_exit:
226
+ return opivv_check(s, a) &&
106
+    lla    a1, semiargs
227
+ s->cfg_ptr->ext_zvbc == true &&
107
+    li    t0, 0x20026    # ADP_Stopped_ApplicationExit
228
+ s->sew == MO_64;
108
+    sd    t0, 0(a1)
229
+}
109
+    sd    a0, 8(a1)
230
+
110
+    li    a0, 0x20    # TARGET_SYS_EXIT_EXTENDED
231
+GEN_VV_MASKED_TRANS(vclmul_vv, vclmul_vv_check)
111
+
232
+GEN_VV_MASKED_TRANS(vclmulh_vv, vclmul_vv_check)
112
+    # Semihosting call sequence
233
+
113
+    .balign    16
234
+#define GEN_VX_MASKED_TRANS(NAME, CHECK) \
114
+    slli    zero, zero, 0x1f
235
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
115
+    ebreak
236
+ { \
116
+    srai    zero, zero, 0x7
237
+ if (CHECK(s, a)) { \
117
+    j    .
238
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, \
118
+
239
+ gen_helper_##NAME, s); \
119
+    .data
240
+ } \
120
+    .balign    16
241
+ return false; \
121
+semiargs:
242
+ }
122
+    .space    16
243
+
123
diff --git a/tests/tcg/riscv64/semihost.ld b/tests/tcg/riscv64/semihost.ld
244
+static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
124
new file mode 100644
245
+{
125
index XXXXXXX..XXXXXXX
246
+ return opivx_check(s, a) &&
126
--- /dev/null
247
+ s->cfg_ptr->ext_zvbc == true &&
127
+++ b/tests/tcg/riscv64/semihost.ld
248
+ s->sew == MO_64;
128
@@ -XXX,XX +XXX,XX @@
249
+}
129
+ENTRY(_start)
250
+
130
+
251
+GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
131
+SECTIONS
252
+GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
132
+{
253
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
133
+ /* virt machine, RAM starts at 2gb */
254
index XXXXXXX..XXXXXXX 100644
134
+ . = 0x80000000;
255
--- a/target/riscv/meson.build
135
+ .text : {
256
+++ b/target/riscv/meson.build
136
+ *(.text)
257
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
137
+ }
258
'translate.c',
138
+ .rodata : {
259
'm128_helper.c',
139
+ *(.rodata)
260
'crypto_helper.c',
140
+ }
261
- 'zce_helper.c'
141
+ /* align r/w section to next 2mb */
262
+ 'zce_helper.c',
142
+ . = ALIGN(1 << 21);
263
+ 'vcrypto_helper.c'
143
+ .data : {
264
))
144
+ *(.data)
265
riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
145
+ }
266
146
+ .bss : {
147
+ *(.bss)
148
+ }
149
+}
150
--
267
--
151
2.36.1
268
2.41.0
From: Anup Patel <apatel@ventanamicro.com>

Based on architecture review committee feedback, the [m|s|vs]seteienum,
[m|s|vs]clreienum, [m|s|vs]seteipnum, and [m|s|vs]clreipnum CSRs are
removed in the latest AIA draft v0.3.0 specification.
(Refer, https://github.com/riscv/riscv-aia/releases/tag/0.3.0-draft.31)

These CSRs were mostly for software convenience, and software can always
use the [m|s|vs]iselect and [m|s|vs]ireg CSRs to update the IMSIC interrupt
file bits.

We update the IMSIC CSR emulation accordingly to match the latest AIA
draft specification.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20220616031543.953776-2-apatel@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_bits.h | 24 +------
 target/riscv/csr.c | 150 +---------------------------------------
 2 files changed, 6 insertions(+), 168 deletions(-)

From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>

Move the checks out of the `do_opiv{v,x,i}_gvec{,_shift}` functions
and into the corresponding macros. This enables the functions to be
reused in subsequent commits without duplicating the checks.

Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-ID: <20230711165917.2629866-6-max.chou@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_rvv.c.inc | 28 +++++++++++--------------
 1 file changed, 12 insertions(+), 16 deletions(-)
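For reference, a sketch of how supervisor software can still set a single
IMSIC interrupt-enable bit through the indirect siselect/sireg pair instead
of the removed sseteienum CSR. csr_write()/csr_set() stand in for whatever
CSR accessors the OS already provides, and the constant mirrors the AIA
eie0 indirect-register number; this is illustrative code, not part of the
patch:

    /* Enable IMSIC interrupt identity 'num' in the S-level interrupt file. */
    static void imsic_s_enable(unsigned long num)
    {
        unsigned long grp  = num / __riscv_xlen;
        unsigned long isel = ISELECT_IMSIC_EIE0 + grp * (__riscv_xlen / 32);
        unsigned long bit  = 1UL << (num % __riscv_xlen);

        csr_write(CSR_SISELECT, isel);   /* select the eieXX register       */
        csr_set(CSR_SIREG, bit);         /* set just that enable bit in it  */
    }
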
23
16
24
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
17
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
25
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
26
--- a/target/riscv/cpu_bits.h
19
--- a/target/riscv/insn_trans/trans_rvv.c.inc
27
+++ b/target/riscv/cpu_bits.h
20
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
28
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
29
#define CSR_MIREG 0x351
22
gen_helper_gvec_4_ptr *fn)
30
23
{
31
/* Machine-Level Interrupts (AIA) */
24
TCGLabel *over = gen_new_label();
32
-#define CSR_MTOPI 0xfb0
25
- if (!opivv_check(s, a)) {
33
-
26
- return false;
34
-/* Machine-Level IMSIC Interface (AIA) */
27
- }
35
-#define CSR_MSETEIPNUM 0x358
28
36
-#define CSR_MCLREIPNUM 0x359
29
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
37
-#define CSR_MSETEIENUM 0x35a
30
38
-#define CSR_MCLREIENUM 0x35b
31
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
39
#define CSR_MTOPEI 0x35c
32
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
40
+#define CSR_MTOPI 0xfb0
33
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
41
34
}; \
42
/* Virtual Interrupts for Supervisor Level (AIA) */
35
+ if (!opivv_check(s, a)) { \
43
#define CSR_MVIEN 0x308
36
+ return false; \
44
@@ -XXX,XX +XXX,XX @@
37
+ } \
45
#define CSR_SIREG 0x151
38
return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
46
47
/* Supervisor-Level Interrupts (AIA) */
48
-#define CSR_STOPI 0xdb0
49
-
50
-/* Supervisor-Level IMSIC Interface (AIA) */
51
-#define CSR_SSETEIPNUM 0x158
52
-#define CSR_SCLREIPNUM 0x159
53
-#define CSR_SSETEIENUM 0x15a
54
-#define CSR_SCLREIENUM 0x15b
55
#define CSR_STOPEI 0x15c
56
+#define CSR_STOPI 0xdb0
57
58
/* Supervisor-Level High-Half CSRs (AIA) */
59
#define CSR_SIEH 0x114
60
@@ -XXX,XX +XXX,XX @@
61
#define CSR_VSIREG 0x251
62
63
/* VS-Level Interrupts (H-extension with AIA) */
64
-#define CSR_VSTOPI 0xeb0
65
-
66
-/* VS-Level IMSIC Interface (H-extension with AIA) */
67
-#define CSR_VSSETEIPNUM 0x258
68
-#define CSR_VSCLREIPNUM 0x259
69
-#define CSR_VSSETEIENUM 0x25a
70
-#define CSR_VSCLREIENUM 0x25b
71
#define CSR_VSTOPEI 0x25c
72
+#define CSR_VSTOPI 0xeb0
73
74
/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
75
#define CSR_HIDELEGH 0x613
76
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/riscv/csr.c
79
+++ b/target/riscv/csr.c
80
@@ -XXX,XX +XXX,XX @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
81
return CSR_VSISELECT;
82
case CSR_SIREG:
83
return CSR_VSIREG;
84
- case CSR_SSETEIPNUM:
85
- return CSR_VSSETEIPNUM;
86
- case CSR_SCLREIPNUM:
87
- return CSR_VSCLREIPNUM;
88
- case CSR_SSETEIENUM:
89
- return CSR_VSSETEIENUM;
90
- case CSR_SCLREIENUM:
91
- return CSR_VSCLREIENUM;
92
case CSR_STOPEI:
93
return CSR_VSTOPEI;
94
default:
95
@@ -XXX,XX +XXX,XX @@ done:
96
return RISCV_EXCP_NONE;
97
}
39
}
98
40
99
-static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val,
41
@@ -XXX,XX +XXX,XX @@ static inline bool
100
- target_ulong new_val, target_ulong wr_mask)
42
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
101
-{
43
gen_helper_opivx *fn)
102
- int ret = -EINVAL;
44
{
103
- bool set, pend, virt;
45
- if (!opivx_check(s, a)) {
104
- target_ulong priv, isel, vgein, xlen, nval, wmask;
46
- return false;
105
-
106
- /* Translate CSR number for VS-mode */
107
- csrno = aia_xlate_vs_csrno(env, csrno);
108
-
109
- /* Decode register details from CSR number */
110
- virt = set = pend = false;
111
- switch (csrno) {
112
- case CSR_MSETEIPNUM:
113
- priv = PRV_M;
114
- set = true;
115
- pend = true;
116
- break;
117
- case CSR_MCLREIPNUM:
118
- priv = PRV_M;
119
- pend = true;
120
- break;
121
- case CSR_MSETEIENUM:
122
- priv = PRV_M;
123
- set = true;
124
- break;
125
- case CSR_MCLREIENUM:
126
- priv = PRV_M;
127
- break;
128
- case CSR_SSETEIPNUM:
129
- priv = PRV_S;
130
- set = true;
131
- pend = true;
132
- break;
133
- case CSR_SCLREIPNUM:
134
- priv = PRV_S;
135
- pend = true;
136
- break;
137
- case CSR_SSETEIENUM:
138
- priv = PRV_S;
139
- set = true;
140
- break;
141
- case CSR_SCLREIENUM:
142
- priv = PRV_S;
143
- break;
144
- case CSR_VSSETEIPNUM:
145
- priv = PRV_S;
146
- virt = true;
147
- set = true;
148
- pend = true;
149
- break;
150
- case CSR_VSCLREIPNUM:
151
- priv = PRV_S;
152
- virt = true;
153
- pend = true;
154
- break;
155
- case CSR_VSSETEIENUM:
156
- priv = PRV_S;
157
- virt = true;
158
- set = true;
159
- break;
160
- case CSR_VSCLREIENUM:
161
- priv = PRV_S;
162
- virt = true;
163
- break;
164
- default:
165
- goto done;
166
- };
167
-
168
- /* IMSIC CSRs only available when machine implements IMSIC. */
169
- if (!env->aia_ireg_rmw_fn[priv]) {
170
- goto done;
171
- }
47
- }
172
-
48
-
173
- /* Find the selected guest interrupt file */
49
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
174
- vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
50
TCGv_i64 src1 = tcg_temp_new_i64();
175
-
51
176
- /* Selected guest interrupt file should be valid */
52
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
177
- if (virt && (!vgein || env->geilen < vgein)) {
53
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
178
- goto done;
54
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
55
}; \
56
+ if (!opivx_check(s, a)) { \
57
+ return false; \
58
+ } \
59
return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
60
}
61
62
@@ -XXX,XX +XXX,XX @@ static inline bool
63
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
64
gen_helper_opivx *fn, imm_mode_t imm_mode)
65
{
66
- if (!opivx_check(s, a)) {
67
- return false;
179
- }
68
- }
180
-
69
-
181
- /* Set/Clear CSRs always read zero */
70
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
182
- if (val) {
71
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
183
- *val = 0;
72
extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
73
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
74
gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
75
gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
76
}; \
77
+ if (!opivx_check(s, a)) { \
78
+ return false; \
79
+ } \
80
return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
81
fns[s->sew], IMM_MODE); \
82
}
83
@@ -XXX,XX +XXX,XX @@ static inline bool
84
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
85
gen_helper_opivx *fn)
86
{
87
- if (!opivx_check(s, a)) {
88
- return false;
184
- }
89
- }
185
-
90
-
186
- if (wr_mask) {
91
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
187
- /* Get interrupt number */
92
TCGv_i32 src1 = tcg_temp_new_i32();
188
- new_val &= wr_mask;
93
189
-
94
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
190
- /* Find target interrupt pending/enable register */
95
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
191
- xlen = riscv_cpu_mxl_bits(env);
96
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
192
- isel = (new_val / xlen);
97
}; \
193
- isel *= (xlen / IMSIC_EIPx_BITS);
98
- \
194
- isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0;
99
+ if (!opivx_check(s, a)) { \
195
-
100
+ return false; \
196
- /* Find the interrupt bit to be set/clear */
101
+ } \
197
- wmask = ((target_ulong)1) << (new_val % xlen);
102
return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
198
- nval = (set) ? wmask : 0;
103
}
199
-
104
200
- /* Call machine specific IMSIC register emulation */
201
- ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
202
- AIA_MAKE_IREG(isel, priv, virt,
203
- vgein, xlen),
204
- NULL, nval, wmask);
205
- } else {
206
- ret = 0;
207
- }
208
-
209
-done:
210
- if (ret) {
211
- return (riscv_cpu_virt_enabled(env) && virt) ?
212
- RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
213
- }
214
- return RISCV_EXCP_NONE;
215
-}
216
-
217
static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
218
target_ulong new_val, target_ulong wr_mask)
219
{
220
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
221
[CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
222
223
/* Machine-Level Interrupts (AIA) */
224
- [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
225
-
226
- /* Machine-Level IMSIC Interface (AIA) */
227
- [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
228
- [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
229
- [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum },
230
- [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum },
231
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
232
+ [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
233
234
/* Virtual Interrupts for Supervisor Level (AIA) */
235
[CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
236
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
237
[CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
238
239
/* Supervisor-Level Interrupts (AIA) */
240
- [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
241
-
242
- /* Supervisor-Level IMSIC Interface (AIA) */
243
- [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
244
- [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
245
- [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
246
- [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
247
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
248
+ [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
249
250
/* Supervisor-Level High-Half CSRs (AIA) */
251
[CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
252
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
253
[CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
254
255
/* VS-Level Interrupts (H-extension with AIA) */
256
- [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
257
-
258
- /* VS-Level IMSIC Interface (H-extension with AIA) */
259
- [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
260
- [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
261
- [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
262
- [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
263
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
264
+ [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
265
266
/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
267
[CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
268
--
105
--
269
2.36.1
106
2.41.0
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Dickon Hood <dickon.hood@codethink.co.uk>
2
2
3
The function doesn't set mtval, it sets badaddr. Move the set
3
Zvbb (implemented in later commit) has a widening instruction, which
4
of badaddr directly into gen_exception_inst_addr_mis and use
4
requires an extra check on the enabled extensions. Refactor
5
generate_exception.
5
GEN_OPIVX_WIDEN_TRANS() to take a check function to avoid reimplementing
6
it.
6
7
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-Id: <20220604231004.49990-3-richard.henderson@linaro.org>
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
Message-ID: <20230711165917.2629866-7-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
14
---
12
target/riscv/translate.c | 11 ++---------
15
target/riscv/insn_trans/trans_rvv.c.inc | 52 +++++++++++--------------
13
1 file changed, 2 insertions(+), 9 deletions(-)
16
1 file changed, 23 insertions(+), 29 deletions(-)
14
17
15
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/translate.c
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/translate.c
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void generate_exception(DisasContext *ctx, int excp)
22
@@ -XXX,XX +XXX,XX @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
20
ctx->base.is_jmp = DISAS_NORETURN;
23
vext_check_ds(s, a->rd, a->rs2, a->vm);
21
}
24
}
22
25
23
-static void generate_exception_mtval(DisasContext *ctx, int excp)
26
-static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
27
- gen_helper_opivx *fn)
24
-{
28
-{
25
- gen_set_pc_imm(ctx, ctx->base.pc_next);
29
- if (opivx_widen_check(s, a)) {
26
- tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
30
- return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
27
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp));
31
- }
28
- ctx->base.is_jmp = DISAS_NORETURN;
32
- return false;
29
-}
33
-}
30
-
34
-
31
static void gen_exception_illegal(DisasContext *ctx)
35
-#define GEN_OPIVX_WIDEN_TRANS(NAME) \
32
{
36
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
33
tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env,
37
-{ \
34
@@ -XXX,XX +XXX,XX @@ static void gen_exception_illegal(DisasContext *ctx)
38
- static gen_helper_opivx * const fns[3] = { \
35
39
- gen_helper_##NAME##_b, \
36
static void gen_exception_inst_addr_mis(DisasContext *ctx)
40
- gen_helper_##NAME##_h, \
37
{
41
- gen_helper_##NAME##_w \
38
- generate_exception_mtval(ctx, RISCV_EXCP_INST_ADDR_MIS);
42
- }; \
39
+ tcg_gen_st_tl(cpu_pc, cpu_env, offsetof(CPURISCVState, badaddr));
43
- return do_opivx_widen(s, a, fns[s->sew]); \
40
+ generate_exception(ctx, RISCV_EXCP_INST_ADDR_MIS);
44
+#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
45
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
46
+{ \
47
+ if (CHECK(s, a)) { \
48
+ static gen_helper_opivx * const fns[3] = { \
49
+ gen_helper_##NAME##_b, \
50
+ gen_helper_##NAME##_h, \
51
+ gen_helper_##NAME##_w \
52
+ }; \
53
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
54
+ } \
55
+ return false; \
41
}
56
}
42
57
43
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
58
-GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
59
-GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
60
-GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
61
-GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
62
+GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
63
+GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
64
+GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
65
+GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)
66
67
/* WIDEN OPIVV with WIDEN */
68
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
69
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vrem_vx, opivx_check)
70
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
71
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
72
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
73
-GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
74
-GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
75
-GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
76
+GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
77
+GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
78
+GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)
79
80
/* Vector Single-Width Integer Multiply-Add Instructions */
81
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
82
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
83
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
84
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
85
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
86
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
87
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
88
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
89
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
90
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
91
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
92
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
93
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
94
95
/* Vector Integer Merge and Move Instructions */
96
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
44
--
97
--
45
2.36.1
98
2.41.0
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
The set of instructions that require decode_save_opc for
3
Move some macros out of `vector_helper` and into `vector_internals`.
4
unwinding is really fairly small -- only insns that can
4
This ensures they can be used by both vector and vector-crypto helpers
5
raise ILLEGAL_INSN at runtime. This includes CSR, anything
5
(the latter implemented in subsequent commits).
6
that uses a *new* fp rounding mode, and many privileged insns.
7
6
8
Since unwind info is stored as the difference from the
7
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
9
previous insn, storing a 0 for most insns minimizes the
8
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
10
size of the unwind info.
9
Signed-off-by: Max Chou <max.chou@sifive.com>
11
10
Message-ID: <20230711165917.2629866-8-max.chou@sifive.com>
12
Booting a debian kernel image to the missing rootfs panic yields
13
14
- gen code size 22226819/1026886656
15
+ gen code size 21601907/1026886656
16
17
on 41k TranslationBlocks, a savings of 610kB or a bit less than 3%.
18
19
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
20
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
21
Message-Id: <20220604231004.49990-4-richard.henderson@linaro.org>
22
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
23
---
12
---
24
target/riscv/translate.c | 18 +++++++++---------
13
target/riscv/vector_internals.h | 46 +++++++++++++++++++++++++++++++++
25
target/riscv/insn_trans/trans_privileged.c.inc | 4 ++++
14
target/riscv/vector_helper.c | 42 ------------------------------
26
target/riscv/insn_trans/trans_rvh.c.inc | 2 ++
15
2 files changed, 46 insertions(+), 42 deletions(-)
27
target/riscv/insn_trans/trans_rvi.c.inc | 2 ++
28
4 files changed, 17 insertions(+), 9 deletions(-)
29
16
30
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
17
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
31
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
32
--- a/target/riscv/translate.c
19
--- a/target/riscv/vector_internals.h
33
+++ b/target/riscv/translate.c
20
+++ b/target/riscv/vector_internals.h
34
@@ -XXX,XX +XXX,XX @@ static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
21
@@ -XXX,XX +XXX,XX @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
35
tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
22
/* expand macro args before macro */
36
}
23
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
37
24
38
+static void decode_save_opc(DisasContext *ctx)
25
+/* (TD, T2, TX2) */
39
+{
26
+#define OP_UU_B uint8_t, uint8_t, uint8_t
40
+ assert(ctx->insn_start != NULL);
27
+#define OP_UU_H uint16_t, uint16_t, uint16_t
41
+ tcg_set_insn_start_param(ctx->insn_start, 1, ctx->opcode);
28
+#define OP_UU_W uint32_t, uint32_t, uint32_t
42
+ ctx->insn_start = NULL;
29
+#define OP_UU_D uint64_t, uint64_t, uint64_t
30
+
31
/* (TD, T1, T2, TX1, TX2) */
32
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
33
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
34
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
35
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
36
37
+#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
38
+static void do_##NAME(void *vd, void *vs2, int i) \
39
+{ \
40
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
41
+ *((TD *)vd + HD(i)) = OP(s2); \
43
+}
42
+}
44
+
43
+
45
static void gen_set_pc_imm(DisasContext *ctx, target_ulong dest)
44
+#define GEN_VEXT_V(NAME, ESZ) \
46
{
45
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
47
if (get_xl(ctx) == MXL_RV32) {
46
+ CPURISCVState *env, uint32_t desc) \
48
@@ -XXX,XX +XXX,XX @@ static void gen_set_rm(DisasContext *ctx, int rm)
47
+{ \
49
return;
48
+ uint32_t vm = vext_vm(desc); \
50
}
49
+ uint32_t vl = env->vl; \
51
50
+ uint32_t total_elems = \
52
+ /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
51
+ vext_get_total_elems(env, desc, ESZ); \
53
+ decode_save_opc(ctx);
52
+ uint32_t vta = vext_vta(desc); \
54
gen_helper_set_rounding_mode(cpu_env, tcg_constant_i32(rm));
53
+ uint32_t vma = vext_vma(desc); \
54
+ uint32_t i; \
55
+ \
56
+ for (i = env->vstart; i < vl; i++) { \
57
+ if (!vm && !vext_elem_mask(v0, i)) { \
58
+ /* set masked-off elements to 1s */ \
59
+ vext_set_elems_1s(vd, vma, i * ESZ, \
60
+ (i + 1) * ESZ); \
61
+ continue; \
62
+ } \
63
+ do_##NAME(vd, vs2, i); \
64
+ } \
65
+ env->vstart = 0; \
66
+ /* set tail elements to 1s */ \
67
+ vext_set_elems_1s(vd, vta, vl * ESZ, \
68
+ total_elems * ESZ); \
69
+}
70
+
71
/* operation of two vector elements */
72
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
73
74
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
75
do_##NAME, ESZ); \
55
}
76
}
56
77
57
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
78
+/* Three of the widening shortening macros: */
58
/* Include decoders for factored-out extensions */
79
+/* (TD, T1, T2, TX1, TX2) */
59
#include "decode-XVentanaCondOps.c.inc"
80
+#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
60
81
+#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
61
-static inline void decode_save_opc(DisasContext *ctx, target_ulong opc)
82
+#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
62
-{
83
+
63
- assert(ctx->insn_start != NULL);
84
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
64
- tcg_set_insn_start_param(ctx->insn_start, 1, opc);
85
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
65
- ctx->insn_start = NULL;
86
index XXXXXXX..XXXXXXX 100644
87
--- a/target/riscv/vector_helper.c
88
+++ b/target/riscv/vector_helper.c
89
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
90
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
91
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
92
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
93
-#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
94
-#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
95
-#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
96
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
97
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
98
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
99
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VF(vfwnmsac_vf_h, 4)
100
GEN_VEXT_VF(vfwnmsac_vf_w, 8)
101
102
/* Vector Floating-Point Square-Root Instruction */
103
-/* (TD, T2, TX2) */
104
-#define OP_UU_H uint16_t, uint16_t, uint16_t
105
-#define OP_UU_W uint32_t, uint32_t, uint32_t
106
-#define OP_UU_D uint64_t, uint64_t, uint64_t
107
-
108
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
109
static void do_##NAME(void *vd, void *vs2, int i, \
110
CPURISCVState *env) \
111
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
112
GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
113
114
/* Vector Floating-Point Classify Instruction */
115
-#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
116
-static void do_##NAME(void *vd, void *vs2, int i) \
117
-{ \
118
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
119
- *((TD *)vd + HD(i)) = OP(s2); \
66
-}
120
-}
67
-
121
-
68
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
122
-#define GEN_VEXT_V(NAME, ESZ) \
123
-void HELPER(NAME)(void *vd, void *v0, void *vs2, \
124
- CPURISCVState *env, uint32_t desc) \
125
-{ \
126
- uint32_t vm = vext_vm(desc); \
127
- uint32_t vl = env->vl; \
128
- uint32_t total_elems = \
129
- vext_get_total_elems(env, desc, ESZ); \
130
- uint32_t vta = vext_vta(desc); \
131
- uint32_t vma = vext_vma(desc); \
132
- uint32_t i; \
133
- \
134
- for (i = env->vstart; i < vl; i++) { \
135
- if (!vm && !vext_elem_mask(v0, i)) { \
136
- /* set masked-off elements to 1s */ \
137
- vext_set_elems_1s(vd, vma, i * ESZ, \
138
- (i + 1) * ESZ); \
139
- continue; \
140
- } \
141
- do_##NAME(vd, vs2, i); \
142
- } \
143
- env->vstart = 0; \
144
- /* set tail elements to 1s */ \
145
- vext_set_elems_1s(vd, vta, vl * ESZ, \
146
- total_elems * ESZ); \
147
-}
148
-
149
target_ulong fclass_h(uint64_t frs1)
69
{
150
{
70
/*
151
float16 f = frs1;
71
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
72
73
/* Check for compressed insn */
74
if (extract16(opcode, 0, 2) != 3) {
75
- decode_save_opc(ctx, opcode);
76
if (!has_ext(ctx, RVC)) {
77
gen_exception_illegal(ctx);
78
} else {
79
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
80
opcode32 = deposit32(opcode32, 16, 16,
81
translator_lduw(env, &ctx->base,
82
ctx->base.pc_next + 2));
83
- decode_save_opc(ctx, opcode32);
84
ctx->opcode = opcode32;
85
ctx->pc_succ_insn = ctx->base.pc_next + 4;
86
87
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/riscv/insn_trans/trans_privileged.c.inc
90
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
91
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
92
{
93
#ifndef CONFIG_USER_ONLY
94
if (has_ext(ctx, RVS)) {
95
+ decode_save_opc(ctx);
96
gen_helper_sret(cpu_pc, cpu_env);
97
tcg_gen_exit_tb(NULL, 0); /* no chaining */
98
ctx->base.is_jmp = DISAS_NORETURN;
99
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
100
static bool trans_mret(DisasContext *ctx, arg_mret *a)
101
{
102
#ifndef CONFIG_USER_ONLY
103
+ decode_save_opc(ctx);
104
gen_helper_mret(cpu_pc, cpu_env);
105
tcg_gen_exit_tb(NULL, 0); /* no chaining */
106
ctx->base.is_jmp = DISAS_NORETURN;
107
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
108
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
109
{
110
#ifndef CONFIG_USER_ONLY
111
+ decode_save_opc(ctx);
112
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
113
gen_helper_wfi(cpu_env);
114
return true;
115
@@ -XXX,XX +XXX,XX @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
116
static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
117
{
118
#ifndef CONFIG_USER_ONLY
119
+ decode_save_opc(ctx);
120
gen_helper_tlb_flush(cpu_env);
121
return true;
122
#endif
123
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
124
index XXXXXXX..XXXXXXX 100644
125
--- a/target/riscv/insn_trans/trans_rvh.c.inc
126
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
127
@@ -XXX,XX +XXX,XX @@ static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
128
{
129
REQUIRE_EXT(ctx, RVH);
130
#ifndef CONFIG_USER_ONLY
131
+ decode_save_opc(ctx);
132
gen_helper_hyp_gvma_tlb_flush(cpu_env);
133
return true;
134
#endif
135
@@ -XXX,XX +XXX,XX @@ static bool trans_hfence_vvma(DisasContext *ctx, arg_sfence_vma *a)
136
{
137
REQUIRE_EXT(ctx, RVH);
138
#ifndef CONFIG_USER_ONLY
139
+ decode_save_opc(ctx);
140
gen_helper_hyp_tlb_flush(cpu_env);
141
return true;
142
#endif
143
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
144
index XXXXXXX..XXXXXXX 100644
145
--- a/target/riscv/insn_trans/trans_rvi.c.inc
146
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
147
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
148
149
static bool do_csr_post(DisasContext *ctx)
150
{
151
+ /* The helper may raise ILLEGAL_INSN -- record binv for unwind. */
152
+ decode_save_opc(ctx);
153
/* We may have changed important cpu state -- exit to main loop. */
154
gen_set_pc_imm(ctx, ctx->pc_succ_insn);
155
tcg_gen_exit_tb(NULL, 0);
156
--
2.36.1
--
2.41.0
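For readers skimming the helper move above, the following self-contained C sketch shows the shape of the loop that GEN_VEXT_V expands to: masked-off elements are skipped (or filled with 1s when the mask policy is agnostic), the per-element do_NAME() operation runs on the active elements, and the tail past vl is filled. The vext_vm()/vext_vta()/vext_vma() accessors and the H1..H8 ordering macros from QEMU are replaced here by plain arrays and flags, so treat this as an illustration of the pattern, not the QEMU code.

/*
 * Illustrative only: a standalone, simplified analogue of the GEN_VEXT_V
 * pattern above.  QEMU's vext_*() accessors, VTA/VMA decoding and element
 * ordering macros are replaced by plain arrays and flags.
 */
#include <stdint.h>
#include <stdio.h>

#define VLEN 8

/* Unary op applied per element, like do_<NAME>() generated by OPIVV1. */
static uint8_t op_example(uint8_t s2)
{
    return (uint8_t)(s2 ^ 0xff);     /* stand-in operation */
}

/*
 * Element loop in the style of GEN_VEXT_V: skip (and optionally fill)
 * masked-off elements when vm == 0, run the op otherwise, then fill the
 * tail elements past vl.
 */
static void vext_v_like(uint8_t *vd, const uint8_t *v0_mask,
                        const uint8_t *vs2, uint32_t vstart,
                        uint32_t vl, uint32_t vm, uint32_t agnostic)
{
    for (uint32_t i = vstart; i < vl; i++) {
        if (!vm && !v0_mask[i]) {
            if (agnostic) {
                vd[i] = 0xff;        /* "set masked-off elements to 1s" */
            }
            continue;
        }
        vd[i] = op_example(vs2[i]);
    }
    for (uint32_t i = vl; i < VLEN; i++) {
        if (agnostic) {
            vd[i] = 0xff;            /* "set tail elements to 1s" */
        }
    }
}

int main(void)
{
    uint8_t vd[VLEN] = {0}, vs2[VLEN] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t mask[VLEN] = {1, 0, 1, 0, 1, 0, 1, 0};

    vext_v_like(vd, mask, vs2, 0, 6, 0, 1);
    for (int i = 0; i < VLEN; i++) {
        printf("%02x ", vd[i]);
    }
    printf("\n");
    return 0;
}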
1
From: Atish Patra <atish.patra@wdc.com>
1
From: Dickon Hood <dickon.hood@codethink.co.uk>
2
2
3
With the SBI PMU extension, users can use any of the available hpmcounters to
3
This commit adds support for the Zvbb vector-crypto extension, which
4
track any perf event based on the value written to the mhpmevent CSR.
4
consists of the following instructions:
5
Add read/write functionality for these CSRs.
6
5
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
* vrol.[vv,vx]
8
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
7
* vror.[vv,vx,vi]
9
Signed-off-by: Atish Patra <atish.patra@wdc.com>
8
* vbrev8.v
10
Signed-off-by: Atish Patra <atishp@rivosinc.com>
9
* vrev8.v
11
Message-Id: <20220620231603.2547260-7-atishp@rivosinc.com>
10
* vandn.[vv,vx]
11
* vbrev.v
12
* vclz.v
13
* vctz.v
14
* vcpop.v
15
* vwsll.[vv,vx,vi]
16
17
Translation functions are defined in
18
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
19
`target/riscv/vcrypto_helper.c`.
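As a side note on the vror.vi encoding added below in insn32.decode, the 6-bit rotate amount comes from the split field %imm_z6 (26:1 15:5): instruction bit 26 supplies imm[5] and bits 19:15 supply imm[4:0]. A minimal extraction sketch, using a made-up instruction word purely for demonstration:

/*
 * Extract the vror.vi rotate amount from a 32-bit instruction word,
 * following the %imm_z6 split field in insn32.decode below.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned vror_vi_imm(uint32_t insn)
{
    unsigned hi = (insn >> 26) & 0x1;   /* imm[5]   at bit 26 */
    unsigned lo = (insn >> 15) & 0x1f;  /* imm[4:0] at bits 19:15 */
    return (hi << 5) | lo;
}

int main(void)
{
    uint32_t insn = (1u << 26) | (9u << 15);   /* imm[5]=1, imm[4:0]=9 */
    printf("rotate amount = %u\n", vror_vi_imm(insn));   /* prints 41 */
    return 0;
}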
20
21
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
22
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
23
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
24
[max.chou@sifive.com: Fix imm mode of vror.vi]
25
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
26
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
27
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
28
Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
29
Signed-off-by: Max Chou <max.chou@sifive.com>
30
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
[max.chou@sifive.com: Exposed x-zvbb property]
32
Message-ID: <20230711165917.2629866-9-max.chou@sifive.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
34
---
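For orientation, the read/write helpers added below index the new mhpmcounter_val[]/mhpmcounterh_val[] arrays by subtracting the base CSR number, so mcycle and minstret land at indices 0 and 2 and mhpmcounter3..31 at 3..31. Here is a standalone sketch of that mapping; the CSR_* values are the architectural CSR addresses, and QEMU's macros are assumed to carry the same numbers:

/*
 * Standalone sketch of the counter-index mapping used by the
 * read_hpmcounter()/read_hpmcounterh() helpers in this patch.
 */
#include <stdio.h>

#define CSR_MCYCLE        0xB00
#define CSR_MHPMCOUNTER3  0xB03
#define CSR_MHPMCOUNTER31 0xB1F
#define CSR_CYCLE         0xC00
#define CSR_HPMCOUNTER3   0xC03
#define CSR_HPMCOUNTER31  0xC1F

/* Map a counter CSR number to an index into mhpmcounter_val[32]. */
static int hpm_ctr_index(int csrno)
{
    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        return csrno - CSR_MHPMCOUNTER3 + 3;   /* machine-mode counters */
    }
    if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        return csrno - CSR_HPMCOUNTER3 + 3;    /* user-mode aliases */
    }
    return -1;                                 /* not a counter CSR */
}

int main(void)
{
    printf("mhpmcounter3 -> %d\n", hpm_ctr_index(CSR_MHPMCOUNTER3));      /* 3 */
    printf("hpmcounter17 -> %d\n", hpm_ctr_index(CSR_HPMCOUNTER3 + 14));  /* 17 */
    printf("mcycle       -> %d\n", hpm_ctr_index(CSR_MCYCLE));            /* 0 */
    return 0;
}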
14
target/riscv/cpu.h | 11 +
35
target/riscv/cpu_cfg.h | 1 +
15
target/riscv/csr.c | 469 ++++++++++++++++++++++++++++-------------
36
target/riscv/helper.h | 62 +++++++++
16
target/riscv/machine.c | 3 +
37
target/riscv/insn32.decode | 20 +++
17
3 files changed, 331 insertions(+), 152 deletions(-)
38
target/riscv/cpu.c | 12 ++
39
target/riscv/vcrypto_helper.c | 138 +++++++++++++++++++
40
target/riscv/insn_trans/trans_rvvk.c.inc | 164 +++++++++++++++++++++++
41
6 files changed, 397 insertions(+)
18
42
19
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
43
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
20
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.h
45
--- a/target/riscv/cpu_cfg.h
22
+++ b/target/riscv/cpu.h
46
+++ b/target/riscv/cpu_cfg.h
23
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState CPURISCVState;
47
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
24
#endif
48
bool ext_zve32f;
25
49
bool ext_zve64f;
26
#define RV_VLEN_MAX 1024
50
bool ext_zve64d;
27
+#define RV_MAX_MHPMEVENTS 29
51
+ bool ext_zvbb;
28
+#define RV_MAX_MHPMCOUNTERS 32
52
bool ext_zvbc;
29
53
bool ext_zmmul;
30
FIELD(VTYPE, VLMUL, 0, 3)
54
bool ext_zvfbfmin;
31
FIELD(VTYPE, VSEW, 3, 3)
55
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
32
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
33
34
target_ulong mcountinhibit;
35
36
+ /* PMU counter configured values */
37
+ target_ulong mhpmcounter_val[RV_MAX_MHPMCOUNTERS];
38
+
39
+ /* for RV32 */
40
+ target_ulong mhpmcounterh_val[RV_MAX_MHPMCOUNTERS];
41
+
42
+ /* PMU event selector configured values */
43
+ target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];
44
+
45
target_ulong sscratch;
46
target_ulong mscratch;
47
48
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
49
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
50
--- a/target/riscv/csr.c
57
--- a/target/riscv/helper.h
51
+++ b/target/riscv/csr.c
58
+++ b/target/riscv/helper.h
52
@@ -XXX,XX +XXX,XX @@ static RISCVException mctr(CPURISCVState *env, int csrno)
59
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
53
return RISCV_EXCP_NONE;
60
DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
54
}
61
DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
55
62
DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
56
+static RISCVException mctr32(CPURISCVState *env, int csrno)
63
+
64
+DEF_HELPER_6(vror_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
65
+DEF_HELPER_6(vror_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
66
+DEF_HELPER_6(vror_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
67
+DEF_HELPER_6(vror_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
68
+
69
+DEF_HELPER_6(vror_vx_b, void, ptr, ptr, tl, ptr, env, i32)
70
+DEF_HELPER_6(vror_vx_h, void, ptr, ptr, tl, ptr, env, i32)
71
+DEF_HELPER_6(vror_vx_w, void, ptr, ptr, tl, ptr, env, i32)
72
+DEF_HELPER_6(vror_vx_d, void, ptr, ptr, tl, ptr, env, i32)
73
+
74
+DEF_HELPER_6(vrol_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
75
+DEF_HELPER_6(vrol_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
76
+DEF_HELPER_6(vrol_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
77
+DEF_HELPER_6(vrol_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
78
+
79
+DEF_HELPER_6(vrol_vx_b, void, ptr, ptr, tl, ptr, env, i32)
80
+DEF_HELPER_6(vrol_vx_h, void, ptr, ptr, tl, ptr, env, i32)
81
+DEF_HELPER_6(vrol_vx_w, void, ptr, ptr, tl, ptr, env, i32)
82
+DEF_HELPER_6(vrol_vx_d, void, ptr, ptr, tl, ptr, env, i32)
83
+
84
+DEF_HELPER_5(vrev8_v_b, void, ptr, ptr, ptr, env, i32)
85
+DEF_HELPER_5(vrev8_v_h, void, ptr, ptr, ptr, env, i32)
86
+DEF_HELPER_5(vrev8_v_w, void, ptr, ptr, ptr, env, i32)
87
+DEF_HELPER_5(vrev8_v_d, void, ptr, ptr, ptr, env, i32)
88
+DEF_HELPER_5(vbrev8_v_b, void, ptr, ptr, ptr, env, i32)
89
+DEF_HELPER_5(vbrev8_v_h, void, ptr, ptr, ptr, env, i32)
90
+DEF_HELPER_5(vbrev8_v_w, void, ptr, ptr, ptr, env, i32)
91
+DEF_HELPER_5(vbrev8_v_d, void, ptr, ptr, ptr, env, i32)
92
+DEF_HELPER_5(vbrev_v_b, void, ptr, ptr, ptr, env, i32)
93
+DEF_HELPER_5(vbrev_v_h, void, ptr, ptr, ptr, env, i32)
94
+DEF_HELPER_5(vbrev_v_w, void, ptr, ptr, ptr, env, i32)
95
+DEF_HELPER_5(vbrev_v_d, void, ptr, ptr, ptr, env, i32)
96
+
97
+DEF_HELPER_5(vclz_v_b, void, ptr, ptr, ptr, env, i32)
98
+DEF_HELPER_5(vclz_v_h, void, ptr, ptr, ptr, env, i32)
99
+DEF_HELPER_5(vclz_v_w, void, ptr, ptr, ptr, env, i32)
100
+DEF_HELPER_5(vclz_v_d, void, ptr, ptr, ptr, env, i32)
101
+DEF_HELPER_5(vctz_v_b, void, ptr, ptr, ptr, env, i32)
102
+DEF_HELPER_5(vctz_v_h, void, ptr, ptr, ptr, env, i32)
103
+DEF_HELPER_5(vctz_v_w, void, ptr, ptr, ptr, env, i32)
104
+DEF_HELPER_5(vctz_v_d, void, ptr, ptr, ptr, env, i32)
105
+DEF_HELPER_5(vcpop_v_b, void, ptr, ptr, ptr, env, i32)
106
+DEF_HELPER_5(vcpop_v_h, void, ptr, ptr, ptr, env, i32)
107
+DEF_HELPER_5(vcpop_v_w, void, ptr, ptr, ptr, env, i32)
108
+DEF_HELPER_5(vcpop_v_d, void, ptr, ptr, ptr, env, i32)
109
+
110
+DEF_HELPER_6(vwsll_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
111
+DEF_HELPER_6(vwsll_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
112
+DEF_HELPER_6(vwsll_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
113
+DEF_HELPER_6(vwsll_vx_b, void, ptr, ptr, tl, ptr, env, i32)
114
+DEF_HELPER_6(vwsll_vx_h, void, ptr, ptr, tl, ptr, env, i32)
115
+DEF_HELPER_6(vwsll_vx_w, void, ptr, ptr, tl, ptr, env, i32)
116
+
117
+DEF_HELPER_6(vandn_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
118
+DEF_HELPER_6(vandn_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
119
+DEF_HELPER_6(vandn_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
120
+DEF_HELPER_6(vandn_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
121
+DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
122
+DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
123
+DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
124
+DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
125
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/riscv/insn32.decode
128
+++ b/target/riscv/insn32.decode
129
@@ -XXX,XX +XXX,XX @@
130
%imm_u 12:s20 !function=ex_shift_12
131
%imm_bs 30:2 !function=ex_shift_3
132
%imm_rnum 20:4
133
+%imm_z6 26:1 15:5
134
135
# Argument sets:
136
&empty
137
@@ -XXX,XX +XXX,XX @@
138
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
139
@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
140
@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
141
+@r2_zimm6 ..... . vm:1 ..... ..... ... ..... ....... &rmrr %rs2 rs1=%imm_z6 %rd
142
@r2_zimm11 . zimm:11 ..... ... ..... ....... %rs1 %rd
143
@r2_zimm10 .. zimm:10 ..... ... ..... ....... %rs1 %rd
144
@r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
145
@@ -XXX,XX +XXX,XX @@ vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
146
vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
147
vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
148
vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
149
+
150
+# *** Zvbb vector crypto extension ***
151
+vrol_vv 010101 . ..... ..... 000 ..... 1010111 @r_vm
152
+vrol_vx 010101 . ..... ..... 100 ..... 1010111 @r_vm
153
+vror_vv 010100 . ..... ..... 000 ..... 1010111 @r_vm
154
+vror_vx 010100 . ..... ..... 100 ..... 1010111 @r_vm
155
+vror_vi 01010. . ..... ..... 011 ..... 1010111 @r2_zimm6
156
+vbrev8_v 010010 . ..... 01000 010 ..... 1010111 @r2_vm
157
+vrev8_v 010010 . ..... 01001 010 ..... 1010111 @r2_vm
158
+vandn_vv 000001 . ..... ..... 000 ..... 1010111 @r_vm
159
+vandn_vx 000001 . ..... ..... 100 ..... 1010111 @r_vm
160
+vbrev_v 010010 . ..... 01010 010 ..... 1010111 @r2_vm
161
+vclz_v 010010 . ..... 01100 010 ..... 1010111 @r2_vm
162
+vctz_v 010010 . ..... 01101 010 ..... 1010111 @r2_vm
163
+vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
164
+vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
165
+vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
166
+vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
167
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/target/riscv/cpu.c
170
+++ b/target/riscv/cpu.c
171
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
172
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
173
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
174
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
175
+ ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
176
ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
177
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
178
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
179
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
180
return;
181
}
182
183
+ /*
184
+ * In principle Zve*x would also suffice here, were they supported
185
+ * in qemu
186
+ */
187
+ if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
188
+ error_setg(errp,
189
+ "Vector crypto extensions require V or Zve* extensions");
190
+ return;
191
+ }
192
+
193
if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
194
error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
195
return;
196
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
197
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
198
199
/* Vector cryptography extensions */
200
+ DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
201
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
202
203
DEFINE_PROP_END_OF_LIST(),
204
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/target/riscv/vcrypto_helper.c
207
+++ b/target/riscv/vcrypto_helper.c
208
@@ -XXX,XX +XXX,XX @@
209
#include "qemu/osdep.h"
210
#include "qemu/host-utils.h"
211
#include "qemu/bitops.h"
212
+#include "qemu/bswap.h"
213
#include "cpu.h"
214
#include "exec/memop.h"
215
#include "exec/exec-all.h"
216
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
217
GEN_VEXT_VV(vclmulh_vv, 8)
218
RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
219
GEN_VEXT_VX(vclmulh_vx, 8)
220
+
221
+RVVCALL(OPIVV2, vror_vv_b, OP_UUU_B, H1, H1, H1, ror8)
222
+RVVCALL(OPIVV2, vror_vv_h, OP_UUU_H, H2, H2, H2, ror16)
223
+RVVCALL(OPIVV2, vror_vv_w, OP_UUU_W, H4, H4, H4, ror32)
224
+RVVCALL(OPIVV2, vror_vv_d, OP_UUU_D, H8, H8, H8, ror64)
225
+GEN_VEXT_VV(vror_vv_b, 1)
226
+GEN_VEXT_VV(vror_vv_h, 2)
227
+GEN_VEXT_VV(vror_vv_w, 4)
228
+GEN_VEXT_VV(vror_vv_d, 8)
229
+
230
+RVVCALL(OPIVX2, vror_vx_b, OP_UUU_B, H1, H1, ror8)
231
+RVVCALL(OPIVX2, vror_vx_h, OP_UUU_H, H2, H2, ror16)
232
+RVVCALL(OPIVX2, vror_vx_w, OP_UUU_W, H4, H4, ror32)
233
+RVVCALL(OPIVX2, vror_vx_d, OP_UUU_D, H8, H8, ror64)
234
+GEN_VEXT_VX(vror_vx_b, 1)
235
+GEN_VEXT_VX(vror_vx_h, 2)
236
+GEN_VEXT_VX(vror_vx_w, 4)
237
+GEN_VEXT_VX(vror_vx_d, 8)
238
+
239
+RVVCALL(OPIVV2, vrol_vv_b, OP_UUU_B, H1, H1, H1, rol8)
240
+RVVCALL(OPIVV2, vrol_vv_h, OP_UUU_H, H2, H2, H2, rol16)
241
+RVVCALL(OPIVV2, vrol_vv_w, OP_UUU_W, H4, H4, H4, rol32)
242
+RVVCALL(OPIVV2, vrol_vv_d, OP_UUU_D, H8, H8, H8, rol64)
243
+GEN_VEXT_VV(vrol_vv_b, 1)
244
+GEN_VEXT_VV(vrol_vv_h, 2)
245
+GEN_VEXT_VV(vrol_vv_w, 4)
246
+GEN_VEXT_VV(vrol_vv_d, 8)
247
+
248
+RVVCALL(OPIVX2, vrol_vx_b, OP_UUU_B, H1, H1, rol8)
249
+RVVCALL(OPIVX2, vrol_vx_h, OP_UUU_H, H2, H2, rol16)
250
+RVVCALL(OPIVX2, vrol_vx_w, OP_UUU_W, H4, H4, rol32)
251
+RVVCALL(OPIVX2, vrol_vx_d, OP_UUU_D, H8, H8, rol64)
252
+GEN_VEXT_VX(vrol_vx_b, 1)
253
+GEN_VEXT_VX(vrol_vx_h, 2)
254
+GEN_VEXT_VX(vrol_vx_w, 4)
255
+GEN_VEXT_VX(vrol_vx_d, 8)
256
+
257
+static uint64_t brev8(uint64_t val)
57
+{
258
+{
58
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
259
+ val = ((val & 0x5555555555555555ull) << 1) |
59
+ return RISCV_EXCP_ILLEGAL_INST;
260
+ ((val & 0xAAAAAAAAAAAAAAAAull) >> 1);
60
+ }
261
+ val = ((val & 0x3333333333333333ull) << 2) |
61
+
262
+ ((val & 0xCCCCCCCCCCCCCCCCull) >> 2);
62
+ return mctr(env, csrno);
263
+ val = ((val & 0x0F0F0F0F0F0F0F0Full) << 4) |
264
+ ((val & 0xF0F0F0F0F0F0F0F0ull) >> 4);
265
+
266
+ return val;
63
+}
267
+}
64
+
268
+
65
static RISCVException any(CPURISCVState *env, int csrno)
269
+RVVCALL(OPIVV1, vbrev8_v_b, OP_UU_B, H1, H1, brev8)
66
{
270
+RVVCALL(OPIVV1, vbrev8_v_h, OP_UU_H, H2, H2, brev8)
67
return RISCV_EXCP_NONE;
271
+RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
68
@@ -XXX,XX +XXX,XX @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
272
+RVVCALL(OPIVV1, vbrev8_v_d, OP_UU_D, H8, H8, brev8)
69
273
+GEN_VEXT_V(vbrev8_v_b, 1)
70
#else /* CONFIG_USER_ONLY */
274
+GEN_VEXT_V(vbrev8_v_h, 2)
71
275
+GEN_VEXT_V(vbrev8_v_w, 4)
72
+static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
276
+GEN_VEXT_V(vbrev8_v_d, 8)
277
+
278
+#define DO_IDENTITY(a) (a)
279
+RVVCALL(OPIVV1, vrev8_v_b, OP_UU_B, H1, H1, DO_IDENTITY)
280
+RVVCALL(OPIVV1, vrev8_v_h, OP_UU_H, H2, H2, bswap16)
281
+RVVCALL(OPIVV1, vrev8_v_w, OP_UU_W, H4, H4, bswap32)
282
+RVVCALL(OPIVV1, vrev8_v_d, OP_UU_D, H8, H8, bswap64)
283
+GEN_VEXT_V(vrev8_v_b, 1)
284
+GEN_VEXT_V(vrev8_v_h, 2)
285
+GEN_VEXT_V(vrev8_v_w, 4)
286
+GEN_VEXT_V(vrev8_v_d, 8)
287
+
288
+#define DO_ANDN(a, b) ((a) & ~(b))
289
+RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
290
+RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
291
+RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
292
+RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
293
+GEN_VEXT_VV(vandn_vv_b, 1)
294
+GEN_VEXT_VV(vandn_vv_h, 2)
295
+GEN_VEXT_VV(vandn_vv_w, 4)
296
+GEN_VEXT_VV(vandn_vv_d, 8)
297
+
298
+RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
299
+RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
300
+RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
301
+RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
302
+GEN_VEXT_VX(vandn_vx_b, 1)
303
+GEN_VEXT_VX(vandn_vx_h, 2)
304
+GEN_VEXT_VX(vandn_vx_w, 4)
305
+GEN_VEXT_VX(vandn_vx_d, 8)
306
+
307
+RVVCALL(OPIVV1, vbrev_v_b, OP_UU_B, H1, H1, revbit8)
308
+RVVCALL(OPIVV1, vbrev_v_h, OP_UU_H, H2, H2, revbit16)
309
+RVVCALL(OPIVV1, vbrev_v_w, OP_UU_W, H4, H4, revbit32)
310
+RVVCALL(OPIVV1, vbrev_v_d, OP_UU_D, H8, H8, revbit64)
311
+GEN_VEXT_V(vbrev_v_b, 1)
312
+GEN_VEXT_V(vbrev_v_h, 2)
313
+GEN_VEXT_V(vbrev_v_w, 4)
314
+GEN_VEXT_V(vbrev_v_d, 8)
315
+
316
+RVVCALL(OPIVV1, vclz_v_b, OP_UU_B, H1, H1, clz8)
317
+RVVCALL(OPIVV1, vclz_v_h, OP_UU_H, H2, H2, clz16)
318
+RVVCALL(OPIVV1, vclz_v_w, OP_UU_W, H4, H4, clz32)
319
+RVVCALL(OPIVV1, vclz_v_d, OP_UU_D, H8, H8, clz64)
320
+GEN_VEXT_V(vclz_v_b, 1)
321
+GEN_VEXT_V(vclz_v_h, 2)
322
+GEN_VEXT_V(vclz_v_w, 4)
323
+GEN_VEXT_V(vclz_v_d, 8)
324
+
325
+RVVCALL(OPIVV1, vctz_v_b, OP_UU_B, H1, H1, ctz8)
326
+RVVCALL(OPIVV1, vctz_v_h, OP_UU_H, H2, H2, ctz16)
327
+RVVCALL(OPIVV1, vctz_v_w, OP_UU_W, H4, H4, ctz32)
328
+RVVCALL(OPIVV1, vctz_v_d, OP_UU_D, H8, H8, ctz64)
329
+GEN_VEXT_V(vctz_v_b, 1)
330
+GEN_VEXT_V(vctz_v_h, 2)
331
+GEN_VEXT_V(vctz_v_w, 4)
332
+GEN_VEXT_V(vctz_v_d, 8)
333
+
334
+RVVCALL(OPIVV1, vcpop_v_b, OP_UU_B, H1, H1, ctpop8)
335
+RVVCALL(OPIVV1, vcpop_v_h, OP_UU_H, H2, H2, ctpop16)
336
+RVVCALL(OPIVV1, vcpop_v_w, OP_UU_W, H4, H4, ctpop32)
337
+RVVCALL(OPIVV1, vcpop_v_d, OP_UU_D, H8, H8, ctpop64)
338
+GEN_VEXT_V(vcpop_v_b, 1)
339
+GEN_VEXT_V(vcpop_v_h, 2)
340
+GEN_VEXT_V(vcpop_v_w, 4)
341
+GEN_VEXT_V(vcpop_v_d, 8)
342
+
343
+#define DO_SLL(N, M) (N << (M & (sizeof(N) * 8 - 1)))
344
+RVVCALL(OPIVV2, vwsll_vv_b, WOP_UUU_B, H2, H1, H1, DO_SLL)
345
+RVVCALL(OPIVV2, vwsll_vv_h, WOP_UUU_H, H4, H2, H2, DO_SLL)
346
+RVVCALL(OPIVV2, vwsll_vv_w, WOP_UUU_W, H8, H4, H4, DO_SLL)
347
+GEN_VEXT_VV(vwsll_vv_b, 2)
348
+GEN_VEXT_VV(vwsll_vv_h, 4)
349
+GEN_VEXT_VV(vwsll_vv_w, 8)
350
+
351
+RVVCALL(OPIVX2, vwsll_vx_b, WOP_UUU_B, H2, H1, DO_SLL)
352
+RVVCALL(OPIVX2, vwsll_vx_h, WOP_UUU_H, H4, H2, DO_SLL)
353
+RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
354
+GEN_VEXT_VX(vwsll_vx_b, 2)
355
+GEN_VEXT_VX(vwsll_vx_h, 4)
356
+GEN_VEXT_VX(vwsll_vx_w, 8)
357
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
360
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
361
@@ -XXX,XX +XXX,XX @@ static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
362
363
GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
364
GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
365
+
366
+/*
367
+ * Zvbb
368
+ */
369
+
370
+#define GEN_OPIVI_GVEC_TRANS_CHECK(NAME, IMM_MODE, OPIVX, SUF, CHECK) \
371
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
372
+ { \
373
+ if (CHECK(s, a)) { \
374
+ static gen_helper_opivx *const fns[4] = { \
375
+ gen_helper_##OPIVX##_b, \
376
+ gen_helper_##OPIVX##_h, \
377
+ gen_helper_##OPIVX##_w, \
378
+ gen_helper_##OPIVX##_d, \
379
+ }; \
380
+ return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew], \
381
+ IMM_MODE); \
382
+ } \
383
+ return false; \
384
+ }
385
+
386
+#define GEN_OPIVV_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
387
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
388
+ { \
389
+ if (CHECK(s, a)) { \
390
+ static gen_helper_gvec_4_ptr *const fns[4] = { \
391
+ gen_helper_##NAME##_b, \
392
+ gen_helper_##NAME##_h, \
393
+ gen_helper_##NAME##_w, \
394
+ gen_helper_##NAME##_d, \
395
+ }; \
396
+ return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
397
+ } \
398
+ return false; \
399
+ }
400
+
401
+#define GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(NAME, SUF, CHECK) \
402
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
403
+ { \
404
+ if (CHECK(s, a)) { \
405
+ static gen_helper_opivx *const fns[4] = { \
406
+ gen_helper_##NAME##_b, \
407
+ gen_helper_##NAME##_h, \
408
+ gen_helper_##NAME##_w, \
409
+ gen_helper_##NAME##_d, \
410
+ }; \
411
+ return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, \
412
+ fns[s->sew]); \
413
+ } \
414
+ return false; \
415
+ }
416
+
417
+static bool zvbb_vv_check(DisasContext *s, arg_rmrr *a)
73
+{
418
+{
74
+ int evt_index = csrno - CSR_MHPMEVENT3;
419
+ return opivv_check(s, a) && s->cfg_ptr->ext_zvbb == true;
75
+
76
+ *val = env->mhpmevent_val[evt_index];
77
+
78
+ return RISCV_EXCP_NONE;
79
+}
420
+}
80
+
421
+
81
+static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
422
+static bool zvbb_vx_check(DisasContext *s, arg_rmrr *a)
82
+{
423
+{
83
+ int evt_index = csrno - CSR_MHPMEVENT3;
424
+ return opivx_check(s, a) && s->cfg_ptr->ext_zvbb == true;
84
+
85
+ env->mhpmevent_val[evt_index] = val;
86
+
87
+ return RISCV_EXCP_NONE;
88
+}
425
+}
89
+
426
+
90
+static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
427
+/* vrol.v[vx] */
428
+GEN_OPIVV_GVEC_TRANS_CHECK(vrol_vv, rotlv, zvbb_vv_check)
429
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vrol_vx, rotls, zvbb_vx_check)
430
+
431
+/* vror.v[vxi] */
432
+GEN_OPIVV_GVEC_TRANS_CHECK(vror_vv, rotrv, zvbb_vv_check)
433
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vror_vx, rotrs, zvbb_vx_check)
434
+GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri, zvbb_vx_check)
435
+
436
+#define GEN_OPIVX_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
437
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
438
+ { \
439
+ if (CHECK(s, a)) { \
440
+ static gen_helper_opivx *const fns[4] = { \
441
+ gen_helper_##NAME##_b, \
442
+ gen_helper_##NAME##_h, \
443
+ gen_helper_##NAME##_w, \
444
+ gen_helper_##NAME##_d, \
445
+ }; \
446
+ return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
447
+ } \
448
+ return false; \
449
+ }
450
+
451
+/* vandn.v[vx] */
452
+GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andc, zvbb_vv_check)
453
+GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvbb_vx_check)
454
+
455
+#define GEN_OPIV_TRANS(NAME, CHECK) \
456
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
457
+ { \
458
+ if (CHECK(s, a)) { \
459
+ uint32_t data = 0; \
460
+ static gen_helper_gvec_3_ptr *const fns[4] = { \
461
+ gen_helper_##NAME##_b, \
462
+ gen_helper_##NAME##_h, \
463
+ gen_helper_##NAME##_w, \
464
+ gen_helper_##NAME##_d, \
465
+ }; \
466
+ TCGLabel *over = gen_new_label(); \
467
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
468
+ \
469
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
470
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
471
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
472
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
473
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
474
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
475
+ vreg_ofs(s, a->rs2), cpu_env, \
476
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
477
+ data, fns[s->sew]); \
478
+ mark_vs_dirty(s); \
479
+ gen_set_label(over); \
480
+ return true; \
481
+ } \
482
+ return false; \
483
+ }
484
+
485
+static bool zvbb_opiv_check(DisasContext *s, arg_rmr *a)
91
+{
486
+{
92
+ int ctr_index = csrno - CSR_MHPMCOUNTER3 + 3;
487
+ return s->cfg_ptr->ext_zvbb == true &&
93
+
488
+ require_rvv(s) &&
94
+ env->mhpmcounter_val[ctr_index] = val;
489
+ vext_check_isa_ill(s) &&
95
+
490
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
96
+ return RISCV_EXCP_NONE;
97
+}
491
+}
98
+
492
+
99
+static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
493
+GEN_OPIV_TRANS(vbrev8_v, zvbb_opiv_check)
494
+GEN_OPIV_TRANS(vrev8_v, zvbb_opiv_check)
495
+GEN_OPIV_TRANS(vbrev_v, zvbb_opiv_check)
496
+GEN_OPIV_TRANS(vclz_v, zvbb_opiv_check)
497
+GEN_OPIV_TRANS(vctz_v, zvbb_opiv_check)
498
+GEN_OPIV_TRANS(vcpop_v, zvbb_opiv_check)
499
+
500
+static bool vwsll_vv_check(DisasContext *s, arg_rmrr *a)
100
+{
501
+{
101
+ int ctr_index = csrno - CSR_MHPMCOUNTER3H + 3;
502
+ return s->cfg_ptr->ext_zvbb && opivv_widen_check(s, a);
102
+
103
+ env->mhpmcounterh_val[ctr_index] = val;
104
+
105
+ return RISCV_EXCP_NONE;
106
+}
503
+}
107
+
504
+
108
+static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
505
+static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
109
+{
506
+{
110
+ int ctr_index;
507
+ return s->cfg_ptr->ext_zvbb && opivx_widen_check(s, a);
111
+
112
+ if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
113
+ ctr_index = csrno - CSR_MHPMCOUNTER3 + 3;
114
+ } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
115
+ ctr_index = csrno - CSR_HPMCOUNTER3 + 3;
116
+ } else {
117
+ return RISCV_EXCP_ILLEGAL_INST;
118
+ }
119
+ *val = env->mhpmcounter_val[ctr_index];
120
+
121
+ return RISCV_EXCP_NONE;
122
+}
508
+}
123
+
509
+
124
+static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
510
+/* OPIVI without GVEC IR */
125
+{
511
+#define GEN_OPIVI_WIDEN_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
126
+ int ctr_index;
512
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
127
+
513
+ { \
128
+ if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
514
+ if (CHECK(s, a)) { \
129
+ ctr_index = csrno - CSR_MHPMCOUNTER3H + 3;
515
+ static gen_helper_opivx *const fns[3] = { \
130
+ } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
516
+ gen_helper_##OPIVX##_b, \
131
+ ctr_index = csrno - CSR_HPMCOUNTER3H + 3;
517
+ gen_helper_##OPIVX##_h, \
132
+ } else {
518
+ gen_helper_##OPIVX##_w, \
133
+ return RISCV_EXCP_ILLEGAL_INST;
519
+ }; \
134
+ }
520
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, \
135
+ *val = env->mhpmcounterh_val[ctr_index];
521
+ IMM_MODE); \
136
+
522
+ } \
137
+ return RISCV_EXCP_NONE;
523
+ return false; \
138
+}
524
+ }
139
+
525
+
140
+
526
+GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
141
static RISCVException read_time(CPURISCVState *env, int csrno,
527
+GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
142
target_ulong *val)
528
+GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
143
{
144
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
145
[CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },
146
147
/* Performance Counters */
148
- [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
149
- [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
150
- [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
151
- [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
152
- [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
153
- [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
154
- [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
155
- [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
156
- [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
157
- [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
158
- [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
159
- [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
160
- [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
161
- [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
162
- [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
163
- [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
164
- [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
165
- [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
166
- [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
167
- [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
168
- [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
169
- [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
170
- [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
171
- [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
172
- [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
173
- [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
174
- [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
175
- [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
176
- [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },
177
-
178
- [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_zero },
179
- [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_zero },
180
- [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_zero },
181
- [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_zero },
182
- [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_zero },
183
- [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_zero },
184
- [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_zero },
185
- [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_zero },
186
- [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_zero },
187
- [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_zero },
188
- [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_zero },
189
- [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_zero },
190
- [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_zero },
191
- [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_zero },
192
- [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_zero },
193
- [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_zero },
194
- [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_zero },
195
- [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_zero },
196
- [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_zero },
197
- [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_zero },
198
- [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_zero },
199
- [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_zero },
200
- [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_zero },
201
- [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_zero },
202
- [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_zero },
203
- [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_zero },
204
- [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_zero },
205
- [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_zero },
206
- [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_zero },
207
-
208
- [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
209
- write_mcountinhibit },
210
-
211
- [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
212
- [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
213
- [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
214
- [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
215
- [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
216
- [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
217
- [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
218
- [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
219
- [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
220
- [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
221
- [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
222
- [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
223
- [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
224
- [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
225
- [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
226
- [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
227
- [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
228
- [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
229
- [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
230
- [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
231
- [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
232
- [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
233
- [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
234
- [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
235
- [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
236
- [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
237
- [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
238
- [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
239
- [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },
240
-
241
- [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
242
- [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
243
- [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
244
- [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
245
- [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
246
- [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
247
- [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
248
- [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
249
- [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
250
- [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
251
- [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
252
- [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
253
- [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
254
- [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
255
- [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
256
- [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
257
- [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
258
- [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
259
- [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
260
- [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
261
- [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
262
- [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
263
- [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
264
- [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
265
- [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
266
- [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
267
- [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
268
- [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
269
- [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },
270
-
271
- [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
272
- [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
273
- [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
274
- [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
275
- [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
276
- [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
277
- [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
278
- [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
279
- [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
280
- [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
281
- [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
282
- [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
283
- [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
284
- [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
285
- [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
286
- [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
287
- [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
288
- [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
289
- [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
290
- [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
291
- [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
292
- [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
293
- [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
294
- [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
295
- [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
296
- [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
297
- [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
298
- [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
299
- [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
300
+ [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
301
+ [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
302
+ [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
303
+ [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
304
+ [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
305
+ [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
306
+ [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
307
+ [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
308
+ [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
309
+ [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
310
+ [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
311
+ [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
312
+ [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
313
+ [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
314
+ [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
315
+ [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
316
+ [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
317
+ [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
318
+ [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
319
+ [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
320
+ [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
321
+ [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
322
+ [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
323
+ [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
324
+ [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
325
+ [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
326
+ [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
327
+ [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
328
+ [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
329
+
330
+ [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
331
+ write_mhpmcounter },
332
+ [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
333
+ write_mhpmcounter },
334
+ [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
335
+ write_mhpmcounter },
336
+ [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
337
+ write_mhpmcounter },
338
+ [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
339
+ write_mhpmcounter },
340
+ [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
341
+ write_mhpmcounter },
342
+ [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
343
+ write_mhpmcounter },
344
+ [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
345
+ write_mhpmcounter },
346
+ [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
347
+ write_mhpmcounter },
348
+ [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
349
+ write_mhpmcounter },
350
+ [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
351
+ write_mhpmcounter },
352
+ [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
353
+ write_mhpmcounter },
354
+ [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
355
+ write_mhpmcounter },
356
+ [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
357
+ write_mhpmcounter },
358
+ [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
359
+ write_mhpmcounter },
360
+ [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
361
+ write_mhpmcounter },
362
+ [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
363
+ write_mhpmcounter },
364
+ [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
365
+ write_mhpmcounter },
366
+ [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
367
+ write_mhpmcounter },
368
+ [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
369
+ write_mhpmcounter },
370
+ [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
371
+ write_mhpmcounter },
372
+ [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
373
+ write_mhpmcounter },
374
+ [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
375
+ write_mhpmcounter },
376
+ [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
377
+ write_mhpmcounter },
378
+ [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
379
+ write_mhpmcounter },
380
+ [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
381
+ write_mhpmcounter },
382
+ [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
383
+ write_mhpmcounter },
384
+ [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
385
+ write_mhpmcounter },
386
+ [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
387
+ write_mhpmcounter },
388
+
389
+ [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
390
+ write_mcountinhibit },
391
+
392
+ [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
393
+ write_mhpmevent },
394
+ [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
395
+ write_mhpmevent },
396
+ [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
397
+ write_mhpmevent },
398
+ [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
399
+ write_mhpmevent },
400
+ [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
401
+ write_mhpmevent },
402
+ [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
403
+ write_mhpmevent },
404
+ [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
405
+ write_mhpmevent },
406
+ [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
407
+ write_mhpmevent },
408
+ [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
409
+ write_mhpmevent },
410
+ [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
411
+ write_mhpmevent },
412
+ [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
413
+ write_mhpmevent },
414
+ [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
415
+ write_mhpmevent },
416
+ [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
417
+ write_mhpmevent },
418
+ [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
419
+ write_mhpmevent },
420
+ [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
421
+ write_mhpmevent },
422
+ [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
423
+ write_mhpmevent },
424
+ [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
425
+ write_mhpmevent },
426
+ [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
427
+ write_mhpmevent },
428
+ [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
429
+ write_mhpmevent },
430
+ [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
431
+ write_mhpmevent },
432
+ [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
433
+ write_mhpmevent },
434
+ [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
435
+ write_mhpmevent },
436
+ [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
437
+ write_mhpmevent },
438
+ [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
439
+ write_mhpmevent },
440
+ [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
441
+ write_mhpmevent },
442
+ [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
443
+ write_mhpmevent },
444
+ [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
445
+ write_mhpmevent },
446
+ [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
447
+ write_mhpmevent },
448
+ [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
449
+ write_mhpmevent },
450
+
451
+ [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
452
+ [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
453
+ [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
454
+ [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
455
+ [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
456
+ [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
457
+ [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
458
+ [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
459
+ [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
460
+ [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
461
+ [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
462
+ [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
463
+ [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
464
+ [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
465
+ [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
466
+ [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
467
+ [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
468
+ [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
469
+ [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
470
+ [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
471
+ [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
472
+ [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
473
+ [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
474
+ [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
475
+ [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
476
+ [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
477
+ [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
478
+ [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
479
+ [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
480
+
481
+ [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
482
+ write_mhpmcounterh },
483
+ [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
484
+ write_mhpmcounterh },
485
+ [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
486
+ write_mhpmcounterh },
487
+ [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
488
+ write_mhpmcounterh },
489
+ [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
490
+ write_mhpmcounterh },
491
+ [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
492
+ write_mhpmcounterh },
493
+ [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
494
+ write_mhpmcounterh },
495
+ [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
496
+ write_mhpmcounterh },
497
+ [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
498
+ write_mhpmcounterh },
499
+ [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
500
+ write_mhpmcounterh },
501
+ [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
502
+ write_mhpmcounterh },
503
+ [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
504
+ write_mhpmcounterh },
505
+ [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
506
+ write_mhpmcounterh },
507
+ [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
508
+ write_mhpmcounterh },
509
+ [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
510
+ write_mhpmcounterh },
511
+ [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
512
+ write_mhpmcounterh },
513
+ [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
514
+ write_mhpmcounterh },
515
+ [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
516
+ write_mhpmcounterh },
517
+ [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
518
+ write_mhpmcounterh },
519
+ [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
520
+ write_mhpmcounterh },
521
+ [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
522
+ write_mhpmcounterh },
523
+ [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
524
+ write_mhpmcounterh },
525
+ [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
526
+ write_mhpmcounterh },
527
+ [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
528
+ write_mhpmcounterh },
529
+ [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
530
+ write_mhpmcounterh },
531
+ [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
532
+ write_mhpmcounterh },
533
+ [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
534
+ write_mhpmcounterh },
535
+ [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
536
+ write_mhpmcounterh },
537
+ [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
538
+ write_mhpmcounterh },
539
#endif /* !CONFIG_USER_ONLY */
540
};
541
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
542
index XXXXXXX..XXXXXXX 100644
543
--- a/target/riscv/machine.c
544
+++ b/target/riscv/machine.c
545
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
546
VMSTATE_UINTTL(env.scounteren, RISCVCPU),
547
VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
548
VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
549
+ VMSTATE_UINTTL_ARRAY(env.mhpmcounter_val, RISCVCPU, RV_MAX_MHPMCOUNTERS),
550
+ VMSTATE_UINTTL_ARRAY(env.mhpmcounterh_val, RISCVCPU, RV_MAX_MHPMCOUNTERS),
551
+ VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
552
VMSTATE_UINTTL(env.sscratch, RISCVCPU),
553
VMSTATE_UINTTL(env.mscratch, RISCVCPU),
554
VMSTATE_UINT64(env.mfromhost, RISCVCPU),
555
--
2.36.1
--
2.41.0
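To sanity-check the brev8() swizzle introduced in vcrypto_helper.c above (the per-element operation behind vbrev8.v), here is a small standalone program that reuses the same mask-and-shift steps; the main() driver and the test value are illustrative only:

/*
 * brev8() reverses the bit order within each byte of a 64-bit value
 * while keeping byte positions fixed (e.g. 0x01 -> 0x80, 0x0F -> 0xF0).
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t brev8(uint64_t val)
{
    val = ((val & 0x5555555555555555ull) << 1) |
          ((val & 0xAAAAAAAAAAAAAAAAull) >> 1);
    val = ((val & 0x3333333333333333ull) << 2) |
          ((val & 0xCCCCCCCCCCCCCCCCull) >> 2);
    val = ((val & 0x0F0F0F0F0F0F0F0Full) << 4) |
          ((val & 0xF0F0F0F0F0F0F0F0ull) >> 4);

    return val;
}

int main(void)
{
    printf("%016" PRIx64 "\n", brev8(0x010F00FF12345678ull));
    return 0;
}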
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
1
2
3
This commit adds support for the Zvkned vector-crypto extension, which
4
consists of the following instructions:
5
6
* vaesef.[vv,vs]
7
* vaesdf.[vv,vs]
8
* vaesdm.[vv,vs]
9
* vaesz.vs
10
* vaesem.[vv,vs]
11
* vaeskf1.vi
12
* vaeskf2.vi
13
14
Translation functions are defined in
15
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
16
`target/riscv/vcrypto_helper.c`.
17
18
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
19
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
20
[max.chou@sifive.com: Replaced vstart checking by TCG op]
21
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
22
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
23
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
24
Signed-off-by: Max Chou <max.chou@sifive.com>
25
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
26
[max.chou@sifive.com: Imported aes-round.h and exposed x-zvkned
27
property]
28
[max.chou@sifive.com: Fixed endian issues and replaced the vstart & vl
29
egs checking by helper function]
30
[max.chou@sifive.com: Replaced bswap32 calls in aes key expanding]
31
Message-ID: <20230711165917.2629866-10-max.chou@sifive.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
---
34
target/riscv/cpu_cfg.h | 1 +
35
target/riscv/helper.h | 14 ++
36
target/riscv/insn32.decode | 14 ++
37
target/riscv/cpu.c | 4 +-
38
target/riscv/vcrypto_helper.c | 202 +++++++++++++++++++++++
39
target/riscv/insn_trans/trans_rvvk.c.inc | 147 +++++++++++++++++
40
6 files changed, 381 insertions(+), 1 deletion(-)
41
42
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/riscv/cpu_cfg.h
45
+++ b/target/riscv/cpu_cfg.h
46
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
47
bool ext_zve64d;
48
bool ext_zvbb;
49
bool ext_zvbc;
50
+ bool ext_zvkned;
51
bool ext_zmmul;
52
bool ext_zvfbfmin;
53
bool ext_zvfbfwma;
54
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/riscv/helper.h
57
+++ b/target/riscv/helper.h
58
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
59
DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
60
DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
61
DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
62
+
63
+DEF_HELPER_2(egs_check, void, i32, env)
64
+
65
+DEF_HELPER_4(vaesef_vv, void, ptr, ptr, env, i32)
66
+DEF_HELPER_4(vaesef_vs, void, ptr, ptr, env, i32)
67
+DEF_HELPER_4(vaesdf_vv, void, ptr, ptr, env, i32)
68
+DEF_HELPER_4(vaesdf_vs, void, ptr, ptr, env, i32)
69
+DEF_HELPER_4(vaesem_vv, void, ptr, ptr, env, i32)
70
+DEF_HELPER_4(vaesem_vs, void, ptr, ptr, env, i32)
71
+DEF_HELPER_4(vaesdm_vv, void, ptr, ptr, env, i32)
72
+DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
73
+DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
74
+DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
75
+DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
76
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
77
index XXXXXXX..XXXXXXX 100644
78
--- a/target/riscv/insn32.decode
79
+++ b/target/riscv/insn32.decode
80
@@ -XXX,XX +XXX,XX @@
81
@r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
82
@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
83
@r2 ....... ..... ..... ... ..... ....... &r2 %rs1 %rd
84
+@r2_vm_1 ...... . ..... ..... ... ..... ....... &rmr vm=1 %rs2 %rd
85
@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
86
@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
87
@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
88
@@ -XXX,XX +XXX,XX @@ vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
89
vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
90
vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
91
vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
92
+
93
+# *** Zvkned vector crypto extension ***
94
+vaesef_vv 101000 1 ..... 00011 010 ..... 1110111 @r2_vm_1
95
+vaesef_vs 101001 1 ..... 00011 010 ..... 1110111 @r2_vm_1
96
+vaesdf_vv 101000 1 ..... 00001 010 ..... 1110111 @r2_vm_1
97
+vaesdf_vs 101001 1 ..... 00001 010 ..... 1110111 @r2_vm_1
98
+vaesem_vv 101000 1 ..... 00010 010 ..... 1110111 @r2_vm_1
99
+vaesem_vs 101001 1 ..... 00010 010 ..... 1110111 @r2_vm_1
100
+vaesdm_vv 101000 1 ..... 00000 010 ..... 1110111 @r2_vm_1
101
+vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
102
+vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
103
+vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
104
+vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
105
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/riscv/cpu.c
108
+++ b/target/riscv/cpu.c
109
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
110
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
111
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
112
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
113
+ ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
114
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
115
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
116
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
117
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
118
* In principle Zve*x would also suffice here, were they supported
119
* in qemu
120
*/
121
- if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
122
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
123
error_setg(errp,
124
"Vector crypto extensions require V or Zve* extensions");
125
return;
126
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
127
/* Vector cryptography extensions */
128
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
129
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
130
+ DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
131
132
DEFINE_PROP_END_OF_LIST(),
133
};
134
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/riscv/vcrypto_helper.c
137
+++ b/target/riscv/vcrypto_helper.c
138
@@ -XXX,XX +XXX,XX @@
139
#include "qemu/bitops.h"
140
#include "qemu/bswap.h"
141
#include "cpu.h"
142
+#include "crypto/aes.h"
143
+#include "crypto/aes-round.h"
144
#include "exec/memop.h"
145
#include "exec/exec-all.h"
146
#include "exec/helper-proto.h"
147
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
148
GEN_VEXT_VX(vwsll_vx_b, 2)
149
GEN_VEXT_VX(vwsll_vx_h, 4)
150
GEN_VEXT_VX(vwsll_vx_w, 8)
151
+
152
+void HELPER(egs_check)(uint32_t egs, CPURISCVState *env)
153
+{
154
+ uint32_t vl = env->vl;
155
+ uint32_t vstart = env->vstart;
156
+
157
+ if (vl % egs != 0 || vstart % egs != 0) {
158
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
159
+ }
160
+}
161
+
162
+static inline void xor_round_key(AESState *round_state, AESState *round_key)
163
+{
164
+ round_state->v = round_state->v ^ round_key->v;
165
+}
166
+
167
+#define GEN_ZVKNED_HELPER_VV(NAME, ...) \
168
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
169
+ uint32_t desc) \
170
+ { \
171
+ uint32_t vl = env->vl; \
172
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
173
+ uint32_t vta = vext_vta(desc); \
174
+ \
175
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
176
+ AESState round_key; \
177
+ round_key.d[0] = *((uint64_t *)vs2 + H8(i * 2 + 0)); \
178
+ round_key.d[1] = *((uint64_t *)vs2 + H8(i * 2 + 1)); \
179
+ AESState round_state; \
180
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
181
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
182
+ __VA_ARGS__; \
183
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
184
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
185
+ } \
186
+ env->vstart = 0; \
187
+ /* set tail elements to 1s */ \
188
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
189
+ }
190
+
191
+#define GEN_ZVKNED_HELPER_VS(NAME, ...) \
192
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
193
+ uint32_t desc) \
194
+ { \
195
+ uint32_t vl = env->vl; \
196
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
197
+ uint32_t vta = vext_vta(desc); \
198
+ \
199
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
200
+ AESState round_key; \
201
+ round_key.d[0] = *((uint64_t *)vs2 + H8(0)); \
202
+ round_key.d[1] = *((uint64_t *)vs2 + H8(1)); \
203
+ AESState round_state; \
204
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
205
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
206
+ __VA_ARGS__; \
207
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
208
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
209
+ } \
210
+ env->vstart = 0; \
211
+ /* set tail elements to 1s */ \
212
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
213
+ }
214
+
215
+GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
216
+ &round_state,
217
+ &round_key,
218
+ false);)
219
+GEN_ZVKNED_HELPER_VS(vaesef_vs, aesenc_SB_SR_AK(&round_state,
220
+ &round_state,
221
+ &round_key,
222
+ false);)
223
+GEN_ZVKNED_HELPER_VV(vaesdf_vv, aesdec_ISB_ISR_AK(&round_state,
224
+ &round_state,
225
+ &round_key,
226
+ false);)
227
+GEN_ZVKNED_HELPER_VS(vaesdf_vs, aesdec_ISB_ISR_AK(&round_state,
228
+ &round_state,
229
+ &round_key,
230
+ false);)
231
+GEN_ZVKNED_HELPER_VV(vaesem_vv, aesenc_SB_SR_MC_AK(&round_state,
232
+ &round_state,
233
+ &round_key,
234
+ false);)
235
+GEN_ZVKNED_HELPER_VS(vaesem_vs, aesenc_SB_SR_MC_AK(&round_state,
236
+ &round_state,
237
+ &round_key,
238
+ false);)
239
+GEN_ZVKNED_HELPER_VV(vaesdm_vv, aesdec_ISB_ISR_AK_IMC(&round_state,
240
+ &round_state,
241
+ &round_key,
242
+ false);)
243
+GEN_ZVKNED_HELPER_VS(vaesdm_vs, aesdec_ISB_ISR_AK_IMC(&round_state,
244
+ &round_state,
245
+ &round_key,
246
+ false);)
247
+GEN_ZVKNED_HELPER_VS(vaesz_vs, xor_round_key(&round_state, &round_key);)
248
+
249
+void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
250
+ CPURISCVState *env, uint32_t desc)
251
+{
252
+ uint32_t *vd = vd_vptr;
253
+ uint32_t *vs2 = vs2_vptr;
254
+ uint32_t vl = env->vl;
255
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
256
+ uint32_t vta = vext_vta(desc);
257
+
258
+ uimm &= 0b1111;
259
+ if (uimm > 10 || uimm == 0) {
260
+ uimm ^= 0b1000;
261
+ }
262
+
263
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
264
+ uint32_t rk[8], tmp;
265
+ static const uint32_t rcon[] = {
266
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
267
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
268
+ };
269
+
270
+ rk[0] = vs2[i * 4 + H4(0)];
271
+ rk[1] = vs2[i * 4 + H4(1)];
272
+ rk[2] = vs2[i * 4 + H4(2)];
273
+ rk[3] = vs2[i * 4 + H4(3)];
274
+ tmp = ror32(rk[3], 8);
275
+
276
+ rk[4] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
277
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
278
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
279
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
280
+ ^ rcon[uimm - 1];
281
+ rk[5] = rk[1] ^ rk[4];
282
+ rk[6] = rk[2] ^ rk[5];
283
+ rk[7] = rk[3] ^ rk[6];
284
+
285
+ vd[i * 4 + H4(0)] = rk[4];
286
+ vd[i * 4 + H4(1)] = rk[5];
287
+ vd[i * 4 + H4(2)] = rk[6];
288
+ vd[i * 4 + H4(3)] = rk[7];
289
+ }
290
+ env->vstart = 0;
291
+ /* set tail elements to 1s */
292
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
293
+}
294
+
295
+void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
296
+ CPURISCVState *env, uint32_t desc)
297
+{
298
+ uint32_t *vd = vd_vptr;
299
+ uint32_t *vs2 = vs2_vptr;
300
+ uint32_t vl = env->vl;
301
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
302
+ uint32_t vta = vext_vta(desc);
303
+
304
+ uimm &= 0b1111;
305
+ if (uimm > 14 || uimm < 2) {
306
+ uimm ^= 0b1000;
307
+ }
308
+
309
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
310
+ uint32_t rk[12], tmp;
311
+ static const uint32_t rcon[] = {
312
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
313
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
314
+ };
315
+
316
+ rk[0] = vd[i * 4 + H4(0)];
317
+ rk[1] = vd[i * 4 + H4(1)];
318
+ rk[2] = vd[i * 4 + H4(2)];
319
+ rk[3] = vd[i * 4 + H4(3)];
320
+ rk[4] = vs2[i * 4 + H4(0)];
321
+ rk[5] = vs2[i * 4 + H4(1)];
322
+ rk[6] = vs2[i * 4 + H4(2)];
323
+ rk[7] = vs2[i * 4 + H4(3)];
324
+
325
+ if (uimm % 2 == 0) {
326
+ tmp = ror32(rk[7], 8);
327
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
328
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
329
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
330
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
331
+ ^ rcon[(uimm - 1) / 2];
332
+ } else {
333
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(rk[7] >> 24) & 0xff] << 24) |
334
+ ((uint32_t)AES_sbox[(rk[7] >> 16) & 0xff] << 16) |
335
+ ((uint32_t)AES_sbox[(rk[7] >> 8) & 0xff] << 8) |
336
+ ((uint32_t)AES_sbox[(rk[7] >> 0) & 0xff] << 0));
337
+ }
338
+ rk[9] = rk[1] ^ rk[8];
339
+ rk[10] = rk[2] ^ rk[9];
340
+ rk[11] = rk[3] ^ rk[10];
341
+
342
+ vd[i * 4 + H4(0)] = rk[8];
343
+ vd[i * 4 + H4(1)] = rk[9];
344
+ vd[i * 4 + H4(2)] = rk[10];
345
+ vd[i * 4 + H4(3)] = rk[11];
346
+ }
347
+ env->vstart = 0;
348
+ /* set tail elements to 1s */
349
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
350
+}
351
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
352
index XXXXXXX..XXXXXXX 100644
353
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
354
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
355
@@ -XXX,XX +XXX,XX @@ static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
356
GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
357
GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
358
GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
359
+
360
+/*
361
+ * Zvkned
362
+ */
363
+
364
+#define ZVKNED_EGS 4
365
+
366
+#define GEN_V_UNMASKED_TRANS(NAME, CHECK, EGS) \
367
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
368
+ { \
369
+ if (CHECK(s, a)) { \
370
+ TCGv_ptr rd_v, rs2_v; \
371
+ TCGv_i32 desc, egs; \
372
+ uint32_t data = 0; \
373
+ TCGLabel *over = gen_new_label(); \
374
+ \
375
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
376
+ /* save opcode for unwinding in case we throw an exception */ \
377
+ decode_save_opc(s); \
378
+ egs = tcg_constant_i32(EGS); \
379
+ gen_helper_egs_check(egs, cpu_env); \
380
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
381
+ } \
382
+ \
383
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
384
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
385
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
386
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
387
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
388
+ rd_v = tcg_temp_new_ptr(); \
389
+ rs2_v = tcg_temp_new_ptr(); \
390
+ desc = tcg_constant_i32( \
391
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
392
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
393
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
394
+ gen_helper_##NAME(rd_v, rs2_v, cpu_env, desc); \
395
+ mark_vs_dirty(s); \
396
+ gen_set_label(over); \
397
+ return true; \
398
+ } \
399
+ return false; \
400
+ }
401
+
402
+static bool vaes_check_vv(DisasContext *s, arg_rmr *a)
403
+{
404
+ int egw_bytes = ZVKNED_EGS << s->sew;
405
+ return s->cfg_ptr->ext_zvkned == true &&
406
+ require_rvv(s) &&
407
+ vext_check_isa_ill(s) &&
408
+ MAXSZ(s) >= egw_bytes &&
409
+ require_align(a->rd, s->lmul) &&
410
+ require_align(a->rs2, s->lmul) &&
411
+ s->sew == MO_32;
412
+}
413
+
414
+static bool vaes_check_overlap(DisasContext *s, int vd, int vs2)
415
+{
416
+ int8_t op_size = s->lmul <= 0 ? 1 : 1 << s->lmul;
417
+ return !is_overlapped(vd, op_size, vs2, 1);
418
+}
419
+
420
+static bool vaes_check_vs(DisasContext *s, arg_rmr *a)
421
+{
422
+ int egw_bytes = ZVKNED_EGS << s->sew;
423
+ return vaes_check_overlap(s, a->rd, a->rs2) &&
424
+ MAXSZ(s) >= egw_bytes &&
425
+ s->cfg_ptr->ext_zvkned == true &&
426
+ require_rvv(s) &&
427
+ vext_check_isa_ill(s) &&
428
+ require_align(a->rd, s->lmul) &&
429
+ s->sew == MO_32;
430
+}
431
+
432
+GEN_V_UNMASKED_TRANS(vaesef_vv, vaes_check_vv, ZVKNED_EGS)
433
+GEN_V_UNMASKED_TRANS(vaesef_vs, vaes_check_vs, ZVKNED_EGS)
434
+GEN_V_UNMASKED_TRANS(vaesdf_vv, vaes_check_vv, ZVKNED_EGS)
435
+GEN_V_UNMASKED_TRANS(vaesdf_vs, vaes_check_vs, ZVKNED_EGS)
436
+GEN_V_UNMASKED_TRANS(vaesdm_vv, vaes_check_vv, ZVKNED_EGS)
437
+GEN_V_UNMASKED_TRANS(vaesdm_vs, vaes_check_vs, ZVKNED_EGS)
438
+GEN_V_UNMASKED_TRANS(vaesz_vs, vaes_check_vs, ZVKNED_EGS)
439
+GEN_V_UNMASKED_TRANS(vaesem_vv, vaes_check_vv, ZVKNED_EGS)
440
+GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
441
+
442
+#define GEN_VI_UNMASKED_TRANS(NAME, CHECK, EGS) \
443
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
444
+ { \
445
+ if (CHECK(s, a)) { \
446
+ TCGv_ptr rd_v, rs2_v; \
447
+ TCGv_i32 uimm_v, desc, egs; \
448
+ uint32_t data = 0; \
449
+ TCGLabel *over = gen_new_label(); \
450
+ \
451
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
452
+ /* save opcode for unwinding in case we throw an exception */ \
453
+ decode_save_opc(s); \
454
+ egs = tcg_constant_i32(EGS); \
455
+ gen_helper_egs_check(egs, cpu_env); \
456
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
457
+ } \
458
+ \
459
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
460
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
461
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
462
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
463
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
464
+ \
465
+ rd_v = tcg_temp_new_ptr(); \
466
+ rs2_v = tcg_temp_new_ptr(); \
467
+ uimm_v = tcg_constant_i32(a->rs1); \
468
+ desc = tcg_constant_i32( \
469
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
470
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
471
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
472
+ gen_helper_##NAME(rd_v, rs2_v, uimm_v, cpu_env, desc); \
473
+ mark_vs_dirty(s); \
474
+ gen_set_label(over); \
475
+ return true; \
476
+ } \
477
+ return false; \
478
+ }
479
+
480
+static bool vaeskf1_check(DisasContext *s, arg_vaeskf1_vi *a)
481
+{
482
+ int egw_bytes = ZVKNED_EGS << s->sew;
483
+ return s->cfg_ptr->ext_zvkned == true &&
484
+ require_rvv(s) &&
485
+ vext_check_isa_ill(s) &&
486
+ MAXSZ(s) >= egw_bytes &&
487
+ s->sew == MO_32 &&
488
+ require_align(a->rd, s->lmul) &&
489
+ require_align(a->rs2, s->lmul);
490
+}
491
+
492
+static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
493
+{
494
+ int egw_bytes = ZVKNED_EGS << s->sew;
495
+ return s->cfg_ptr->ext_zvkned == true &&
496
+ require_rvv(s) &&
497
+ vext_check_isa_ill(s) &&
498
+ MAXSZ(s) >= egw_bytes &&
499
+ s->sew == MO_32 &&
500
+ require_align(a->rd, s->lmul) &&
501
+ require_align(a->rs2, s->lmul);
502
+}
503
+
504
+GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
505
+GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
506
--
507
2.41.0
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
1
2
3
This commit adds support for the Zvknh vector-crypto extension, which
4
consists of the following instructions:
5
6
* vsha2ms.vv
7
* vsha2c[hl].vv
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
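For readers comparing the helpers with the SHA-2 specification: vsha2ms
produces four message-schedule words per element group from the usual
recurrence

    W[t] = sig1(W[t-2]) + W[t-7] + sig0(W[t-15]) + W[t-16]

where vd supplies {W0..W3}, vs2 supplies {W4, W9, W10, W11}, vs1 supplies
{W12..W15} (W13 is unused), and {W16..W19} are written back to vd.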
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
14
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
15
[max.chou@sifive.com: Replaced vstart checking by TCG op]
16
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
17
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
18
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
19
Signed-off-by: Max Chou <max.chou@sifive.com>
20
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
21
[max.chou@sifive.com: Exposed x-zvknha & x-zvknhb properties]
22
[max.chou@sifive.com: Moved SEW selection to happen during
23
translation]
24
Message-ID: <20230711165917.2629866-11-max.chou@sifive.com>
25
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
26
---
27
target/riscv/cpu_cfg.h | 2 +
28
target/riscv/helper.h | 6 +
29
target/riscv/insn32.decode | 5 +
30
target/riscv/cpu.c | 13 +-
31
target/riscv/vcrypto_helper.c | 238 +++++++++++++++++++++++
32
target/riscv/insn_trans/trans_rvvk.c.inc | 129 ++++++++++++
33
6 files changed, 390 insertions(+), 3 deletions(-)
34
35
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/riscv/cpu_cfg.h
38
+++ b/target/riscv/cpu_cfg.h
39
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
40
bool ext_zvbb;
41
bool ext_zvbc;
42
bool ext_zvkned;
43
+ bool ext_zvknha;
44
+ bool ext_zvknhb;
45
bool ext_zmmul;
46
bool ext_zvfbfmin;
47
bool ext_zvfbfwma;
48
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/riscv/helper.h
51
+++ b/target/riscv/helper.h
52
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
53
DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
54
DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
55
DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
56
+
57
+DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
58
+DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
59
+DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
60
+DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
61
+DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
62
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/riscv/insn32.decode
65
+++ b/target/riscv/insn32.decode
66
@@ -XXX,XX +XXX,XX @@ vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
67
vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
68
vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
69
vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
70
+
71
+# *** Zvknh vector crypto extension ***
72
+vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
73
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
74
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
75
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/riscv/cpu.c
78
+++ b/target/riscv/cpu.c
79
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
80
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
81
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
82
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
83
+ ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
84
+ ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
85
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
86
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
87
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
88
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
89
* In principle Zve*x would also suffice here, were they supported
90
* in qemu
91
*/
92
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
93
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
94
+ !cpu->cfg.ext_zve32f) {
95
error_setg(errp,
96
"Vector crypto extensions require V or Zve* extensions");
97
return;
98
}
99
100
- if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
101
- error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
102
+ if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
103
+ error_setg(
104
+ errp,
105
+ "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
106
return;
107
}
108
109
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
110
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
111
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
112
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
113
+ DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
114
+ DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
115
116
DEFINE_PROP_END_OF_LIST(),
117
};
118
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/target/riscv/vcrypto_helper.c
121
+++ b/target/riscv/vcrypto_helper.c
122
@@ -XXX,XX +XXX,XX @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
123
/* set tail elements to 1s */
124
vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
125
}
126
+
127
+static inline uint32_t sig0_sha256(uint32_t x)
128
+{
129
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
130
+}
131
+
132
+static inline uint32_t sig1_sha256(uint32_t x)
133
+{
134
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
135
+}
136
+
137
+static inline uint64_t sig0_sha512(uint64_t x)
138
+{
139
+ return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
140
+}
141
+
142
+static inline uint64_t sig1_sha512(uint64_t x)
143
+{
144
+ return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
145
+}
146
+
147
+static inline void vsha2ms_e32(uint32_t *vd, uint32_t *vs1, uint32_t *vs2)
148
+{
149
+ uint32_t res[4];
150
+ res[0] = sig1_sha256(vs1[H4(2)]) + vs2[H4(1)] + sig0_sha256(vd[H4(1)]) +
151
+ vd[H4(0)];
152
+ res[1] = sig1_sha256(vs1[H4(3)]) + vs2[H4(2)] + sig0_sha256(vd[H4(2)]) +
153
+ vd[H4(1)];
154
+ res[2] =
155
+ sig1_sha256(res[0]) + vs2[H4(3)] + sig0_sha256(vd[H4(3)]) + vd[H4(2)];
156
+ res[3] =
157
+ sig1_sha256(res[1]) + vs1[H4(0)] + sig0_sha256(vs2[H4(0)]) + vd[H4(3)];
158
+ vd[H4(3)] = res[3];
159
+ vd[H4(2)] = res[2];
160
+ vd[H4(1)] = res[1];
161
+ vd[H4(0)] = res[0];
162
+}
163
+
164
+static inline void vsha2ms_e64(uint64_t *vd, uint64_t *vs1, uint64_t *vs2)
165
+{
166
+ uint64_t res[4];
167
+ res[0] = sig1_sha512(vs1[2]) + vs2[1] + sig0_sha512(vd[1]) + vd[0];
168
+ res[1] = sig1_sha512(vs1[3]) + vs2[2] + sig0_sha512(vd[2]) + vd[1];
169
+ res[2] = sig1_sha512(res[0]) + vs2[3] + sig0_sha512(vd[3]) + vd[2];
170
+ res[3] = sig1_sha512(res[1]) + vs1[0] + sig0_sha512(vs2[0]) + vd[3];
171
+ vd[3] = res[3];
172
+ vd[2] = res[2];
173
+ vd[1] = res[1];
174
+ vd[0] = res[0];
175
+}
176
+
177
+void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
178
+ uint32_t desc)
179
+{
180
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
181
+ uint32_t esz = sew == MO_32 ? 4 : 8;
182
+ uint32_t total_elems;
183
+ uint32_t vta = vext_vta(desc);
184
+
185
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
186
+ if (sew == MO_32) {
187
+ vsha2ms_e32(((uint32_t *)vd) + i * 4, ((uint32_t *)vs1) + i * 4,
188
+ ((uint32_t *)vs2) + i * 4);
189
+ } else {
190
+ /* If not 32 then SEW should be 64 */
191
+ vsha2ms_e64(((uint64_t *)vd) + i * 4, ((uint64_t *)vs1) + i * 4,
192
+ ((uint64_t *)vs2) + i * 4);
193
+ }
194
+ }
195
+ /* set tail elements to 1s */
196
+ total_elems = vext_get_total_elems(env, desc, esz);
197
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
198
+ env->vstart = 0;
199
+}
200
+
201
+static inline uint64_t sum0_64(uint64_t x)
202
+{
203
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
204
+}
205
+
206
+static inline uint32_t sum0_32(uint32_t x)
207
+{
208
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
209
+}
210
+
211
+static inline uint64_t sum1_64(uint64_t x)
212
+{
213
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
214
+}
215
+
216
+static inline uint32_t sum1_32(uint32_t x)
217
+{
218
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
219
+}
220
+
221
+#define ch(x, y, z) ((x & y) ^ ((~x) & z))
222
+
223
+#define maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
224
+
225
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
226
+{
227
+ uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
228
+ uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
229
+ uint64_t W0 = vs1[0], W1 = vs1[1];
230
+ uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
231
+ uint64_t T2 = sum0_64(a) + maj(a, b, c);
232
+
233
+ h = g;
234
+ g = f;
235
+ f = e;
236
+ e = d + T1;
237
+ d = c;
238
+ c = b;
239
+ b = a;
240
+ a = T1 + T2;
241
+
242
+ T1 = h + sum1_64(e) + ch(e, f, g) + W1;
243
+ T2 = sum0_64(a) + maj(a, b, c);
244
+ h = g;
245
+ g = f;
246
+ f = e;
247
+ e = d + T1;
248
+ d = c;
249
+ c = b;
250
+ b = a;
251
+ a = T1 + T2;
252
+
253
+ vd[0] = f;
254
+ vd[1] = e;
255
+ vd[2] = b;
256
+ vd[3] = a;
257
+}
258
+
259
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
260
+{
261
+ uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
262
+ uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
263
+ uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
264
+ uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
265
+ uint32_t T2 = sum0_32(a) + maj(a, b, c);
266
+
267
+ h = g;
268
+ g = f;
269
+ f = e;
270
+ e = d + T1;
271
+ d = c;
272
+ c = b;
273
+ b = a;
274
+ a = T1 + T2;
275
+
276
+ T1 = h + sum1_32(e) + ch(e, f, g) + W1;
277
+ T2 = sum0_32(a) + maj(a, b, c);
278
+ h = g;
279
+ g = f;
280
+ f = e;
281
+ e = d + T1;
282
+ d = c;
283
+ c = b;
284
+ b = a;
285
+ a = T1 + T2;
286
+
287
+ vd[H4(0)] = f;
288
+ vd[H4(1)] = e;
289
+ vd[H4(2)] = b;
290
+ vd[H4(3)] = a;
291
+}
292
+
293
+void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
294
+ uint32_t desc)
295
+{
296
+ const uint32_t esz = 4;
297
+ uint32_t total_elems;
298
+ uint32_t vta = vext_vta(desc);
299
+
300
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
301
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
302
+ ((uint32_t *)vs1) + 4 * i + 2);
303
+ }
304
+
305
+ /* set tail elements to 1s */
306
+ total_elems = vext_get_total_elems(env, desc, esz);
307
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
308
+ env->vstart = 0;
309
+}
310
+
311
+void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
312
+ uint32_t desc)
313
+{
314
+ const uint32_t esz = 8;
315
+ uint32_t total_elems;
316
+ uint32_t vta = vext_vta(desc);
317
+
318
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
319
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
320
+ ((uint64_t *)vs1) + 4 * i + 2);
321
+ }
322
+
323
+ /* set tail elements to 1s */
324
+ total_elems = vext_get_total_elems(env, desc, esz);
325
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
326
+ env->vstart = 0;
327
+}
328
+
329
+void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
330
+ uint32_t desc)
331
+{
332
+ const uint32_t esz = 4;
333
+ uint32_t total_elems;
334
+ uint32_t vta = vext_vta(desc);
335
+
336
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
337
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
338
+ (((uint32_t *)vs1) + 4 * i));
339
+ }
340
+
341
+ /* set tail elements to 1s */
342
+ total_elems = vext_get_total_elems(env, desc, esz);
343
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
344
+ env->vstart = 0;
345
+}
346
+
347
+void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
348
+ uint32_t desc)
349
+{
350
+ uint32_t esz = 8;
351
+ uint32_t total_elems;
352
+ uint32_t vta = vext_vta(desc);
353
+
354
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
355
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
356
+ (((uint64_t *)vs1) + 4 * i));
357
+ }
358
+
359
+ /* set tail elements to 1s */
360
+ total_elems = vext_get_total_elems(env, desc, esz);
361
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
362
+ env->vstart = 0;
363
+}
364
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
365
index XXXXXXX..XXXXXXX 100644
366
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
367
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
368
@@ -XXX,XX +XXX,XX @@ static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
369
370
GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
371
GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
372
+
373
+/*
374
+ * Zvknh
375
+ */
376
+
377
+#define ZVKNH_EGS 4
378
+
379
+#define GEN_VV_UNMASKED_TRANS(NAME, CHECK, EGS) \
380
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
381
+ { \
382
+ if (CHECK(s, a)) { \
383
+ uint32_t data = 0; \
384
+ TCGLabel *over = gen_new_label(); \
385
+ TCGv_i32 egs; \
386
+ \
387
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
388
+ /* save opcode for unwinding in case we throw an exception */ \
389
+ decode_save_opc(s); \
390
+ egs = tcg_constant_i32(EGS); \
391
+ gen_helper_egs_check(egs, cpu_env); \
392
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
393
+ } \
394
+ \
395
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
396
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
397
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
398
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
399
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
400
+ \
401
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
402
+ vreg_ofs(s, a->rs2), cpu_env, \
403
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
404
+ data, gen_helper_##NAME); \
405
+ \
406
+ mark_vs_dirty(s); \
407
+ gen_set_label(over); \
408
+ return true; \
409
+ } \
410
+ return false; \
411
+ }
412
+
413
+static bool vsha_check_sew(DisasContext *s)
414
+{
415
+ return (s->cfg_ptr->ext_zvknha == true && s->sew == MO_32) ||
416
+ (s->cfg_ptr->ext_zvknhb == true &&
417
+ (s->sew == MO_32 || s->sew == MO_64));
418
+}
419
+
420
+static bool vsha_check(DisasContext *s, arg_rmrr *a)
421
+{
422
+ int egw_bytes = ZVKNH_EGS << s->sew;
423
+ int mult = 1 << MAX(s->lmul, 0);
424
+ return opivv_check(s, a) &&
425
+ vsha_check_sew(s) &&
426
+ MAXSZ(s) >= egw_bytes &&
427
+ !is_overlapped(a->rd, mult, a->rs1, mult) &&
428
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
429
+ s->lmul >= 0;
430
+}
431
+
432
+GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, ZVKNH_EGS)
433
+
434
+static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
435
+{
436
+ if (vsha_check(s, a)) {
437
+ uint32_t data = 0;
438
+ TCGLabel *over = gen_new_label();
439
+ TCGv_i32 egs;
440
+
441
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
442
+ /* save opcode for unwinding in case we throw an exception */
443
+ decode_save_opc(s);
444
+ egs = tcg_constant_i32(ZVKNH_EGS);
445
+ gen_helper_egs_check(egs, cpu_env);
446
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
447
+ }
448
+
449
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
450
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
451
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
452
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
453
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
454
+
455
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
456
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
457
+ s->cfg_ptr->vlen / 8, data,
458
+ s->sew == MO_32 ?
459
+ gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv);
460
+
461
+ mark_vs_dirty(s);
462
+ gen_set_label(over);
463
+ return true;
464
+ }
465
+ return false;
466
+}
467
+
468
+static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
469
+{
470
+ if (vsha_check(s, a)) {
471
+ uint32_t data = 0;
472
+ TCGLabel *over = gen_new_label();
473
+ TCGv_i32 egs;
474
+
475
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
476
+ /* save opcode for unwinding in case we throw an exception */
477
+ decode_save_opc(s);
478
+ egs = tcg_constant_i32(ZVKNH_EGS);
479
+ gen_helper_egs_check(egs, cpu_env);
480
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
481
+ }
482
+
483
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
484
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
485
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
486
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
487
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
488
+
489
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
490
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
491
+ s->cfg_ptr->vlen / 8, data,
492
+ s->sew == MO_32 ?
493
+ gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv);
494
+
495
+ mark_vs_dirty(s);
496
+ gen_set_label(over);
497
+ return true;
498
+ }
499
+ return false;
500
+}
501
--
502
2.41.0
1
1
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
2
3
This commit adds support for the Zvksh vector-crypto extension, which
4
consists of the following instructions:
5
6
* vsm3me.vv
7
* vsm3c.vi
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
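One structural difference from the AES and SHA-2 helpers: Zvksh works on
256-bit element groups (EGS = 8 at SEW = 32), so the loops below step
through eight 32-bit words at a time rather than four.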
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
14
[max.chou@sifive.com: Replaced vstart checking by TCG op]
15
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
16
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
[max.chou@sifive.com: Exposed x-zvksh property]
20
Message-ID: <20230711165917.2629866-12-max.chou@sifive.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
22
---
23
target/riscv/cpu_cfg.h | 1 +
24
target/riscv/helper.h | 3 +
25
target/riscv/insn32.decode | 4 +
26
target/riscv/cpu.c | 6 +-
27
target/riscv/vcrypto_helper.c | 134 +++++++++++++++++++++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 31 ++++++
29
6 files changed, 177 insertions(+), 2 deletions(-)
30
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu_cfg.h
34
+++ b/target/riscv/cpu_cfg.h
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
36
bool ext_zvkned;
37
bool ext_zvknha;
38
bool ext_zvknhb;
39
+ bool ext_zvksh;
40
bool ext_zmmul;
41
bool ext_zvfbfmin;
42
bool ext_zvfbfwma;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
48
DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
49
DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
53
+DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
54
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/riscv/insn32.decode
57
+++ b/target/riscv/insn32.decode
58
@@ -XXX,XX +XXX,XX @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
59
vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
61
vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
+
63
+# *** Zvksh vector crypto extension ***
64
+vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
65
+vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/riscv/cpu.c
69
+++ b/target/riscv/cpu.c
70
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
71
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
72
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
73
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
74
+ ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
75
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
76
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
77
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
78
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
79
* In principle Zve*x would also suffice here, were they supported
80
* in qemu
81
*/
82
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
83
- !cpu->cfg.ext_zve32f) {
84
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
85
+ cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
86
error_setg(errp,
87
"Vector crypto extensions require V or Zve* extensions");
88
return;
89
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
90
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
91
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
92
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
93
+ DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
94
95
DEFINE_PROP_END_OF_LIST(),
96
};
97
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/riscv/vcrypto_helper.c
100
+++ b/target/riscv/vcrypto_helper.c
101
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
102
vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
103
env->vstart = 0;
104
}
105
+
106
+static inline uint32_t p1(uint32_t x)
107
+{
108
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
109
+}
110
+
111
+static inline uint32_t zvksh_w(uint32_t m16, uint32_t m9, uint32_t m3,
112
+ uint32_t m13, uint32_t m6)
113
+{
114
+ return p1(m16 ^ m9 ^ rol32(m3, 15)) ^ rol32(m13, 7) ^ m6;
115
+}
116
+
117
+void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
118
+ CPURISCVState *env, uint32_t desc)
119
+{
120
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
121
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
122
+ uint32_t vta = vext_vta(desc);
123
+ uint32_t *vd = vd_vptr;
124
+ uint32_t *vs1 = vs1_vptr;
125
+ uint32_t *vs2 = vs2_vptr;
126
+
127
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
128
+ uint32_t w[24];
129
+ for (int j = 0; j < 8; j++) {
130
+ w[j] = bswap32(vs1[H4((i * 8) + j)]);
131
+ w[j + 8] = bswap32(vs2[H4((i * 8) + j)]);
132
+ }
133
+ for (int j = 0; j < 8; j++) {
134
+ w[j + 16] =
135
+ zvksh_w(w[j], w[j + 7], w[j + 13], w[j + 3], w[j + 10]);
136
+ }
137
+ for (int j = 0; j < 8; j++) {
138
+ vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
139
+ }
140
+ }
141
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
142
+ env->vstart = 0;
143
+}
144
+
145
+static inline uint32_t ff1(uint32_t x, uint32_t y, uint32_t z)
146
+{
147
+ return x ^ y ^ z;
148
+}
149
+
150
+static inline uint32_t ff2(uint32_t x, uint32_t y, uint32_t z)
151
+{
152
+ return (x & y) | (x & z) | (y & z);
153
+}
154
+
155
+static inline uint32_t ff_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
156
+{
157
+ return (j <= 15) ? ff1(x, y, z) : ff2(x, y, z);
158
+}
159
+
160
+static inline uint32_t gg1(uint32_t x, uint32_t y, uint32_t z)
161
+{
162
+ return x ^ y ^ z;
163
+}
164
+
165
+static inline uint32_t gg2(uint32_t x, uint32_t y, uint32_t z)
166
+{
167
+ return (x & y) | (~x & z);
168
+}
169
+
170
+static inline uint32_t gg_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
171
+{
172
+ return (j <= 15) ? gg1(x, y, z) : gg2(x, y, z);
173
+}
174
+
175
+static inline uint32_t t_j(uint32_t j)
176
+{
177
+ return (j <= 15) ? 0x79cc4519 : 0x7a879d8a;
178
+}
179
+
180
+static inline uint32_t p_0(uint32_t x)
181
+{
182
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
183
+}
184
+
185
+static void sm3c(uint32_t *vd, uint32_t *vs1, uint32_t *vs2, uint32_t uimm)
186
+{
187
+ uint32_t x0, x1;
188
+ uint32_t j;
189
+ uint32_t ss1, ss2, tt1, tt2;
190
+ x0 = vs2[0] ^ vs2[4];
191
+ x1 = vs2[1] ^ vs2[5];
192
+ j = 2 * uimm;
193
+ ss1 = rol32(rol32(vs1[0], 12) + vs1[4] + rol32(t_j(j), j % 32), 7);
194
+ ss2 = ss1 ^ rol32(vs1[0], 12);
195
+ tt1 = ff_j(vs1[0], vs1[1], vs1[2], j) + vs1[3] + ss2 + x0;
196
+ tt2 = gg_j(vs1[4], vs1[5], vs1[6], j) + vs1[7] + ss1 + vs2[0];
197
+ vs1[3] = vs1[2];
198
+ vd[3] = rol32(vs1[1], 9);
199
+ vs1[1] = vs1[0];
200
+ vd[1] = tt1;
201
+ vs1[7] = vs1[6];
202
+ vd[7] = rol32(vs1[5], 19);
203
+ vs1[5] = vs1[4];
204
+ vd[5] = p_0(tt2);
205
+ j = 2 * uimm + 1;
206
+ ss1 = rol32(rol32(vd[1], 12) + vd[5] + rol32(t_j(j), j % 32), 7);
207
+ ss2 = ss1 ^ rol32(vd[1], 12);
208
+ tt1 = ff_j(vd[1], vs1[1], vd[3], j) + vs1[3] + ss2 + x1;
209
+ tt2 = gg_j(vd[5], vs1[5], vd[7], j) + vs1[7] + ss1 + vs2[1];
210
+ vd[2] = rol32(vs1[1], 9);
211
+ vd[0] = tt1;
212
+ vd[6] = rol32(vs1[5], 19);
213
+ vd[4] = p_0(tt2);
214
+}
215
+
216
+void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
217
+ CPURISCVState *env, uint32_t desc)
218
+{
219
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
220
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
221
+ uint32_t vta = vext_vta(desc);
222
+ uint32_t *vd = vd_vptr;
223
+ uint32_t *vs2 = vs2_vptr;
224
+ uint32_t v1[8], v2[8], v3[8];
225
+
226
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
227
+ for (int k = 0; k < 8; k++) {
228
+ v2[k] = bswap32(vd[H4(i * 8 + k)]);
229
+ v3[k] = bswap32(vs2[H4(i * 8 + k)]);
230
+ }
231
+ sm3c(v1, v2, v3, uimm);
232
+ for (int k = 0; k < 8; k++) {
233
+ vd[i * 8 + k] = bswap32(v1[H4(k)]);
234
+ }
235
+ }
236
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
237
+ env->vstart = 0;
238
+}
239
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
240
index XXXXXXX..XXXXXXX 100644
241
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
242
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
243
@@ -XXX,XX +XXX,XX @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
244
}
245
return false;
246
}
247
+
248
+/*
249
+ * Zvksh
250
+ */
251
+
252
+#define ZVKSH_EGS 8
253
+
254
+static inline bool vsm3_check(DisasContext *s, arg_rmrr *a)
255
+{
256
+ int egw_bytes = ZVKSH_EGS << s->sew;
257
+ int mult = 1 << MAX(s->lmul, 0);
258
+ return s->cfg_ptr->ext_zvksh == true &&
259
+ require_rvv(s) &&
260
+ vext_check_isa_ill(s) &&
261
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
262
+ MAXSZ(s) >= egw_bytes &&
263
+ s->sew == MO_32;
264
+}
265
+
266
+static inline bool vsm3me_check(DisasContext *s, arg_rmrr *a)
267
+{
268
+ return vsm3_check(s, a) && vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
269
+}
270
+
271
+static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
272
+{
273
+ return vsm3_check(s, a) && vext_check_ss(s, a->rd, a->rs2, a->vm);
274
+}
275
+
276
+GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
277
+GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
278
--
279
2.41.0
1
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
3
This commit adds support for the Zvkg vector-crypto extension, which
4
consists of the following instructions:
5
6
* vgmul.vv
7
* vghsh.vv
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
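A note on the bit twiddling in the helpers: GHASH is specified over
GF(2^128) in a bit-reflected representation, which is why the operands
pass through brev8() on the way into and out of the carry-less multiply,
and why the reduction step folds in 0x87, the low coefficients of the
field polynomial x^128 + x^7 + x^2 + x + 1.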
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
14
[max.chou@sifive.com: Replaced vstart checking by TCG op]
15
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
16
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
[max.chou@sifive.com: Exposed x-zvkg property]
20
[max.chou@sifive.com: Replaced uint by int for cross win32 build]
21
Message-ID: <20230711165917.2629866-13-max.chou@sifive.com>
22
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
23
---
24
target/riscv/cpu_cfg.h | 1 +
25
target/riscv/helper.h | 3 +
26
target/riscv/insn32.decode | 4 ++
27
target/riscv/cpu.c | 6 +-
28
target/riscv/vcrypto_helper.c | 72 ++++++++++++++++++++++++
29
target/riscv/insn_trans/trans_rvvk.c.inc | 30 ++++++++++
30
6 files changed, 114 insertions(+), 2 deletions(-)
31
32
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_cfg.h
35
+++ b/target/riscv/cpu_cfg.h
36
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
37
bool ext_zve64d;
38
bool ext_zvbb;
39
bool ext_zvbc;
40
+ bool ext_zvkg;
41
bool ext_zvkned;
42
bool ext_zvknha;
43
bool ext_zvknhb;
44
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/riscv/helper.h
47
+++ b/target/riscv/helper.h
48
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
49
50
DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
51
DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
52
+
53
+DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
54
+DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/insn32.decode
58
+++ b/target/riscv/insn32.decode
59
@@ -XXX,XX +XXX,XX @@ vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
# *** Zvksh vector crypto extension ***
61
vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
63
+
64
+# *** Zvkg vector crypto extension ***
65
+vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
+vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
67
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/riscv/cpu.c
70
+++ b/target/riscv/cpu.c
71
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
72
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
73
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
74
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
75
+ ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
76
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
77
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
78
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
79
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
80
* In principle Zve*x would also suffice here, were they supported
81
* in qemu
82
*/
83
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
84
- cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
85
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
86
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
87
error_setg(errp,
88
"Vector crypto extensions require V or Zve* extensions");
89
return;
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
91
/* Vector cryptography extensions */
92
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
93
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
94
+ DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false),
95
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
96
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
97
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/vcrypto_helper.c
101
+++ b/target/riscv/vcrypto_helper.c
102
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
103
vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
104
env->vstart = 0;
105
}
106
+
107
+void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
108
+ CPURISCVState *env, uint32_t desc)
109
+{
110
+ uint64_t *vd = vd_vptr;
111
+ uint64_t *vs1 = vs1_vptr;
112
+ uint64_t *vs2 = vs2_vptr;
113
+ uint32_t vta = vext_vta(desc);
114
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
115
+
116
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
117
+ uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
118
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
119
+ uint64_t X[2] = {vs1[i * 2 + 0], vs1[i * 2 + 1]};
120
+ uint64_t Z[2] = {0, 0};
121
+
122
+ uint64_t S[2] = {brev8(Y[0] ^ X[0]), brev8(Y[1] ^ X[1])};
123
+
124
+ for (int j = 0; j < 128; j++) {
125
+ if ((S[j / 64] >> (j % 64)) & 1) {
126
+ Z[0] ^= H[0];
127
+ Z[1] ^= H[1];
128
+ }
129
+ bool reduce = ((H[1] >> 63) & 1);
130
+ H[1] = H[1] << 1 | H[0] >> 63;
131
+ H[0] = H[0] << 1;
132
+ if (reduce) {
133
+ H[0] ^= 0x87;
134
+ }
135
+ }
136
+
137
+ vd[i * 2 + 0] = brev8(Z[0]);
138
+ vd[i * 2 + 1] = brev8(Z[1]);
139
+ }
140
+ /* set tail elements to 1s */
141
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
142
+ env->vstart = 0;
143
+}
144
+
145
+void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
146
+ uint32_t desc)
147
+{
148
+ uint64_t *vd = vd_vptr;
149
+ uint64_t *vs2 = vs2_vptr;
150
+ uint32_t vta = vext_vta(desc);
151
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
152
+
153
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
154
+ uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
155
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
156
+ uint64_t Z[2] = {0, 0};
157
+
158
+ for (int j = 0; j < 128; j++) {
159
+ if ((Y[j / 64] >> (j % 64)) & 1) {
160
+ Z[0] ^= H[0];
161
+ Z[1] ^= H[1];
162
+ }
163
+ bool reduce = ((H[1] >> 63) & 1);
164
+ H[1] = H[1] << 1 | H[0] >> 63;
165
+ H[0] = H[0] << 1;
166
+ if (reduce) {
167
+ H[0] ^= 0x87;
168
+ }
169
+ }
170
+
171
+ vd[i * 2 + 0] = brev8(Z[0]);
172
+ vd[i * 2 + 1] = brev8(Z[1]);
173
+ }
174
+ /* set tail elements to 1s */
175
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
176
+ env->vstart = 0;
177
+}
178
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
181
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
182
@@ -XXX,XX +XXX,XX @@ static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
183
184
GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
185
GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
186
+
187
+/*
188
+ * Zvkg
189
+ */
190
+
191
+#define ZVKG_EGS 4
192
+
193
+static bool vgmul_check(DisasContext *s, arg_rmr *a)
194
+{
195
+ int egw_bytes = ZVKG_EGS << s->sew;
196
+ return s->cfg_ptr->ext_zvkg == true &&
197
+ vext_check_isa_ill(s) &&
198
+ require_rvv(s) &&
199
+ MAXSZ(s) >= egw_bytes &&
200
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
201
+ s->sew == MO_32;
202
+}
203
+
204
+GEN_V_UNMASKED_TRANS(vgmul_vv, vgmul_check, ZVKG_EGS)
205
+
206
+static bool vghsh_check(DisasContext *s, arg_rmrr *a)
207
+{
208
+ int egw_bytes = ZVKG_EGS << s->sew;
209
+ return s->cfg_ptr->ext_zvkg == true &&
210
+ opivv_check(s, a) &&
211
+ MAXSZ(s) >= egw_bytes &&
212
+ s->sew == MO_32;
213
+}
214
+
215
+GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
216
--
217
2.41.0
1
From: Max Chou <max.chou@sifive.com>
1
2
3
Allows sharing of sm4_subword between different targets.
4
5
Signed-off-by: Max Chou <max.chou@sifive.com>
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Max Chou <max.chou@sifive.com>
9
Message-ID: <20230711165917.2629866-14-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
include/crypto/sm4.h | 8 ++++++++
13
target/arm/tcg/crypto_helper.c | 10 ++--------
14
2 files changed, 10 insertions(+), 8 deletions(-)
15
16
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/crypto/sm4.h
19
+++ b/include/crypto/sm4.h
20
@@ -XXX,XX +XXX,XX @@
21
22
extern const uint8_t sm4_sbox[256];
23
24
+static inline uint32_t sm4_subword(uint32_t word)
25
+{
26
+ return sm4_sbox[word & 0xff] |
27
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
28
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
29
+ sm4_sbox[(word >> 24) & 0xff] << 24;
30
+}
31
+
32
#endif
33
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/arm/tcg/crypto_helper.c
36
+++ b/target/arm/tcg/crypto_helper.c
37
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm)
38
CR_ST_WORD(d, (i + 3) % 4) ^
39
CR_ST_WORD(n, i);
40
41
- t = sm4_sbox[t & 0xff] |
42
- sm4_sbox[(t >> 8) & 0xff] << 8 |
43
- sm4_sbox[(t >> 16) & 0xff] << 16 |
44
- sm4_sbox[(t >> 24) & 0xff] << 24;
45
+ t = sm4_subword(t);
46
47
CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
48
rol32(t, 24);
49
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4ekey(uint64_t *rd, uint64_t *rn, uint64_t *rm)
50
CR_ST_WORD(d, (i + 3) % 4) ^
51
CR_ST_WORD(m, i);
52
53
- t = sm4_sbox[t & 0xff] |
54
- sm4_sbox[(t >> 8) & 0xff] << 8 |
55
- sm4_sbox[(t >> 16) & 0xff] << 16 |
56
- sm4_sbox[(t >> 24) & 0xff] << 24;
57
+ t = sm4_subword(t);
58
59
CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
60
}
61
--
62
2.41.0
1
From: Max Chou <max.chou@sifive.com>
1
2
3
Adds sm4_ck constant for use in sm4 cryptography across different targets.
4
5
Signed-off-by: Max Chou <max.chou@sifive.com>
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
7
Signed-off-by: Max Chou <max.chou@sifive.com>
8
Message-ID: <20230711165917.2629866-15-max.chou@sifive.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
include/crypto/sm4.h | 1 +
12
crypto/sm4.c | 10 ++++++++++
13
2 files changed, 11 insertions(+)
14
15
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/crypto/sm4.h
18
+++ b/include/crypto/sm4.h
19
@@ -XXX,XX +XXX,XX @@
20
#define QEMU_SM4_H
21
22
extern const uint8_t sm4_sbox[256];
23
+extern const uint32_t sm4_ck[32];
24
25
static inline uint32_t sm4_subword(uint32_t word)
26
{
27
diff --git a/crypto/sm4.c b/crypto/sm4.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/crypto/sm4.c
30
+++ b/crypto/sm4.c
31
@@ -XXX,XX +XXX,XX @@ uint8_t const sm4_sbox[] = {
32
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
33
};
34
35
+uint32_t const sm4_ck[] = {
36
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
37
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
38
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
39
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
40
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
41
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
42
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
43
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
44
+};
45
--
46
2.41.0
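For reference, these values follow the standard SM4 key-schedule definition: byte j of CK[i] is (4 * i + j) * 7 mod 256. The table added above can be regenerated with a short standalone sketch (not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i < 32; i++) {
            uint32_t ck = 0;

            for (int j = 0; j < 4; j++) {
                /* Byte j of CK[i] is (4 * i + j) * 7 mod 256. */
                ck = (ck << 8) | (uint8_t)((4 * i + j) * 7);
            }
            printf("0x%08x%s", ck, (i % 4 == 3) ? ",\n" : ", ");
        }
        return 0;
    }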
New patch
1
1
From: Max Chou <max.chou@sifive.com>
2
3
This commit adds support for the Zvksed vector-crypto extension, which
4
consists of the following instructions:
5
6
* vsm4k.vi
7
* vsm4r.[vv,vs]
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
Signed-off-by: Max Chou <max.chou@sifive.com>
14
Reviewed-by: Frank Chang <frank.chang@sifive.com>
15
[lawrence.hunter@codethink.co.uk: Moved SM4 functions from
16
crypto_helper.c to vcrypto_helper.c]
17
[nazar.kazakov@codethink.co.uk: Added alignment checks, refactored code to
18
use macros, and minor style changes]
19
Signed-off-by: Max Chou <max.chou@sifive.com>
20
Message-ID: <20230711165917.2629866-16-max.chou@sifive.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
22
---
23
target/riscv/cpu_cfg.h | 1 +
24
target/riscv/helper.h | 4 +
25
target/riscv/insn32.decode | 5 +
26
target/riscv/cpu.c | 5 +-
27
target/riscv/vcrypto_helper.c | 127 +++++++++++++++++++++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 43 ++++++++
29
6 files changed, 184 insertions(+), 1 deletion(-)
30
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu_cfg.h
34
+++ b/target/riscv/cpu_cfg.h
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
36
bool ext_zvkned;
37
bool ext_zvknha;
38
bool ext_zvknhb;
39
+ bool ext_zvksed;
40
bool ext_zvksh;
41
bool ext_zmmul;
42
bool ext_zvfbfmin;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
48
49
DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32)
53
+DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32)
54
+DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/insn32.decode
58
+++ b/target/riscv/insn32.decode
59
@@ -XXX,XX +XXX,XX @@ vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
# *** Zvkg vector crypto extension ***
61
vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
63
+
64
+# *** Zvksed vector crypto extension ***
65
+vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
+vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
67
+vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
68
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/target/riscv/cpu.c
71
+++ b/target/riscv/cpu.c
72
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
73
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
74
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
75
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
76
+ ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
77
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
78
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
79
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
80
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
81
* in qemu
82
*/
83
if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
84
- cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
85
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
86
+ !cpu->cfg.ext_zve32f) {
87
error_setg(errp,
88
"Vector crypto extensions require V or Zve* extensions");
89
return;
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
91
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
92
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
93
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
94
+ DEFINE_PROP_BOOL("x-zvksed", RISCVCPU, cfg.ext_zvksed, false),
95
DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
96
97
DEFINE_PROP_END_OF_LIST(),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/vcrypto_helper.c
101
+++ b/target/riscv/vcrypto_helper.c
102
@@ -XXX,XX +XXX,XX @@
103
#include "cpu.h"
104
#include "crypto/aes.h"
105
#include "crypto/aes-round.h"
106
+#include "crypto/sm4.h"
107
#include "exec/memop.h"
108
#include "exec/exec-all.h"
109
#include "exec/helper-proto.h"
110
@@ -XXX,XX +XXX,XX @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
111
vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
112
env->vstart = 0;
113
}
114
+
115
+void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
116
+ uint32_t desc)
117
+{
118
+ const uint32_t egs = 4;
119
+ uint32_t rnd = uimm5 & 0x7;
120
+ uint32_t group_start = env->vstart / egs;
121
+ uint32_t group_end = env->vl / egs;
122
+ uint32_t esz = sizeof(uint32_t);
123
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
124
+
125
+ for (uint32_t i = group_start; i < group_end; ++i) {
126
+ uint32_t vstart = i * egs;
127
+ uint32_t vend = (i + 1) * egs;
128
+ uint32_t rk[4] = {0};
129
+ uint32_t tmp[8] = {0};
130
+
131
+ for (uint32_t j = vstart; j < vend; ++j) {
132
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
133
+ }
134
+
135
+ for (uint32_t j = 0; j < egs; ++j) {
136
+ tmp[j] = rk[j];
137
+ }
138
+
139
+ for (uint32_t j = 0; j < egs; ++j) {
140
+ uint32_t b, s;
141
+ b = tmp[j + 1] ^ tmp[j + 2] ^ tmp[j + 3] ^ sm4_ck[rnd * 4 + j];
142
+
143
+ s = sm4_subword(b);
144
+
145
+ tmp[j + 4] = tmp[j] ^ (s ^ rol32(s, 13) ^ rol32(s, 23));
146
+ }
147
+
148
+ for (uint32_t j = vstart; j < vend; ++j) {
149
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
150
+ }
151
+ }
152
+
153
+ env->vstart = 0;
154
+ /* set tail elements to 1s */
155
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
156
+}
157
+
158
+static void do_sm4_round(uint32_t *rk, uint32_t *buf)
159
+{
160
+ const uint32_t egs = 4;
161
+ uint32_t s, b;
162
+
163
+ for (uint32_t j = egs; j < egs * 2; ++j) {
164
+ b = buf[j - 3] ^ buf[j - 2] ^ buf[j - 1] ^ rk[j - 4];
165
+
166
+ s = sm4_subword(b);
167
+
168
+ buf[j] = buf[j - 4] ^ (s ^ rol32(s, 2) ^ rol32(s, 10) ^ rol32(s, 18) ^
169
+ rol32(s, 24));
170
+ }
171
+}
172
+
173
+void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
174
+{
175
+ const uint32_t egs = 4;
176
+ uint32_t group_start = env->vstart / egs;
177
+ uint32_t group_end = env->vl / egs;
178
+ uint32_t esz = sizeof(uint32_t);
179
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
180
+
181
+ for (uint32_t i = group_start; i < group_end; ++i) {
182
+ uint32_t vstart = i * egs;
183
+ uint32_t vend = (i + 1) * egs;
184
+ uint32_t rk[4] = {0};
185
+ uint32_t tmp[8] = {0};
186
+
187
+ for (uint32_t j = vstart; j < vend; ++j) {
188
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
189
+ }
190
+
191
+ for (uint32_t j = vstart; j < vend; ++j) {
192
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
193
+ }
194
+
195
+ do_sm4_round(rk, tmp);
196
+
197
+ for (uint32_t j = vstart; j < vend; ++j) {
198
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
199
+ }
200
+ }
201
+
202
+ env->vstart = 0;
203
+ /* set tail elements to 1s */
204
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
205
+}
206
+
207
+void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
208
+{
209
+ const uint32_t egs = 4;
210
+ uint32_t group_start = env->vstart / egs;
211
+ uint32_t group_end = env->vl / egs;
212
+ uint32_t esz = sizeof(uint32_t);
213
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
214
+
215
+ for (uint32_t i = group_start; i < group_end; ++i) {
216
+ uint32_t vstart = i * egs;
217
+ uint32_t vend = (i + 1) * egs;
218
+ uint32_t rk[4] = {0};
219
+ uint32_t tmp[8] = {0};
220
+
221
+ for (uint32_t j = 0; j < egs; ++j) {
222
+ rk[j] = *((uint32_t *)vs2 + H4(j));
223
+ }
224
+
225
+ for (uint32_t j = vstart; j < vend; ++j) {
226
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
227
+ }
228
+
229
+ do_sm4_round(rk, tmp);
230
+
231
+ for (uint32_t j = vstart; j < vend; ++j) {
232
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
233
+ }
234
+ }
235
+
236
+ env->vstart = 0;
237
+ /* set tail elements to 1s */
238
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
239
+}
240
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
241
index XXXXXXX..XXXXXXX 100644
242
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
243
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
244
@@ -XXX,XX +XXX,XX @@ static bool vghsh_check(DisasContext *s, arg_rmrr *a)
245
}
246
247
GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
248
+
249
+/*
250
+ * Zvksed
251
+ */
252
+
253
+#define ZVKSED_EGS 4
254
+
255
+static bool zvksed_check(DisasContext *s)
256
+{
257
+ int egw_bytes = ZVKSED_EGS << s->sew;
258
+ return s->cfg_ptr->ext_zvksed == true &&
259
+ require_rvv(s) &&
260
+ vext_check_isa_ill(s) &&
261
+ MAXSZ(s) >= egw_bytes &&
262
+ s->sew == MO_32;
263
+}
264
+
265
+static bool vsm4k_vi_check(DisasContext *s, arg_rmrr *a)
266
+{
267
+ return zvksed_check(s) &&
268
+ require_align(a->rd, s->lmul) &&
269
+ require_align(a->rs2, s->lmul);
270
+}
271
+
272
+GEN_VI_UNMASKED_TRANS(vsm4k_vi, vsm4k_vi_check, ZVKSED_EGS)
273
+
274
+static bool vsm4r_vv_check(DisasContext *s, arg_rmr *a)
275
+{
276
+ return zvksed_check(s) &&
277
+ require_align(a->rd, s->lmul) &&
278
+ require_align(a->rs2, s->lmul);
279
+}
280
+
281
+GEN_V_UNMASKED_TRANS(vsm4r_vv, vsm4r_vv_check, ZVKSED_EGS)
282
+
283
+static bool vsm4r_vs_check(DisasContext *s, arg_rmr *a)
284
+{
285
+ return zvksed_check(s) &&
286
+ !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
287
+ require_align(a->rd, s->lmul);
288
+}
289
+
290
+GEN_V_UNMASKED_TRANS(vsm4r_vs, vsm4r_vs_check, ZVKSED_EGS)
291
--
292
2.41.0
1
From: Anup Patel <apatel@ventanamicro.com>
1
From: Rob Bradford <rbradford@rivosinc.com>
2
2
3
Set the minimum priv spec version for mcountinhibit to v1.11 so that it
3
These are WARL fields - zero out the bits for unavailable counters and
4
is not available for v1.10 (or lower).
4
special-case the TM bit in mcountinhibit, which is hardwired to zero.
5
This patch achieves this by modifying the value written so that any use
6
of the field will see the correctly masked bits.
5
7
6
Fixes: eab4776b2bad ("target/riscv: Add support for hpmcounters/hpmevents")
8
Tested by modifying OpenSBI to write the maximum value to these CSRs; upon
7
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
9
subsequent reads, the appropriate number of bits for the number of PMUs is
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
enabled and the TM bit is zero in mcountinhibit.
9
Message-Id: <20220628101737.786681-3-apatel@ventanamicro.com>
11
12
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20230802124906.24197-1-rbradford@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
17
---
12
target/riscv/csr.c | 2 +-
18
target/riscv/csr.c | 11 +++++++++--
13
1 file changed, 1 insertion(+), 1 deletion(-)
19
1 file changed, 9 insertions(+), 2 deletions(-)
14
20
15
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
21
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/csr.c
23
--- a/target/riscv/csr.c
18
+++ b/target/riscv/csr.c
24
+++ b/target/riscv/csr.c
19
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
25
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
20
write_mhpmcounter },
26
{
21
27
int cidx;
22
[CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
28
PMUCTRState *counter;
23
- write_mcountinhibit },
29
+ RISCVCPU *cpu = env_archcpu(env);
24
+ write_mcountinhibit, .min_priv_ver = PRIV_VERSION_1_11_0 },
30
25
31
- env->mcountinhibit = val;
26
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
32
+ /* WARL register - disable unavailable counters; TM bit is always 0 */
27
write_mhpmevent },
33
+ env->mcountinhibit =
34
+ val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
35
36
/* Check if any other counter is also monitoring cycles/instructions */
37
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
38
@@ -XXX,XX +XXX,XX @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
39
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
40
target_ulong val)
41
{
42
- env->mcounteren = val;
43
+ RISCVCPU *cpu = env_archcpu(env);
44
+
45
+ /* WARL register - disable unavailable counters */
46
+ env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
47
+ COUNTEREN_IR);
48
return RISCV_EXCP_NONE;
49
}
50
28
--
51
--
29
2.36.1
52
2.41.0
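The WARL behaviour implemented above amounts to masking the written value. A standalone sketch of the effect (the 4-counter configuration is a made-up example; the CY/TM/IR bit positions follow the privileged spec):

    #include <stdint.h>
    #include <stdio.h>

    #define COUNTEREN_CY (1u << 0)   /* cycle   */
    #define COUNTEREN_TM (1u << 1)   /* time    */
    #define COUNTEREN_IR (1u << 2)   /* instret */

    int main(void)
    {
        uint32_t pmu_avail_ctrs = 0xf << 3;  /* pretend hpmcounter3..6 exist */
        uint32_t val = 0xffffffff;           /* guest writes all ones        */

        /* mcountinhibit: TM is hardwired to zero, so it is left out of the mask. */
        uint32_t mcountinhibit = val & (pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
        /* mcounteren: TM is writable, so it stays in the mask. */
        uint32_t mcounteren = val & (pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
                                     COUNTEREN_IR);

        printf("mcountinhibit = 0x%08x\n", mcountinhibit);  /* 0x0000007d */
        printf("mcounteren    = 0x%08x\n", mcounteren);     /* 0x0000007f */
        return 0;
    }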
1
From: Atish Patra <atish.patra@wdc.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
The PMU counters are supported via cpu config "Counters" which doesn't
3
The RVA23 Profiles specification states:
4
indicate the correct purpose of those counters.
4
The RVA23 profiles are intended to be used for 64-bit application
5
processors that will run rich OS stacks from standard binary OS
6
distributions and with a substantial number of third-party binary user
7
applications that will be supported over a considerable length of time
8
in the field.
5
9
6
Rename the config property to pmu to indicate that these counters
10
Chapter 4 of the unprivileged spec introduces the Zihintntl extension
7
are performance monitoring counters. This aligns with cpu options for
11
and Zihintntl is a mandatory extension presented in RVA23 Profiles, whose
8
ARM architecture as well.
12
purpose is to enable application and operating system portability across
13
different implementations. Thus the DTS should contain the Zihintntl ISA
14
string in order to pass to software.
9
15
10
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
16
The unprivileged spec states:
17
Like any HINTs, these instructions may be freely ignored. Hence, although
18
they are described in terms of cache-based memory hierarchies, they do not
19
mandate the provision of caches.
20
21
These instructions are encoded using otherwise-unused encodings, e.g. ADD x0, x0, x2,
22
which QEMU already supports, and QEMU does not emulate cache. Therefore
23
these instructions can be considered as a no-op, and we only need to add
24
a new property for the Zihintntl extension.
25
26
Reviewed-by: Frank Chang <frank.chang@sifive.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
27
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Atish Patra <atish.patra@wdc.com>
28
Signed-off-by: Jason Chien <jason.chien@sifive.com>
13
Signed-off-by: Atish Patra <atishp@rivosinc.com>
29
Message-ID: <20230726074049.19505-2-jason.chien@sifive.com>
14
Message-Id: <20220620231603.2547260-4-atishp@rivosinc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
31
---
17
target/riscv/cpu.h | 2 +-
32
target/riscv/cpu_cfg.h | 1 +
18
target/riscv/cpu.c | 4 ++--
33
target/riscv/cpu.c | 2 ++
19
target/riscv/csr.c | 4 ++--
34
2 files changed, 3 insertions(+)
20
3 files changed, 5 insertions(+), 5 deletions(-)
21
35
22
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
36
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
23
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu.h
38
--- a/target/riscv/cpu_cfg.h
25
+++ b/target/riscv/cpu.h
39
+++ b/target/riscv/cpu_cfg.h
26
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
40
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
27
bool ext_zksed;
41
bool ext_icbom;
28
bool ext_zksh;
42
bool ext_icboz;
29
bool ext_zkt;
43
bool ext_zicond;
30
- bool ext_counters;
44
+ bool ext_zihintntl;
31
+ bool ext_pmu;
45
bool ext_zihintpause;
32
bool ext_ifencei;
46
bool ext_smstateen;
33
bool ext_icsr;
47
bool ext_sstc;
34
bool ext_svinval;
35
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
48
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
36
index XXXXXXX..XXXXXXX 100644
49
index XXXXXXX..XXXXXXX 100644
37
--- a/target/riscv/cpu.c
50
--- a/target/riscv/cpu.c
38
+++ b/target/riscv/cpu.c
51
+++ b/target/riscv/cpu.c
39
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_init(Object *obj)
52
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
40
{
53
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
41
RISCVCPU *cpu = RISCV_CPU(obj);
54
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
42
55
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
43
- cpu->cfg.ext_counters = true;
56
+ ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
44
+ cpu->cfg.ext_pmu = true;
57
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
45
cpu->cfg.ext_ifencei = true;
58
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
46
cpu->cfg.ext_icsr = true;
59
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
47
cpu->cfg.mmu = true;
48
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
60
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
49
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
61
DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
50
DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
51
DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
52
- DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
53
+ DEFINE_PROP_BOOL("pmu", RISCVCPU, cfg.ext_pmu, true),
54
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
62
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
55
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
63
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
56
DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
64
+ DEFINE_PROP_BOOL("Zihintntl", RISCVCPU, cfg.ext_zihintntl, true),
57
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
65
DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
58
index XXXXXXX..XXXXXXX 100644
66
DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
59
--- a/target/riscv/csr.c
67
DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
60
+++ b/target/riscv/csr.c
61
@@ -XXX,XX +XXX,XX @@ static RISCVException ctr(CPURISCVState *env, int csrno)
62
RISCVCPU *cpu = RISCV_CPU(cs);
63
int ctr_index;
64
65
- if (!cpu->cfg.ext_counters) {
66
- /* The Counters extensions is not enabled */
67
+ if (!cpu->cfg.ext_pmu) {
68
+ /* The PMU extension is not enabled */
69
return RISCV_EXCP_ILLEGAL_INST;
70
}
71
72
--
68
--
73
2.36.1
69
2.41.0
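For context, the ratified Zihintntl hints reuse integer instructions that write x0: NTL.P1, NTL.PALL, NTL.S1 and NTL.ALL are encoded as ADD x0, x0, x2..x5, which is why QEMU already decodes them and only the new property is needed. A standalone sketch of those encodings (illustration only, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* R-type ADD x0, x0, rs2: funct7=0, rs2, rs1=0, funct3=0, rd=0, opcode=0x33 */
    static uint32_t add_x0_x0(uint32_t rs2)
    {
        return (rs2 << 20) | 0x33;
    }

    int main(void)
    {
        printf("ntl.p1   = 0x%08x\n", add_x0_x0(2));  /* add x0, x0, x2 */
        printf("ntl.pall = 0x%08x\n", add_x0_x0(3));  /* add x0, x0, x3 */
        printf("ntl.s1   = 0x%08x\n", add_x0_x0(4));  /* add x0, x0, x4 */
        printf("ntl.all  = 0x%08x\n", add_x0_x0(5));  /* add x0, x0, x5 */
        return 0;
    }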
1
From: Víctor Colombo <victor.colombo@eldorado.org.br>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
Commit 57c108b8646 introduced gen_set_gpri(), which already contains
3
Commit a47842d ("riscv: Add support for the Zfa extension") implemented the zfa extension.
4
a check for whether the destination register is 'zero'. The checks in auipc
4
However, it contains typos in fleq.d and fltq.d. Both of them misused the fltq.s
5
and lui are therefore redundant. This patch removes those checks.
5
helper function.
6
6
7
Signed-off-by: Víctor Colombo <victor.colombo@eldorado.org.br>
7
Fixes: a47842d ("riscv: Add support for the Zfa extension")
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Message-Id: <20220610165517.47517-1-victor.colombo@eldorado.org.br>
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
Message-ID: <20230728003906.768-1-zhiwei_liu@linux.alibaba.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
---
13
target/riscv/insn_trans/trans_rvi.c.inc | 8 ++------
14
target/riscv/insn_trans/trans_rvzfa.c.inc | 4 ++--
14
1 file changed, 2 insertions(+), 6 deletions(-)
15
1 file changed, 2 insertions(+), 2 deletions(-)
15
16
16
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
17
diff --git a/target/riscv/insn_trans/trans_rvzfa.c.inc b/target/riscv/insn_trans/trans_rvzfa.c.inc
17
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/insn_trans/trans_rvi.c.inc
19
--- a/target/riscv/insn_trans/trans_rvzfa.c.inc
19
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
20
+++ b/target/riscv/insn_trans/trans_rvzfa.c.inc
20
@@ -XXX,XX +XXX,XX @@ static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
21
@@ -XXX,XX +XXX,XX @@ bool trans_fleq_d(DisasContext *ctx, arg_fleq_d *a)
21
22
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
22
static bool trans_lui(DisasContext *ctx, arg_lui *a)
23
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
23
{
24
24
- if (a->rd != 0) {
25
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
25
- gen_set_gpri(ctx, a->rd, a->imm);
26
+ gen_helper_fleq_d(dest, cpu_env, src1, src2);
26
- }
27
gen_set_gpr(ctx, a->rd, dest);
27
+ gen_set_gpri(ctx, a->rd, a->imm);
28
return true;
28
return true;
29
}
29
}
30
30
@@ -XXX,XX +XXX,XX @@ bool trans_fltq_d(DisasContext *ctx, arg_fltq_d *a)
31
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
31
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
32
{
32
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
33
- if (a->rd != 0) {
33
34
- gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
34
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
35
- }
35
+ gen_helper_fltq_d(dest, cpu_env, src1, src2);
36
+ gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
36
gen_set_gpr(ctx, a->rd, dest);
37
return true;
37
return true;
38
}
38
}
39
40
--
39
--
41
2.36.1
40
2.41.0
New patch
1
From: Jason Chien <jason.chien@sifive.com>
1
2
3
When writing the upper mtime, we should keep the original lower mtime
4
whose value is given by cpu_riscv_read_rtc() instead of
5
cpu_riscv_read_rtc_raw(). The same logic applies to writes to lower mtime.
6
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20230728082502.26439-1-jason.chien@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
hw/intc/riscv_aclint.c | 5 +++--
13
1 file changed, 3 insertions(+), 2 deletions(-)
14
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/intc/riscv_aclint.c
18
+++ b/hw/intc/riscv_aclint.c
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
20
return;
21
} else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) {
22
uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq);
23
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
24
25
if (addr == mtimer->time_base) {
26
if (size == 4) {
27
/* time_lo for RV32/RV64 */
28
- mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r;
29
+ mtimer->time_delta = ((rtc & ~0xFFFFFFFFULL) | value) - rtc_r;
30
} else {
31
/* time for RV64 */
32
mtimer->time_delta = value - rtc_r;
33
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
34
} else {
35
if (size == 4) {
36
/* time_hi for RV32/RV64 */
37
- mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r;
38
+ mtimer->time_delta = (value << 32 | (rtc & 0xFFFFFFFF)) - rtc_r;
39
} else {
40
qemu_log_mask(LOG_GUEST_ERROR,
41
"aclint-mtimer: invalid time_hi write: %08x",
42
--
43
2.41.0
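The fix composes the new 64-bit mtime from the guest-visible value (rtc, which already includes time_delta) and the written half, then re-derives time_delta against the raw counter. A standalone sketch of the time_lo case (variable names mirror the patch; the concrete values are made up):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t time_delta = 0x100;             /* current offset            */
        uint64_t rtc_r = 0x0000000500000010ULL;  /* raw host-derived counter  */
        uint64_t rtc = rtc_r + time_delta;       /* guest-visible mtime       */
        uint64_t value = 0xdeadbeef;             /* 32-bit write to time_lo   */

        /* Keep the guest-visible upper half, replace the lower half, and
         * express the result as an offset from the raw counter. */
        time_delta = ((rtc & ~0xFFFFFFFFULL) | value) - rtc_r;

        /* Prints 0x00000005deadbeef: upper half preserved, lower half written. */
        printf("new mtime = 0x%016" PRIx64 "\n", rtc_r + time_delta);
        return 0;
    }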
New patch
1
From: Jason Chien <jason.chien@sifive.com>
1
2
3
The variables whose values are given by cpu_riscv_read_rtc() should be named
4
"rtc". The variables whose value are given by cpu_riscv_read_rtc_raw()
5
should be named "rtc_r".
6
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20230728082502.26439-2-jason.chien@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
hw/intc/riscv_aclint.c | 6 +++---
13
1 file changed, 3 insertions(+), 3 deletions(-)
14
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/intc/riscv_aclint.c
18
+++ b/hw/intc/riscv_aclint.c
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
20
uint64_t next;
21
uint64_t diff;
22
23
- uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
24
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
25
26
/* Compute the relative hartid w.r.t the socket */
27
hartid = hartid - mtimer->hartid_base;
28
29
mtimer->timecmp[hartid] = value;
30
- if (mtimer->timecmp[hartid] <= rtc_r) {
31
+ if (mtimer->timecmp[hartid] <= rtc) {
32
/*
33
* If we're setting an MTIMECMP value in the "past",
34
* immediately raise the timer interrupt
35
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
36
37
/* otherwise, set up the future timer interrupt */
38
qemu_irq_lower(mtimer->timer_irqs[hartid]);
39
- diff = mtimer->timecmp[hartid] - rtc_r;
40
+ diff = mtimer->timecmp[hartid] - rtc;
41
/* back to ns (note args switched in muldiv64) */
42
uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
43
44
--
45
2.41.0
1
From: Alistair Francis <alistair.francis@wdc.com>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
We previously stored the device tree at a 16MB alignment from the end of
3
We should not use types dependent on the host arch for target_ucontext.
4
memory (or 3GB). This means we need at least 16MB of memory to be able
4
This bug was found when running rv32 applications.
5
to do this. We don't actually need the FDT to be 16MB aligned, so let's
6
drop it down to 2MB so that we can support systems with less memory,
7
while also allowing FDT size expansion.
8
5
9
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/992
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Atish Patra <atishp@rivosinc.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Bin Meng <bin.meng@windriver.com>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
13
Tested-by: Bin Meng <bin.meng@windriver.com>
10
Message-ID: <20230811055438.1945-1-zhiwei_liu@linux.alibaba.com>
14
Message-Id: <20220608062015.317894-1-alistair.francis@opensource.wdc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
12
---
17
hw/riscv/boot.c | 4 ++--
13
linux-user/riscv/signal.c | 4 ++--
18
1 file changed, 2 insertions(+), 2 deletions(-)
14
1 file changed, 2 insertions(+), 2 deletions(-)
19
15
20
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
16
diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c
21
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/riscv/boot.c
18
--- a/linux-user/riscv/signal.c
23
+++ b/hw/riscv/boot.c
19
+++ b/linux-user/riscv/signal.c
24
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
20
@@ -XXX,XX +XXX,XX @@ struct target_sigcontext {
25
/*
21
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
26
* We should put fdt as far as possible to avoid kernel/initrd overwriting
22
27
* its content. But it should be addressable by 32 bit system as well.
23
struct target_ucontext {
28
- * Thus, put it at an 16MB aligned address that less than fdt size from the
24
- unsigned long uc_flags;
29
+ * Thus, put it at an 2MB aligned address that less than fdt size from the
25
- struct target_ucontext *uc_link;
30
* end of dram or 3GB whichever is lesser.
26
+ abi_ulong uc_flags;
31
*/
27
+ abi_ptr uc_link;
32
temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end;
28
target_stack_t uc_stack;
33
- fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB);
29
target_sigset_t uc_sigmask;
34
+ fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB);
30
uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
35
36
ret = fdt_pack(fdt);
37
/* Should only fail if we've built a corrupted tree */
38
--
31
--
39
2.36.1
32
2.41.0
33
34
1
From: Atish Patra <atish.patra@wdc.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
As per the privilege specification v1.11, mcountinhibit allows to start/stop
3
In this patch, we create the APLIC and IMSIC FDT helper functions and
4
a pmu counter selectively.
4
remove M mode AIA devices when using KVM acceleration.
5
5
6
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
6
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Jim Shu <jim.shu@sifive.com>
8
Signed-off-by: Atish Patra <atish.patra@wdc.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Signed-off-by: Atish Patra <atishp@rivosinc.com>
9
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Message-Id: <20220620231603.2547260-6-atishp@rivosinc.com>
10
Message-ID: <20230727102439.22554-2-yongxuan.wang@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
12
---
13
target/riscv/cpu.h | 2 ++
13
hw/riscv/virt.c | 290 +++++++++++++++++++++++-------------------------
14
target/riscv/cpu_bits.h | 4 ++++
14
1 file changed, 137 insertions(+), 153 deletions(-)
15
target/riscv/csr.c | 25 +++++++++++++++++++++++++
16
target/riscv/machine.c | 1 +
17
4 files changed, 32 insertions(+)
18
15
19
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
16
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
20
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.h
18
--- a/hw/riscv/virt.c
22
+++ b/target/riscv/cpu.h
19
+++ b/hw/riscv/virt.c
23
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
20
@@ -XXX,XX +XXX,XX @@ static uint32_t imsic_num_bits(uint32_t count)
24
target_ulong scounteren;
21
return ret;
25
target_ulong mcounteren;
26
27
+ target_ulong mcountinhibit;
28
+
29
target_ulong sscratch;
30
target_ulong mscratch;
31
32
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_bits.h
35
+++ b/target/riscv/cpu_bits.h
36
@@ -XXX,XX +XXX,XX @@
37
#define CSR_MHPMCOUNTER29 0xb1d
38
#define CSR_MHPMCOUNTER30 0xb1e
39
#define CSR_MHPMCOUNTER31 0xb1f
40
+
41
+/* Machine counter-inhibit register */
42
+#define CSR_MCOUNTINHIBIT 0x320
43
+
44
#define CSR_MHPMEVENT3 0x323
45
#define CSR_MHPMEVENT4 0x324
46
#define CSR_MHPMEVENT5 0x325
47
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/riscv/csr.c
50
+++ b/target/riscv/csr.c
51
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mtvec(CPURISCVState *env, int csrno,
52
return RISCV_EXCP_NONE;
53
}
22
}
54
23
55
+static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
24
-static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
56
+ target_ulong *val)
25
- uint32_t *phandle, uint32_t *intc_phandles,
26
- uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
27
+static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
28
+ uint32_t *intc_phandles, uint32_t msi_phandle,
29
+ bool m_mode, uint32_t imsic_guest_bits)
30
{
31
int cpu, socket;
32
char *imsic_name;
33
MachineState *ms = MACHINE(s);
34
int socket_count = riscv_socket_count(ms);
35
- uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
36
+ uint32_t imsic_max_hart_per_socket;
37
uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
38
39
- *msi_m_phandle = (*phandle)++;
40
- *msi_s_phandle = (*phandle)++;
41
imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
42
imsic_regs = g_new0(uint32_t, socket_count * 4);
43
44
- /* M-level IMSIC node */
45
for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
46
imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
47
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
48
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
49
}
50
- imsic_max_hart_per_socket = 0;
51
- for (socket = 0; socket < socket_count; socket++) {
52
- imsic_addr = memmap[VIRT_IMSIC_M].base +
53
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
54
- imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
55
- imsic_regs[socket * 4 + 0] = 0;
56
- imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
57
- imsic_regs[socket * 4 + 2] = 0;
58
- imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
59
- if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
60
- imsic_max_hart_per_socket = s->soc[socket].num_harts;
61
- }
62
- }
63
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
64
- (unsigned long)memmap[VIRT_IMSIC_M].base);
65
- qemu_fdt_add_subnode(ms->fdt, imsic_name);
66
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
67
- "riscv,imsics");
68
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
69
- FDT_IMSIC_INT_CELLS);
70
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
71
- NULL, 0);
72
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
73
- NULL, 0);
74
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
75
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
76
- qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
77
- socket_count * sizeof(uint32_t) * 4);
78
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
79
- VIRT_IRQCHIP_NUM_MSIS);
80
- if (socket_count > 1) {
81
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
82
- imsic_num_bits(imsic_max_hart_per_socket));
83
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
84
- imsic_num_bits(socket_count));
85
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
86
- IMSIC_MMIO_GROUP_MIN_SHIFT);
87
- }
88
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_m_phandle);
89
-
90
- g_free(imsic_name);
91
92
- /* S-level IMSIC node */
93
- for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
94
- imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
95
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
96
- }
97
- imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
98
imsic_max_hart_per_socket = 0;
99
for (socket = 0; socket < socket_count; socket++) {
100
- imsic_addr = memmap[VIRT_IMSIC_S].base +
101
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
102
+ imsic_addr = base_addr + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
103
imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
104
s->soc[socket].num_harts;
105
imsic_regs[socket * 4 + 0] = 0;
106
@@ -XXX,XX +XXX,XX @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
107
imsic_max_hart_per_socket = s->soc[socket].num_harts;
108
}
109
}
110
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
111
- (unsigned long)memmap[VIRT_IMSIC_S].base);
112
+
113
+ imsic_name = g_strdup_printf("/soc/imsics@%lx", (unsigned long)base_addr);
114
qemu_fdt_add_subnode(ms->fdt, imsic_name);
115
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
116
- "riscv,imsics");
117
+ qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible", "riscv,imsics");
118
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
119
- FDT_IMSIC_INT_CELLS);
120
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
121
- NULL, 0);
122
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
123
- NULL, 0);
124
+ FDT_IMSIC_INT_CELLS);
125
+ qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
126
+ qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
127
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
128
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
129
+ imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
130
qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
131
- socket_count * sizeof(uint32_t) * 4);
132
+ socket_count * sizeof(uint32_t) * 4);
133
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
134
- VIRT_IRQCHIP_NUM_MSIS);
135
+ VIRT_IRQCHIP_NUM_MSIS);
136
+
137
if (imsic_guest_bits) {
138
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,guest-index-bits",
139
- imsic_guest_bits);
140
+ imsic_guest_bits);
141
}
142
+
143
if (socket_count > 1) {
144
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
145
- imsic_num_bits(imsic_max_hart_per_socket));
146
+ imsic_num_bits(imsic_max_hart_per_socket));
147
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
148
- imsic_num_bits(socket_count));
149
+ imsic_num_bits(socket_count));
150
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
151
- IMSIC_MMIO_GROUP_MIN_SHIFT);
152
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
153
}
154
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_s_phandle);
155
- g_free(imsic_name);
156
+ qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
157
158
+ g_free(imsic_name);
159
g_free(imsic_regs);
160
g_free(imsic_cells);
161
}
162
163
-static void create_fdt_socket_aplic(RISCVVirtState *s,
164
- const MemMapEntry *memmap, int socket,
165
- uint32_t msi_m_phandle,
166
- uint32_t msi_s_phandle,
167
- uint32_t *phandle,
168
- uint32_t *intc_phandles,
169
- uint32_t *aplic_phandles)
170
+static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
171
+ uint32_t *phandle, uint32_t *intc_phandles,
172
+ uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
57
+{
173
+{
58
+ if (env->priv_ver < PRIV_VERSION_1_11_0) {
174
+ *msi_m_phandle = (*phandle)++;
59
+ return RISCV_EXCP_ILLEGAL_INST;
175
+ *msi_s_phandle = (*phandle)++;
176
+
177
+ if (!kvm_enabled()) {
178
+ /* M-level IMSIC node */
179
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
180
+ *msi_m_phandle, true, 0);
60
+ }
181
+ }
61
+
182
+
62
+ *val = env->mcountinhibit;
183
+ /* S-level IMSIC node */
63
+ return RISCV_EXCP_NONE;
184
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
185
+ *msi_s_phandle, false,
186
+ imsic_num_bits(s->aia_guests + 1));
187
+
64
+}
188
+}
65
+
189
+
66
+static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
190
+static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
67
+ target_ulong val)
191
+ unsigned long aplic_addr, uint32_t aplic_size,
192
+ uint32_t msi_phandle,
193
+ uint32_t *intc_phandles,
194
+ uint32_t aplic_phandle,
195
+ uint32_t aplic_child_phandle,
196
+ bool m_mode)
197
{
198
int cpu;
199
char *aplic_name;
200
uint32_t *aplic_cells;
201
- unsigned long aplic_addr;
202
MachineState *ms = MACHINE(s);
203
- uint32_t aplic_m_phandle, aplic_s_phandle;
204
205
- aplic_m_phandle = (*phandle)++;
206
- aplic_s_phandle = (*phandle)++;
207
aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
208
209
- /* M-level APLIC node */
210
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
211
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
212
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
213
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
214
}
215
- aplic_addr = memmap[VIRT_APLIC_M].base +
216
- (memmap[VIRT_APLIC_M].size * socket);
217
+
218
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
219
qemu_fdt_add_subnode(ms->fdt, aplic_name);
220
qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
221
qemu_fdt_setprop_cell(ms->fdt, aplic_name,
222
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
223
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
224
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
225
+
226
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
227
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
228
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
229
+ aplic_cells,
230
+ s->soc[socket].num_harts * sizeof(uint32_t) * 2);
231
} else {
232
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
233
- msi_m_phandle);
234
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
235
}
236
+
237
qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
238
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
239
+ 0x0, aplic_addr, 0x0, aplic_size);
240
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
241
- VIRT_IRQCHIP_NUM_SOURCES);
242
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
243
- aplic_s_phandle);
244
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
245
- aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
246
+ VIRT_IRQCHIP_NUM_SOURCES);
247
+
248
+ if (aplic_child_phandle) {
249
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
250
+ aplic_child_phandle);
251
+ qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
252
+ aplic_child_phandle, 0x1,
253
+ VIRT_IRQCHIP_NUM_SOURCES);
254
+ }
255
+
256
riscv_socket_fdt_write_id(ms, aplic_name, socket);
257
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_m_phandle);
258
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle);
259
+
260
g_free(aplic_name);
261
+ g_free(aplic_cells);
262
+}
263
264
- /* S-level APLIC node */
265
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
266
- aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
267
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
268
+static void create_fdt_socket_aplic(RISCVVirtState *s,
269
+ const MemMapEntry *memmap, int socket,
270
+ uint32_t msi_m_phandle,
271
+ uint32_t msi_s_phandle,
272
+ uint32_t *phandle,
273
+ uint32_t *intc_phandles,
274
+ uint32_t *aplic_phandles)
68
+{
275
+{
69
+ if (env->priv_ver < PRIV_VERSION_1_11_0) {
276
+ char *aplic_name;
70
+ return RISCV_EXCP_ILLEGAL_INST;
277
+ unsigned long aplic_addr;
71
+ }
278
+ MachineState *ms = MACHINE(s);
72
+
279
+ uint32_t aplic_m_phandle, aplic_s_phandle;
73
+ env->mcountinhibit = val;
280
+
74
+ return RISCV_EXCP_NONE;
281
+ aplic_m_phandle = (*phandle)++;
75
+}
282
+ aplic_s_phandle = (*phandle)++;
76
+
283
+
77
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
284
+ if (!kvm_enabled()) {
78
target_ulong *val)
285
+ /* M-level APLIC node */
79
{
286
+ aplic_addr = memmap[VIRT_APLIC_M].base +
80
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
287
+ (memmap[VIRT_APLIC_M].size * socket);
81
[CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_zero },
288
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
82
[CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_zero },
289
+ msi_m_phandle, intc_phandles,
83
290
+ aplic_m_phandle, aplic_s_phandle,
84
+ [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
291
+ true);
85
+ write_mcountinhibit },
292
}
86
+
293
+
87
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
294
+ /* S-level APLIC node */
88
[CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
295
aplic_addr = memmap[VIRT_APLIC_S].base +
89
[CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
296
(memmap[VIRT_APLIC_S].size * socket);
90
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
297
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
91
index XXXXXXX..XXXXXXX 100644
298
+ msi_s_phandle, intc_phandles,
92
--- a/target/riscv/machine.c
299
+ aplic_s_phandle, 0,
93
+++ b/target/riscv/machine.c
300
+ false);
94
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
301
+
95
VMSTATE_UINTTL(env.siselect, RISCVCPU),
302
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
96
VMSTATE_UINTTL(env.scounteren, RISCVCPU),
303
- qemu_fdt_add_subnode(ms->fdt, aplic_name);
97
VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
304
- qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
98
+ VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
305
- qemu_fdt_setprop_cell(ms->fdt, aplic_name,
99
VMSTATE_UINTTL(env.sscratch, RISCVCPU),
306
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
100
VMSTATE_UINTTL(env.mscratch, RISCVCPU),
307
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
101
VMSTATE_UINT64(env.mfromhost, RISCVCPU),
308
- if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
309
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
310
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
311
- } else {
312
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
313
- msi_s_phandle);
314
- }
315
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
316
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
317
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
318
- VIRT_IRQCHIP_NUM_SOURCES);
319
- riscv_socket_fdt_write_id(ms, aplic_name, socket);
320
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_s_phandle);
321
322
if (!socket) {
323
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
324
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
325
326
g_free(aplic_name);
327
328
- g_free(aplic_cells);
329
aplic_phandles[socket] = aplic_s_phandle;
330
}
331
332
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
333
int i;
334
hwaddr addr;
335
uint32_t guest_bits;
336
- DeviceState *aplic_m;
337
- bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
338
+ DeviceState *aplic_s = NULL;
339
+ DeviceState *aplic_m = NULL;
340
+ bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
341
342
if (msimode) {
343
- /* Per-socket M-level IMSICs */
344
- addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
345
- for (i = 0; i < hart_count; i++) {
346
- riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
347
- base_hartid + i, true, 1,
348
- VIRT_IRQCHIP_NUM_MSIS);
349
+ if (!kvm_enabled()) {
350
+ /* Per-socket M-level IMSICs */
351
+ addr = memmap[VIRT_IMSIC_M].base +
352
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
353
+ for (i = 0; i < hart_count; i++) {
354
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
355
+ base_hartid + i, true, 1,
356
+ VIRT_IRQCHIP_NUM_MSIS);
357
+ }
358
}
359
360
/* Per-socket S-level IMSICs */
361
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
362
}
363
}
364
365
- /* Per-socket M-level APLIC */
366
- aplic_m = riscv_aplic_create(
367
- memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
368
- memmap[VIRT_APLIC_M].size,
369
- (msimode) ? 0 : base_hartid,
370
- (msimode) ? 0 : hart_count,
371
- VIRT_IRQCHIP_NUM_SOURCES,
372
- VIRT_IRQCHIP_NUM_PRIO_BITS,
373
- msimode, true, NULL);
374
-
375
- if (aplic_m) {
376
- /* Per-socket S-level APLIC */
377
- riscv_aplic_create(
378
- memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
379
- memmap[VIRT_APLIC_S].size,
380
- (msimode) ? 0 : base_hartid,
381
- (msimode) ? 0 : hart_count,
382
- VIRT_IRQCHIP_NUM_SOURCES,
383
- VIRT_IRQCHIP_NUM_PRIO_BITS,
384
- msimode, false, aplic_m);
385
+ if (!kvm_enabled()) {
386
+ /* Per-socket M-level APLIC */
387
+ aplic_m = riscv_aplic_create(memmap[VIRT_APLIC_M].base +
388
+ socket * memmap[VIRT_APLIC_M].size,
389
+ memmap[VIRT_APLIC_M].size,
390
+ (msimode) ? 0 : base_hartid,
391
+ (msimode) ? 0 : hart_count,
392
+ VIRT_IRQCHIP_NUM_SOURCES,
393
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
394
+ msimode, true, NULL);
395
}
396
397
- return aplic_m;
398
+ /* Per-socket S-level APLIC */
399
+ aplic_s = riscv_aplic_create(memmap[VIRT_APLIC_S].base +
400
+ socket * memmap[VIRT_APLIC_S].size,
401
+ memmap[VIRT_APLIC_S].size,
402
+ (msimode) ? 0 : base_hartid,
403
+ (msimode) ? 0 : hart_count,
404
+ VIRT_IRQCHIP_NUM_SOURCES,
405
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
406
+ msimode, false, aplic_m);
407
+
408
+ return kvm_enabled() ? aplic_s : aplic_m;
409
}
410
411
static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
102
--
412
--
103
2.36.1
413
2.41.0
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
We check the in-kernel irqchip support when using KVM acceleration.
4
5
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
6
Reviewed-by: Jim Shu <jim.shu@sifive.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Message-ID: <20230727102439.22554-3-yongxuan.wang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/kvm.c | 10 +++++++++-
13
1 file changed, 9 insertions(+), 1 deletion(-)
14
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/kvm.c
18
+++ b/target/riscv/kvm.c
19
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init(MachineState *ms, KVMState *s)
20
21
int kvm_arch_irqchip_create(KVMState *s)
22
{
23
- return 0;
24
+ if (kvm_kernel_irqchip_split()) {
25
+ error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
26
+ exit(1);
27
+ }
28
+
29
+ /*
30
+ * We can create the VAIA using the newer device control API.
31
+ */
32
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
33
}
34
35
int kvm_arch_process_async_events(CPUState *cs)
36
--
37
2.41.0
1
From: Atish Patra <atish.patra@wdc.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
Currently, the predicate function for PMU related CSRs only works if
3
We create a vAIA chip by using the KVM_DEV_TYPE_RISCV_AIA and then set up
4
virtualization is enabled. It also does not check mcounteren bits before
4
the chip with the KVM_DEV_RISCV_AIA_GRP_* APIs.
5
before cycle/minstret/hpmcounterx access.
5
We also extend the KVM accelerator to allow specifying the KVM AIA mode. The "riscv-aia"
6
parameter is passed along with --accel on the QEMU command line.
7
1) "riscv-aia=emul": IMSIC is emulated by hypervisor
8
2) "riscv-aia=hwaccel": use hardware guest IMSIC
9
3) "riscv-aia=auto": use the hardware guest IMSICs whenever available
10
otherwise we fall back to software emulation.
6
11
7
Support supervisor mode access in the predicate function as well.
12
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
8
13
Reviewed-by: Jim Shu <jim.shu@sifive.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
15
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Signed-off-by: Atish Patra <atish.patra@wdc.com>
16
Message-ID: <20230727102439.22554-4-yongxuan.wang@sifive.com>
12
Signed-off-by: Atish Patra <atishp@rivosinc.com>
13
Message-Id: <20220620231603.2547260-3-atishp@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
18
---
16
target/riscv/csr.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++
19
target/riscv/kvm_riscv.h | 4 +
17
1 file changed, 51 insertions(+)
20
target/riscv/kvm.c | 186 +++++++++++++++++++++++++++++++++++++++
21
2 files changed, 190 insertions(+)
18
22
19
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
23
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
20
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/csr.c
25
--- a/target/riscv/kvm_riscv.h
22
+++ b/target/riscv/csr.c
26
+++ b/target/riscv/kvm_riscv.h
23
@@ -XXX,XX +XXX,XX @@ static RISCVException ctr(CPURISCVState *env, int csrno)
27
@@ -XXX,XX +XXX,XX @@
24
return RISCV_EXCP_ILLEGAL_INST;
28
void kvm_riscv_init_user_properties(Object *cpu_obj);
25
}
29
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
26
30
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
27
+ if (env->priv == PRV_S) {
31
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
28
+ switch (csrno) {
32
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
29
+ case CSR_CYCLE:
33
+ uint64_t aplic_base, uint64_t imsic_base,
30
+ if (!get_field(env->mcounteren, COUNTEREN_CY)) {
34
+ uint64_t guest_num);
31
+ return RISCV_EXCP_ILLEGAL_INST;
35
32
+ }
36
#endif
33
+ break;
37
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
34
+ case CSR_TIME:
38
index XXXXXXX..XXXXXXX 100644
35
+ if (!get_field(env->mcounteren, COUNTEREN_TM)) {
39
--- a/target/riscv/kvm.c
36
+ return RISCV_EXCP_ILLEGAL_INST;
40
+++ b/target/riscv/kvm.c
37
+ }
41
@@ -XXX,XX +XXX,XX @@
38
+ break;
42
#include "exec/address-spaces.h"
39
+ case CSR_INSTRET:
43
#include "hw/boards.h"
40
+ if (!get_field(env->mcounteren, COUNTEREN_IR)) {
44
#include "hw/irq.h"
41
+ return RISCV_EXCP_ILLEGAL_INST;
45
+#include "hw/intc/riscv_imsic.h"
42
+ }
46
#include "qemu/log.h"
43
+ break;
47
#include "hw/loader.h"
44
+ case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
48
#include "kvm_riscv.h"
45
+ ctr_index = csrno - CSR_CYCLE;
49
@@ -XXX,XX +XXX,XX @@
46
+ if (!get_field(env->mcounteren, 1 << ctr_index)) {
50
#include "chardev/char-fe.h"
47
+ return RISCV_EXCP_ILLEGAL_INST;
51
#include "migration/migration.h"
48
+ }
52
#include "sysemu/runstate.h"
49
+ break;
53
+#include "hw/riscv/numa.h"
54
55
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
56
uint64_t idx)
57
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_cpu_check_are_resettable(void)
58
return true;
59
}
60
61
+static int aia_mode;
62
+
63
+static const char *kvm_aia_mode_str(uint64_t mode)
64
+{
65
+ switch (mode) {
66
+ case KVM_DEV_RISCV_AIA_MODE_EMUL:
67
+ return "emul";
68
+ case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
69
+ return "hwaccel";
70
+ case KVM_DEV_RISCV_AIA_MODE_AUTO:
71
+ default:
72
+ return "auto";
73
+ };
74
+}
75
+
76
+static char *riscv_get_kvm_aia(Object *obj, Error **errp)
77
+{
78
+ return g_strdup(kvm_aia_mode_str(aia_mode));
79
+}
80
+
81
+static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
82
+{
83
+ if (!strcmp(val, "emul")) {
84
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
85
+ } else if (!strcmp(val, "hwaccel")) {
86
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
87
+ } else if (!strcmp(val, "auto")) {
88
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
89
+ } else {
90
+ error_setg(errp, "Invalid KVM AIA mode");
91
+ error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
92
+ }
93
+}
94
+
95
void kvm_arch_accel_class_init(ObjectClass *oc)
96
{
97
+ object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
98
+ riscv_set_kvm_aia);
99
+ object_class_property_set_description(oc, "riscv-aia",
100
+ "Set KVM AIA mode. Valid values are "
101
+ "emul, hwaccel, and auto. Default "
102
+ "is auto.");
103
+ object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
104
+ "auto");
105
+}
106
+
107
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
108
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
109
+ uint64_t aplic_base, uint64_t imsic_base,
110
+ uint64_t guest_num)
111
+{
112
+ int ret, i;
113
+ int aia_fd = -1;
114
+ uint64_t default_aia_mode;
115
+ uint64_t socket_count = riscv_socket_count(machine);
116
+ uint64_t max_hart_per_socket = 0;
117
+ uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
118
+ uint64_t socket_bits, hart_bits, guest_bits;
119
+
120
+ aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
121
+
122
+ if (aia_fd < 0) {
123
+ error_report("Unable to create in-kernel irqchip");
124
+ exit(1);
125
+ }
126
+
127
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
128
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
129
+ &default_aia_mode, false, NULL);
130
+ if (ret < 0) {
131
+ error_report("KVM AIA: failed to get current KVM AIA mode");
132
+ exit(1);
133
+ }
134
+ qemu_log("KVM AIA: default mode is %s\n",
135
+ kvm_aia_mode_str(default_aia_mode));
136
+
137
+ if (default_aia_mode != aia_mode) {
138
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
139
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
140
+ &aia_mode, true, NULL);
141
+ if (ret < 0)
142
+ warn_report("KVM AIA: failed to set KVM AIA mode");
143
+ else
144
+ qemu_log("KVM AIA: set current mode to %s\n",
145
+ kvm_aia_mode_str(aia_mode));
146
+ }
147
+
148
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
149
+ KVM_DEV_RISCV_AIA_CONFIG_SRCS,
150
+ &aia_irq_num, true, NULL);
151
+ if (ret < 0) {
152
+ error_report("KVM AIA: failed to set number of input irq lines");
153
+ exit(1);
154
+ }
155
+
156
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
157
+ KVM_DEV_RISCV_AIA_CONFIG_IDS,
158
+ &aia_msi_num, true, NULL);
159
+ if (ret < 0) {
160
+ error_report("KVM AIA: failed to set number of msi");
161
+ exit(1);
162
+ }
163
+
164
+ socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
165
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
166
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
167
+ &socket_bits, true, NULL);
168
+ if (ret < 0) {
169
+ error_report("KVM AIA: failed to set group_bits");
170
+ exit(1);
171
+ }
172
+
173
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
174
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
175
+ &group_shift, true, NULL);
176
+ if (ret < 0) {
177
+ error_report("KVM AIA: failed to set group_shift");
178
+ exit(1);
179
+ }
180
+
181
+ guest_bits = guest_num == 0 ? 0 :
182
+ find_last_bit(&guest_num, BITS_PER_LONG) + 1;
183
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
184
+ KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
185
+ &guest_bits, true, NULL);
186
+ if (ret < 0) {
187
+ error_report("KVM AIA: failed to set guest_bits");
188
+ exit(1);
189
+ }
190
+
191
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
192
+ KVM_DEV_RISCV_AIA_ADDR_APLIC,
193
+ &aplic_base, true, NULL);
194
+ if (ret < 0) {
195
+ error_report("KVM AIA: failed to set the base address of APLIC");
196
+ exit(1);
197
+ }
198
+
199
+ for (socket = 0; socket < socket_count; socket++) {
200
+ socket_imsic_base = imsic_base + socket * (1U << group_shift);
201
+ hart_count = riscv_socket_hart_count(machine, socket);
202
+ base_hart = riscv_socket_first_hartid(machine, socket);
203
+
204
+ if (max_hart_per_socket < hart_count) {
205
+ max_hart_per_socket = hart_count;
50
+ }
206
+ }
51
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
207
+
52
+ switch (csrno) {
208
+ for (i = 0; i < hart_count; i++) {
53
+ case CSR_CYCLEH:
209
+ imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
54
+ if (!get_field(env->mcounteren, COUNTEREN_CY)) {
210
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
55
+ return RISCV_EXCP_ILLEGAL_INST;
211
+ KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
56
+ }
212
+ &imsic_addr, true, NULL);
57
+ break;
213
+ if (ret < 0) {
58
+ case CSR_TIMEH:
214
+ error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
59
+ if (!get_field(env->mcounteren, COUNTEREN_TM)) {
215
+ exit(1);
60
+ return RISCV_EXCP_ILLEGAL_INST;
61
+ }
62
+ break;
63
+ case CSR_INSTRETH:
64
+ if (!get_field(env->mcounteren, COUNTEREN_IR)) {
65
+ return RISCV_EXCP_ILLEGAL_INST;
66
+ }
67
+ break;
68
+ case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
69
+ ctr_index = csrno - CSR_CYCLEH;
70
+ if (!get_field(env->mcounteren, 1 << ctr_index)) {
71
+ return RISCV_EXCP_ILLEGAL_INST;
72
+ }
73
+ break;
74
+ }
216
+ }
75
+ }
217
+ }
76
+ }
218
+ }
77
+
219
+
78
if (riscv_cpu_virt_enabled(env)) {
220
+ hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
79
switch (csrno) {
221
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
80
case CSR_CYCLE:
222
+ KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
223
+ &hart_bits, true, NULL);
224
+ if (ret < 0) {
225
+ error_report("KVM AIA: failed to set hart_bits");
226
+ exit(1);
227
+ }
228
+
229
+ if (kvm_has_gsi_routing()) {
230
+ for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
231
+ /* KVM AIA only has one APLIC instance */
232
+ kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
233
+ }
234
+ kvm_gsi_routing_allowed = true;
235
+ kvm_irqchip_commit_routes(kvm_state);
236
+ }
237
+
238
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
239
+ KVM_DEV_RISCV_AIA_CTRL_INIT,
240
+ NULL, true, NULL);
241
+ if (ret < 0) {
242
+ error_report("KVM AIA: initialized fail");
243
+ exit(1);
244
+ }
245
+
246
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
247
}
81
--
248
--
82
2.36.1
249
2.41.0
diff view generated by jsdifflib
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
KVM AIA can't emulate APLIC only. When the "aia=aplic" parameter is passed,
the APLIC device is emulated by QEMU. For "aia=aplic-imsic", remove the
mmio operations of the APLIC when using KVM AIA and send wired interrupt
signals via the KVM_IRQ_LINE API.
Once KVM AIA is enabled, MSI messages are delivered via the KVM_SIGNAL_MSI API
when the IMSICs receive mmio write requests.
9
10
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
11
Reviewed-by: Jim Shu <jim.shu@sifive.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
14
Message-ID: <20230727102439.22554-5-yongxuan.wang@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
hw/intc/riscv_aplic.c | 56 ++++++++++++++++++++++++++++++-------------
18
hw/intc/riscv_imsic.c | 25 +++++++++++++++----
19
2 files changed, 61 insertions(+), 20 deletions(-)
20
21
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/intc/riscv_aplic.c
24
+++ b/hw/intc/riscv_aplic.c
25
@@ -XXX,XX +XXX,XX @@
26
#include "hw/irq.h"
27
#include "target/riscv/cpu.h"
28
#include "sysemu/sysemu.h"
29
+#include "sysemu/kvm.h"
30
#include "migration/vmstate.h"
31
32
#define APLIC_MAX_IDC (1UL << 14)
33
@@ -XXX,XX +XXX,XX @@
34
35
#define APLIC_IDC_CLAIMI 0x1c
36
37
+/*
38
+ * KVM AIA only supports APLIC MSI, fallback to QEMU emulation if we want to use
39
+ * APLIC Wired.
40
+ */
41
+static bool is_kvm_aia(bool msimode)
42
+{
43
+ return kvm_irqchip_in_kernel() && msimode;
44
+}
45
+
46
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
47
uint32_t word)
48
{
49
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
50
return topi;
51
}
52
53
+static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
54
+{
55
+ kvm_set_irq(kvm_state, irq, !!level);
56
+}
57
+
58
static void riscv_aplic_request(void *opaque, int irq, int level)
59
{
60
bool update = false;
61
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
62
uint32_t i;
63
RISCVAPLICState *aplic = RISCV_APLIC(dev);
64
65
- aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
66
- aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
67
- aplic->state = g_new0(uint32_t, aplic->num_irqs);
68
- aplic->target = g_new0(uint32_t, aplic->num_irqs);
69
- if (!aplic->msimode) {
70
- for (i = 0; i < aplic->num_irqs; i++) {
71
- aplic->target[i] = 1;
72
+ if (!is_kvm_aia(aplic->msimode)) {
73
+ aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
74
+ aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
75
+ aplic->state = g_new0(uint32_t, aplic->num_irqs);
76
+ aplic->target = g_new0(uint32_t, aplic->num_irqs);
77
+ if (!aplic->msimode) {
78
+ for (i = 0; i < aplic->num_irqs; i++) {
79
+ aplic->target[i] = 1;
80
+ }
81
}
82
- }
83
- aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
84
- aplic->iforce = g_new0(uint32_t, aplic->num_harts);
85
- aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
86
+ aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
87
+ aplic->iforce = g_new0(uint32_t, aplic->num_harts);
88
+ aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
89
90
- memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic,
91
- TYPE_RISCV_APLIC, aplic->aperture_size);
92
- sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
93
+ memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops,
94
+ aplic, TYPE_RISCV_APLIC, aplic->aperture_size);
95
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
96
+ }
97
98
/*
99
* Only root APLICs have hardware IRQ lines. All non-root APLICs
100
* have IRQ lines delegated by their parent APLIC.
101
*/
102
if (!aplic->parent) {
103
- qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
104
+ if (is_kvm_aia(aplic->msimode)) {
105
+ qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
106
+ } else {
107
+ qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
108
+ }
109
}
110
111
/* Create output IRQ lines for non-MSI mode */
112
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
113
qdev_prop_set_bit(dev, "mmode", mmode);
114
115
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
116
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
117
+
118
+ if (!is_kvm_aia(msimode)) {
119
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
120
+ }
121
122
if (parent) {
123
riscv_aplic_add_child(parent, dev);
124
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
125
index XXXXXXX..XXXXXXX 100644
126
--- a/hw/intc/riscv_imsic.c
127
+++ b/hw/intc/riscv_imsic.c
128
@@ -XXX,XX +XXX,XX @@
129
#include "target/riscv/cpu.h"
130
#include "target/riscv/cpu_bits.h"
131
#include "sysemu/sysemu.h"
132
+#include "sysemu/kvm.h"
133
#include "migration/vmstate.h"
134
135
#define IMSIC_MMIO_PAGE_LE 0x00
136
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
137
goto err;
138
}
139
140
+#if defined(CONFIG_KVM)
141
+ if (kvm_irqchip_in_kernel()) {
142
+ struct kvm_msi msi;
143
+
144
+ msi.address_lo = extract64(imsic->mmio.addr + addr, 0, 32);
145
+ msi.address_hi = extract64(imsic->mmio.addr + addr, 32, 32);
146
+ msi.data = le32_to_cpu(value);
147
+
148
+ kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
149
+
150
+ return;
151
+ }
152
+#endif
153
+
154
/* Writes only supported for MSI little-endian registers */
155
page = addr >> IMSIC_MMIO_PAGE_SHIFT;
156
if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
157
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
158
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
159
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
160
161
- imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
162
- imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
163
- imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
164
- imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
165
+ if (!kvm_irqchip_in_kernel()) {
166
+ imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
167
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
168
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
169
+ imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
170
+ }
171
172
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
173
imsic, TYPE_RISCV_IMSIC,
174
--
175
2.41.0
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
Select KVM AIA when the host kernel has in-kernel AIA chip support.
4
Since KVM AIA only has one APLIC instance, we map the QEMU APLIC
5
devices to KVM APLIC.
6
7
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
8
Reviewed-by: Jim Shu <jim.shu@sifive.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Message-ID: <20230727102439.22554-6-yongxuan.wang@sifive.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
hw/riscv/virt.c | 94 +++++++++++++++++++++++++++++++++----------------
15
1 file changed, 63 insertions(+), 31 deletions(-)
16
17
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/riscv/virt.c
20
+++ b/hw/riscv/virt.c
21
@@ -XXX,XX +XXX,XX @@
22
#include "hw/riscv/virt.h"
23
#include "hw/riscv/boot.h"
24
#include "hw/riscv/numa.h"
25
+#include "kvm_riscv.h"
26
#include "hw/intc/riscv_aclint.h"
27
#include "hw/intc/riscv_aplic.h"
28
#include "hw/intc/riscv_imsic.h"
29
@@ -XXX,XX +XXX,XX @@
30
#error "Can't accommodate all IMSIC groups in address space"
31
#endif
32
33
+/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
34
+static bool virt_use_kvm_aia(RISCVVirtState *s)
35
+{
36
+ return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
37
+}
38
+
39
static const MemMapEntry virt_memmap[] = {
40
[VIRT_DEBUG] = { 0x0, 0x100 },
41
[VIRT_MROM] = { 0x1000, 0xf000 },
42
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
43
uint32_t *intc_phandles,
44
uint32_t aplic_phandle,
45
uint32_t aplic_child_phandle,
46
- bool m_mode)
47
+ bool m_mode, int num_harts)
48
{
49
int cpu;
50
char *aplic_name;
51
uint32_t *aplic_cells;
52
MachineState *ms = MACHINE(s);
53
54
- aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
55
+ aplic_cells = g_new0(uint32_t, num_harts * 2);
56
57
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
58
+ for (cpu = 0; cpu < num_harts; cpu++) {
59
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
60
aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
61
}
62
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
63
64
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
65
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
66
- aplic_cells,
67
- s->soc[socket].num_harts * sizeof(uint32_t) * 2);
68
+ aplic_cells, num_harts * sizeof(uint32_t) * 2);
69
} else {
70
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
71
}
72
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
73
uint32_t msi_s_phandle,
74
uint32_t *phandle,
75
uint32_t *intc_phandles,
76
- uint32_t *aplic_phandles)
77
+ uint32_t *aplic_phandles,
78
+ int num_harts)
79
{
80
char *aplic_name;
81
unsigned long aplic_addr;
82
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
83
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
84
msi_m_phandle, intc_phandles,
85
aplic_m_phandle, aplic_s_phandle,
86
- true);
87
+ true, num_harts);
88
}
89
90
/* S-level APLIC node */
91
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
92
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
93
msi_s_phandle, intc_phandles,
94
aplic_s_phandle, 0,
95
- false);
96
+ false, num_harts);
97
98
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
99
100
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
101
*msi_pcie_phandle = msi_s_phandle;
102
}
103
104
- phandle_pos = ms->smp.cpus;
105
- for (socket = (socket_count - 1); socket >= 0; socket--) {
106
- phandle_pos -= s->soc[socket].num_harts;
107
-
108
- if (s->aia_type == VIRT_AIA_TYPE_NONE) {
109
- create_fdt_socket_plic(s, memmap, socket, phandle,
110
- &intc_phandles[phandle_pos], xplic_phandles);
111
- } else {
112
- create_fdt_socket_aplic(s, memmap, socket,
113
- msi_m_phandle, msi_s_phandle, phandle,
114
- &intc_phandles[phandle_pos], xplic_phandles);
115
+ /* KVM AIA only has one APLIC instance */
116
+ if (virt_use_kvm_aia(s)) {
117
+ create_fdt_socket_aplic(s, memmap, 0,
118
+ msi_m_phandle, msi_s_phandle, phandle,
119
+ &intc_phandles[0], xplic_phandles,
120
+ ms->smp.cpus);
121
+ } else {
122
+ phandle_pos = ms->smp.cpus;
123
+ for (socket = (socket_count - 1); socket >= 0; socket--) {
124
+ phandle_pos -= s->soc[socket].num_harts;
125
+
126
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
127
+ create_fdt_socket_plic(s, memmap, socket, phandle,
128
+ &intc_phandles[phandle_pos],
129
+ xplic_phandles);
130
+ } else {
131
+ create_fdt_socket_aplic(s, memmap, socket,
132
+ msi_m_phandle, msi_s_phandle, phandle,
133
+ &intc_phandles[phandle_pos],
134
+ xplic_phandles,
135
+ s->soc[socket].num_harts);
136
+ }
137
}
138
}
139
140
g_free(intc_phandles);
141
142
- for (socket = 0; socket < socket_count; socket++) {
143
- if (socket == 0) {
144
- *irq_mmio_phandle = xplic_phandles[socket];
145
- *irq_virtio_phandle = xplic_phandles[socket];
146
- *irq_pcie_phandle = xplic_phandles[socket];
147
- }
148
- if (socket == 1) {
149
- *irq_virtio_phandle = xplic_phandles[socket];
150
- *irq_pcie_phandle = xplic_phandles[socket];
151
- }
152
- if (socket == 2) {
153
- *irq_pcie_phandle = xplic_phandles[socket];
154
+ if (virt_use_kvm_aia(s)) {
155
+ *irq_mmio_phandle = xplic_phandles[0];
156
+ *irq_virtio_phandle = xplic_phandles[0];
157
+ *irq_pcie_phandle = xplic_phandles[0];
158
+ } else {
159
+ for (socket = 0; socket < socket_count; socket++) {
160
+ if (socket == 0) {
161
+ *irq_mmio_phandle = xplic_phandles[socket];
162
+ *irq_virtio_phandle = xplic_phandles[socket];
163
+ *irq_pcie_phandle = xplic_phandles[socket];
164
+ }
165
+ if (socket == 1) {
166
+ *irq_virtio_phandle = xplic_phandles[socket];
167
+ *irq_pcie_phandle = xplic_phandles[socket];
168
+ }
169
+ if (socket == 2) {
170
+ *irq_pcie_phandle = xplic_phandles[socket];
171
+ }
172
}
173
}
174
175
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
176
}
177
}
178
179
+ if (virt_use_kvm_aia(s)) {
180
+ kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
181
+ VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
182
+ memmap[VIRT_APLIC_S].base,
183
+ memmap[VIRT_IMSIC_S].base,
184
+ s->aia_guests);
185
+ }
186
+
187
if (riscv_is_32bit(&s->soc[0])) {
188
#if HOST_LONG_BITS == 64
189
/* limit RAM size in a 32-bit system */
190
--
191
2.41.0
New patch
1
From: Conor Dooley <conor.dooley@microchip.com>
1
2
3
On a dtb dumped from the virt machine, dt-validate complains:
4
soc: pmu: {'riscv,event-to-mhpmcounters': [[1, 1, 524281], [2, 2, 524284], [65561, 65561, 524280], [65563, 65563, 524280], [65569, 65569, 524280]], 'compatible': ['riscv,pmu']} should not be valid under {'type': 'object'}
5
from schema $id: http://devicetree.org/schemas/simple-bus.yaml#
6
That's pretty cryptic, but running the dtb back through dtc produces
7
something a lot more reasonable:
8
Warning (simple_bus_reg): /soc/pmu: missing or empty reg/ranges property
9
10
Moving the riscv,pmu node out of the soc bus solves the problem.
11
12
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Message-ID: <20230727-groom-decline-2c57ce42841c@spud>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
hw/riscv/virt.c | 2 +-
19
1 file changed, 1 insertion(+), 1 deletion(-)
20
21
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/riscv/virt.c
24
+++ b/hw/riscv/virt.c
25
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pmu(RISCVVirtState *s)
26
MachineState *ms = MACHINE(s);
27
RISCVCPU hart = s->soc[0].harts[0];
28
29
- pmu_name = g_strdup_printf("/soc/pmu");
30
+ pmu_name = g_strdup_printf("/pmu");
31
qemu_fdt_add_subnode(ms->fdt, pmu_name);
32
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
33
riscv_pmu_generate_fdt_node(ms->fdt, hart.cfg.pmu_num, pmu_name);
34
--
35
2.41.0
1
From: Weiwei Li <liweiwei@iscas.ac.cn>

The Svadu specification updated the name of the *envcfg bit from
HADE to ADUE.

Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-ID: <20230816141916.66898-1-liweiwei@iscas.ac.cn>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/cpu_bits.h | 8 ++++----
target/riscv/cpu.c | 4 ++--
target/riscv/cpu_helper.c | 6 +++---
target/riscv/csr.c | 12 ++++++------
4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
#define MENVCFG_CBIE (3UL << 4)
#define MENVCFG_CBCFE BIT(6)
#define MENVCFG_CBZE BIT(7)
-#define MENVCFG_HADE (1ULL << 61)
+#define MENVCFG_ADUE (1ULL << 61)
#define MENVCFG_PBMTE (1ULL << 62)
#define MENVCFG_STCE (1ULL << 63)

/* For RV32 */
-#define MENVCFGH_HADE BIT(29)
+#define MENVCFGH_ADUE BIT(29)
#define MENVCFGH_PBMTE BIT(30)
#define MENVCFGH_STCE BIT(31)

@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
#define HENVCFG_CBIE MENVCFG_CBIE
#define HENVCFG_CBCFE MENVCFG_CBCFE
#define HENVCFG_CBZE MENVCFG_CBZE
-#define HENVCFG_HADE MENVCFG_HADE
+#define HENVCFG_ADUE MENVCFG_ADUE
#define HENVCFG_PBMTE MENVCFG_PBMTE
#define HENVCFG_STCE MENVCFG_STCE

/* For RV32 */
-#define HENVCFGH_HADE MENVCFGH_HADE
+#define HENVCFGH_ADUE MENVCFGH_ADUE
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
#define HENVCFGH_STCE MENVCFGH_STCE

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)
env->two_stage_lookup = false;

env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
-               (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
+               (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
-               (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
+               (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);

/* Initialized default priorities of local interrupts. */
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
}

bool pbmte = env->menvcfg & MENVCFG_PBMTE;
-    bool hade = env->menvcfg & MENVCFG_HADE;
+    bool adue = env->menvcfg & MENVCFG_ADUE;

if (first_stage && two_stage && env->virt_enabled) {
pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
-        hade = hade && (env->henvcfg & HENVCFG_HADE);
+        adue = adue && (env->henvcfg & HENVCFG_ADUE);
}

int ptshift = (levels - 1) * ptidxbits;
@@ -XXX,XX +XXX,XX @@ restart:

/* Page table updates need to be atomic with MTTCG enabled */
if (updated_pte != pte && !is_debug) {
-        if (!hade) {
+        if (!adue) {
return TRANSLATE_FAIL;
}

diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
if (riscv_cpu_mxl(env) == MXL_RV64) {
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
-                (cfg->ext_svadu ? MENVCFG_HADE : 0);
+                (cfg->ext_svadu ? MENVCFG_ADUE : 0);
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
-                    (cfg->ext_svadu ? MENVCFG_HADE : 0);
+                    (cfg->ext_svadu ? MENVCFG_ADUE : 0);
uint64_t valh = (uint64_t)val << 32;

env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
* henvcfg.hade is read_only 0 when menvcfg.hade = 0
*/
-    *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
+    *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
env->menvcfg);
return RISCV_EXCP_NONE;
}
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
}

if (riscv_cpu_mxl(env) == MXL_RV64) {
-        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE);
+        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
}

env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
return ret;
}

-    *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
+    *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
env->menvcfg)) >> 32;
return RISCV_EXCP_NONE;
}
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
target_ulong val)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
-                                    HENVCFG_HADE);
+                                    HENVCFG_ADUE);
uint64_t valh = (uint64_t)val << 32;
RISCVException ret;

--
2.41.0
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
In the same emulated RISC-V host, the 'host' KVM CPU takes 4 times
4
longer to boot than the 'rv64' KVM CPU.
5
6
The reason is an unintended behavior of riscv_cpu_satp_mode_finalize()
7
when satp_mode.supported = 0, i.e. when cpu_init() does not set
8
satp_mode_max_supported(). satp_mode_max_from_map(map) does:
9
10
31 - __builtin_clz(map)
11
12
This means that, if satp_mode.supported = 0, satp_mode_supported_max
13
wil be '31 - 32'. But this is C, so satp_mode_supported_max will gladly
14
set it to UINT_MAX (4294967295). After that, if the user didn't set a
15
satp_mode, set_satp_mode_default_map(cpu) will make
16
17
cfg.satp_mode.map = cfg.satp_mode.supported
18
19
So satp_mode.map = 0. And then satp_mode_map_max will be set to
20
satp_mode_max_from_map(cpu->cfg.satp_mode.map), i.e. also UINT_MAX. The
21
guard "satp_mode_map_max > satp_mode_supported_max" doesn't protect us
22
here since both are UINT_MAX.
23
24
And finally we have 2 loops:
25
26
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
27
28
Which are, in fact, 2 loops from UINT_MAX -1 to -1. This is where the
29
extra delay when booting the 'host' CPU is coming from.
30
31
Commit 43d1de32f8 already set a precedence for satp_mode.supported = 0
32
in a different manner. We're doing the same here. If supported == 0,
33
interpret as 'the CPU wants the OS to handle satp mode alone' and skip
34
satp_mode_finalize().
35
36
We'll also put a guard in satp_mode_max_from_map() to assert out if map
37
is 0 since the function is not ready to deal with it.
38
39
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
40
Fixes: 6f23aaeb9b ("riscv: Allow user to set the satp mode")
41
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
42
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
43
Message-ID: <20230817152903.694926-1-dbarboza@ventanamicro.com>
44
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
45
---
46
target/riscv/cpu.c | 23 ++++++++++++++++++++---
47
1 file changed, 20 insertions(+), 3 deletions(-)
48
49
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/riscv/cpu.c
52
+++ b/target/riscv/cpu.c
53
@@ -XXX,XX +XXX,XX @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
54
55
uint8_t satp_mode_max_from_map(uint32_t map)
56
{
57
+ /*
58
+ * 'map = 0' will make us return (31 - 32), which C will
59
+ * happily overflow to UINT_MAX. There's no good result to
60
+ * return if 'map = 0' (e.g. returning 0 will be ambiguous
61
+ * with the result for 'map = 1').
62
+ *
63
+ * Assert out if map = 0. Callers will have to deal with
64
+ * it outside of this function.
65
+ */
66
+ g_assert(map > 0);
67
+
68
/* map here has at least one bit set, so no problem with clz */
69
return 31 - __builtin_clz(map);
70
}
71
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
72
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
73
{
74
bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
75
- uint8_t satp_mode_map_max;
76
- uint8_t satp_mode_supported_max =
77
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
78
+ uint8_t satp_mode_map_max, satp_mode_supported_max;
79
+
80
+ /* The CPU wants the OS to decide which satp mode to use */
81
+ if (cpu->cfg.satp_mode.supported == 0) {
82
+ return;
83
+ }
84
+
85
+ satp_mode_supported_max =
86
+ satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
87
88
if (cpu->cfg.satp_mode.map == 0) {
89
if (cpu->cfg.satp_mode.init == 0) {
90
--
91
2.41.0
1
From: Vineet Gupta <vineetg@rivosinc.com>

zicond is now codegen supported in both llvm and gcc.

This change allows seamless enabling/testing of zicond in downstream
projects. e.g. currently riscv-gnu-toolchain parses elf attributes
to create a cmdline for qemu but fails short of enabling it because of
the "x-" prefix.

Signed-off-by: Vineet Gupta <vineetg@rivosinc.com>
Message-ID: <20230808181715.436395-1-vineetg@rivosinc.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
+    DEFINE_PROP_BOOL("zicond", RISCVCPU, cfg.ext_zicond, false),

/* Vendor-specific custom extensions */
DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

/* These are experimental so mark with 'x-' */
-    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),

/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
--
2.41.0
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
A build with --enable-debug and without KVM will fail as follows:
4
5
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_riscv_virt.c.o: in function `virt_machine_init':
6
./qemu/build/../hw/riscv/virt.c:1465: undefined reference to `kvm_riscv_aia_create'
7
8
This happens because the code block with "if virt_use_kvm_aia(s)" isn't
9
being ignored by the debug build, resulting in an undefined reference to
10
a KVM only function.
11
12
Adding a 'kvm_enabled()' conditional together with virt_use_kvm_aia() will
13
make the compiler crop the kvm_riscv_aia_create() call entirely from a
14
non-KVM build. Note that adding the 'kvm_enabled()' conditional inside
15
virt_use_kvm_aia() won't fix the build because this function would need
16
to be inlined multiple times to make the compiler zero out the entire
17
block.
18
19
While we're at it, use kvm_enabled() in all instances where
20
virt_use_kvm_aia() is checked to allow the compiler to elide these other
21
kvm-only instances as well.
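
As a rough sketch of the mechanism being relied on (generic C, hypothetical
names, not QEMU's actual headers): when the accel predicate expands to a
compile-time 0, the guarded call never has to be emitted, so the link succeeds
even though the symbol is missing.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for a KVM-only function that is not built in this config. */
    extern void kvm_only_create(void);

    /* Stand-in for kvm_enabled(): a constant 0 when KVM is compiled out. */
    #define fake_kvm_enabled() 0

    static bool use_kvm_aia(void)
    {
        return true; /* some runtime decision */
    }

    int main(void)
    {
        /*
         * The constant-false left operand lets the compiler drop the whole
         * branch, including the reference to kvm_only_create(), which is
         * the effect the patch depends on for --enable-debug, non-KVM
         * builds.
         */
        if (fake_kvm_enabled() && use_kvm_aia()) {
            kvm_only_create();
        }
        printf("linked without KVM symbols\n");
        return 0;
    }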
22
23
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
24
Fixes: dbdb99948e ("target/riscv: select KVM AIA in riscv virt machine")
25
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
26
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
27
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Message-ID: <20230830133503.711138-2-dbarboza@ventanamicro.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
31
---
32
hw/riscv/virt.c | 6 +++---
33
1 file changed, 3 insertions(+), 3 deletions(-)
34
35
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/hw/riscv/virt.c
38
+++ b/hw/riscv/virt.c
39
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
40
}
41
42
/* KVM AIA only has one APLIC instance */
43
- if (virt_use_kvm_aia(s)) {
44
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
45
create_fdt_socket_aplic(s, memmap, 0,
46
msi_m_phandle, msi_s_phandle, phandle,
47
&intc_phandles[0], xplic_phandles,
48
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
49
50
g_free(intc_phandles);
51
52
- if (virt_use_kvm_aia(s)) {
53
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
54
*irq_mmio_phandle = xplic_phandles[0];
55
*irq_virtio_phandle = xplic_phandles[0];
56
*irq_pcie_phandle = xplic_phandles[0];
57
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
58
}
59
}
60
61
- if (virt_use_kvm_aia(s)) {
62
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
63
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
64
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
65
memmap[VIRT_APLIC_S].base,
66
--
67
2.41.0
68
69
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Commit 6df0b37e2ab breaks a --enable-debug build in a non-KVM
4
environment with the following error:
5
6
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_intc_riscv_aplic.c.o: in function `riscv_kvm_aplic_request':
7
./qemu/build/../hw/intc/riscv_aplic.c:486: undefined reference to `kvm_set_irq'
8
collect2: error: ld returned 1 exit status
9
10
This happens because the debug build will poke into the
11
'if (is_kvm_aia(aplic->msimode))' block and fail to find a reference to
12
the KVM only function riscv_kvm_aplic_request().
13
14
There are multiple solutions to fix this. We'll go with the same
15
solution from the previous patch, i.e. add a kvm_enabled() conditional
16
to filter out the block. But there's a catch: riscv_kvm_aplic_request()
17
is a local function that would end up being used if the compiler crops
18
the block, and this won't work. Quoting Richard Henderson's explanation
19
in [1]:
20
21
"(...) the compiler won't eliminate entire unused functions with -O0"
22
23
We'll solve it by moving riscv_kvm_aplic_request() to kvm.c and add its
24
declaration in kvm_riscv.h, where all other KVM specific public
25
functions are already declared. Other archs handles KVM specific code in
26
this manner and we expect to do the same from now on.
27
28
[1] https://lore.kernel.org/qemu-riscv/d2f1ad02-eb03-138f-9d08-db676deeed05@linaro.org/
29
30
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
32
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
33
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
34
Message-ID: <20230830133503.711138-3-dbarboza@ventanamicro.com>
35
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
36
---
37
target/riscv/kvm_riscv.h | 1 +
38
hw/intc/riscv_aplic.c | 8 ++------
39
target/riscv/kvm.c | 5 +++++
40
3 files changed, 8 insertions(+), 6 deletions(-)
41
42
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/riscv/kvm_riscv.h
45
+++ b/target/riscv/kvm_riscv.h
46
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
47
uint64_t aia_irq_num, uint64_t aia_msi_num,
48
uint64_t aplic_base, uint64_t imsic_base,
49
uint64_t guest_num);
50
+void riscv_kvm_aplic_request(void *opaque, int irq, int level);
51
52
#endif
53
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/intc/riscv_aplic.c
56
+++ b/hw/intc/riscv_aplic.c
57
@@ -XXX,XX +XXX,XX @@
58
#include "target/riscv/cpu.h"
59
#include "sysemu/sysemu.h"
60
#include "sysemu/kvm.h"
61
+#include "kvm_riscv.h"
62
#include "migration/vmstate.h"
63
64
#define APLIC_MAX_IDC (1UL << 14)
65
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
66
return topi;
67
}
68
69
-static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
70
-{
71
- kvm_set_irq(kvm_state, irq, !!level);
72
-}
73
-
74
static void riscv_aplic_request(void *opaque, int irq, int level)
75
{
76
bool update = false;
77
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
78
* have IRQ lines delegated by their parent APLIC.
79
*/
80
if (!aplic->parent) {
81
- if (is_kvm_aia(aplic->msimode)) {
82
+ if (kvm_enabled() && is_kvm_aia(aplic->msimode)) {
83
qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
84
} else {
85
qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
86
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/riscv/kvm.c
89
+++ b/target/riscv/kvm.c
90
@@ -XXX,XX +XXX,XX @@
91
#include "sysemu/runstate.h"
92
#include "hw/riscv/numa.h"
93
94
+void riscv_kvm_aplic_request(void *opaque, int irq, int level)
95
+{
96
+ kvm_set_irq(kvm_state, irq, !!level);
97
+}
98
+
99
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
100
uint64_t idx)
101
{
102
--
103
2.41.0
104
105
New patch
1
From: Robbin Ehn <rehn@rivosinc.com>
1
2
3
This patch adds the new extensions in
4
linux 6.5 to the hwprobe syscall.
5
6
It also fixes the RVC check to OR in the correct value:
the previous code ORed in pair->value, which happened to contain 0
at that point, so it still worked.
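
For illustration, a user-space consumer of the key/value pair could test the
new bits like this (a hedged sketch: the constants are copied from the hunk
below, this is not the kernel uAPI header):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit values matching the definitions added in the hunk below. */
    #define RISCV_HWPROBE_IMA_FD   (1 << 0)
    #define RISCV_HWPROBE_IMA_C    (1 << 1)
    #define RISCV_HWPROBE_IMA_V    (1 << 2)
    #define RISCV_HWPROBE_EXT_ZBA  (1 << 3)
    #define RISCV_HWPROBE_EXT_ZBB  (1 << 4)
    #define RISCV_HWPROBE_EXT_ZBS  (1 << 5)

    /* Print which extensions a RISCV_HWPROBE_KEY_IMA_EXT_0 value advertises. */
    static void report(uint64_t ima_ext_0)
    {
        printf("C:%d V:%d Zba:%d Zbb:%d Zbs:%d\n",
               !!(ima_ext_0 & RISCV_HWPROBE_IMA_C),
               !!(ima_ext_0 & RISCV_HWPROBE_IMA_V),
               !!(ima_ext_0 & RISCV_HWPROBE_EXT_ZBA),
               !!(ima_ext_0 & RISCV_HWPROBE_EXT_ZBB),
               !!(ima_ext_0 & RISCV_HWPROBE_EXT_ZBS));
    }

    int main(void)
    {
        /* Example value such as the emulated hwprobe call might fill in. */
        report(RISCV_HWPROBE_IMA_FD | RISCV_HWPROBE_IMA_C | RISCV_HWPROBE_EXT_ZBB);
        return 0;
    }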
9
10
Signed-off-by: Robbin Ehn <rehn@rivosinc.com>
11
Acked-by: Richard Henderson <richard.henderson@linaro.org>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <bc82203b72d7efb30f1b4a8f9eb3d94699799dc8.camel@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
linux-user/syscall.c | 14 +++++++++++++-
17
1 file changed, 13 insertions(+), 1 deletion(-)
18
19
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/linux-user/syscall.c
22
+++ b/linux-user/syscall.c
23
@@ -XXX,XX +XXX,XX @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
24
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
25
#define RISCV_HWPROBE_IMA_FD (1 << 0)
26
#define RISCV_HWPROBE_IMA_C (1 << 1)
27
+#define RISCV_HWPROBE_IMA_V (1 << 2)
28
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
29
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
30
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
31
32
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
33
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
34
@@ -XXX,XX +XXX,XX @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
35
riscv_has_ext(env, RVD) ?
36
RISCV_HWPROBE_IMA_FD : 0;
37
value |= riscv_has_ext(env, RVC) ?
38
- RISCV_HWPROBE_IMA_C : pair->value;
39
+ RISCV_HWPROBE_IMA_C : 0;
40
+ value |= riscv_has_ext(env, RVV) ?
41
+ RISCV_HWPROBE_IMA_V : 0;
42
+ value |= cfg->ext_zba ?
43
+ RISCV_HWPROBE_EXT_ZBA : 0;
44
+ value |= cfg->ext_zbb ?
45
+ RISCV_HWPROBE_EXT_ZBB : 0;
46
+ value |= cfg->ext_zbs ?
47
+ RISCV_HWPROBE_EXT_ZBS : 0;
48
__put_user(value, &pair->value);
49
break;
50
case RISCV_HWPROBE_KEY_CPUPERF_0:
51
--
52
2.41.0
New patch
1
From: Ard Biesheuvel <ardb@kernel.org>
1
2
3
Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
4
implement the first half of the key schedule derivation. This does not
5
actually involve shifting rows, so clone the same value into all four
6
columns of the AES vector to counter that operation.
7
8
Cc: Richard Henderson <richard.henderson@linaro.org>
9
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Cc: Palmer Dabbelt <palmer@dabbelt.com>
11
Cc: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-ID: <20230831154118.138727-1-ardb@kernel.org>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
target/riscv/crypto_helper.c | 17 +++++------------
19
1 file changed, 5 insertions(+), 12 deletions(-)
20
21
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/crypto_helper.c
24
+++ b/target/riscv/crypto_helper.c
25
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
26
27
uint8_t enc_rnum = rnum;
28
uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
29
- uint8_t rcon_ = 0;
30
- target_ulong result;
31
+ AESState t, rc = {};
32
33
if (enc_rnum != 0xA) {
34
temp = ror32(temp, 8); /* Rotate right by 8 */
35
- rcon_ = round_consts[enc_rnum];
36
+ rc.w[0] = rc.w[1] = round_consts[enc_rnum];
37
}
38
39
- temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
40
- ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
41
- ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
42
- ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
43
+ t.w[0] = t.w[1] = t.w[2] = t.w[3] = temp;
44
+ aesenc_SB_SR_AK(&t, &t, &rc, false);
45
46
- temp ^= rcon_;
47
-
48
- result = ((uint64_t)temp << 32) | temp;
49
-
50
- return result;
51
+ return t.d[0];
52
}
53
54
target_ulong HELPER(aes64im)(target_ulong rs1)
55
--
56
2.41.0
57
58
1
From: Akihiko Odaki <akihiko.odaki@daynix.com>

riscv_trigger_init() had been called on reset events that can happen
several times for a CPU and it allocated timers for itrigger. If old
timers were present, they were simply overwritten by the new timers,
resulting in a memory leak.

Divide riscv_trigger_init() into two functions, namely
riscv_trigger_realize() and riscv_trigger_reset() and call them in
appropriate timing. The timer allocation will happen only once for a
CPU in riscv_trigger_realize().

Fixes: 5a4ae64cac ("target/riscv: Add itrigger support when icount is enabled")
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20230818034059.9146-1-akihiko.odaki@daynix.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/debug.h | 3 ++-
target/riscv/cpu.c | 8 +++++++-
target/riscv/debug.c | 15 ++++++++++++---
3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/target/riscv/debug.h b/target/riscv/debug.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/debug.h
+++ b/target/riscv/debug.h
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_debug_excp_handler(CPUState *cs);
bool riscv_cpu_debug_check_breakpoint(CPUState *cs);
bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

-void riscv_trigger_init(CPURISCVState *env);
+void riscv_trigger_realize(CPURISCVState *env);
+void riscv_trigger_reset_hold(CPURISCVState *env);

bool riscv_itrigger_enabled(CPURISCVState *env);
void riscv_itrigger_update_priv(CPURISCVState *env);
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)

#ifndef CONFIG_USER_ONLY
if (cpu->cfg.debug) {
-        riscv_trigger_init(env);
+        riscv_trigger_reset_hold(env);
}

if (kvm_enabled()) {
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)

riscv_cpu_register_gdb_regs_for_features(cs);

+#ifndef CONFIG_USER_ONLY
+    if (cpu->cfg.debug) {
+        riscv_trigger_realize(&cpu->env);
+    }
+#endif
+
qemu_init_vcpu(cs);
cpu_reset(cs);

diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
return false;
}

-void riscv_trigger_init(CPURISCVState *env)
+void riscv_trigger_realize(CPURISCVState *env)
+{
+    int i;
+
+    for (i = 0; i < RV_MAX_TRIGGERS; i++) {
+        env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              riscv_itrigger_timer_cb, env);
+    }
+}
+
+void riscv_trigger_reset_hold(CPURISCVState *env)
{
target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0);
int i;
@@ -XXX,XX +XXX,XX @@ void riscv_trigger_init(CPURISCVState *env)
env->tdata3[i] = 0;
env->cpu_breakpoint[i] = NULL;
env->cpu_watchpoint[i] = NULL;
-        env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              riscv_itrigger_timer_cb, env);
+        timer_del(env->itrigger_timer[i]);
}
}
+ [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_zero },
179
+ [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_zero },
180
+ [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_zero },
181
+ [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_zero },
182
+ [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_zero },
183
+ [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_zero },
184
+ [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_zero },
185
+ [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_zero },
186
+ [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_zero },
187
+ [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_zero },
188
+ [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_zero },
189
+ [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_zero },
190
+ [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_zero },
191
+ [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_zero },
192
+ [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_zero },
193
+ [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_zero },
194
195
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
196
[CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
197
--
98
--
198
2.36.1
99
2.41.0
100
101
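
As a worked example of the counter-index mapping that the new ctr()/mctr()
predicates in the PMU patch above rely on (a standalone sketch, not part of
the patch; the CSR numbers are the architectural encodings rather than QEMU's
headers): the index is the distance from the first hpmcounter CSR, and on
RV32 the high-half CSRs are folded back onto the same index by bumping the
base by 0x80.

    #include <stdbool.h>
    #include <stdio.h>

    #define CSR_HPMCOUNTER3 0xC03
    #define CSR_CYCLEH      0xC80

    static int hpm_ctr_index(int csrno, bool rv32)
    {
        int base_csrno = CSR_HPMCOUNTER3;

        if (rv32 && csrno >= CSR_CYCLEH) {
            /* hpmcounterNh folds back onto the same index as hpmcounterN */
            base_csrno += 0x80;
        }
        return csrno - base_csrno;
    }

    int main(void)
    {
        /* hpmcounter17 (0xC11) and hpmcounter17h (0xC91) both map to index 14 */
        printf("%d %d\n", hpm_ctr_index(0xC11, false), hpm_ctr_index(0xC91, true));
        return 0;
    }

An access is then rejected when the resulting index is at or above the
configured pmu-num, which is what both predicates check.
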
From: Nicolas Pitre <nico@fluxnic.net>

For a TOR entry to match, the start address must be lower than the end
address. Normally this is always the case, but correct code might still
run into the following scenario:

Initial state:

    pmpaddr3 = 0x2000    pmp3cfg = OFF
    pmpaddr4 = 0x3000    pmp4cfg = TOR

Execution:

    1. write 0x40ff to pmpaddr3
    2. write 0x32ff to pmpaddr4
    3. set pmp3cfg to NAPOT with a read-modify-write on pmpcfg0
    4. set pmp4cfg to NAPOT with a read-modify-write on pmpcfg1

When (2) is emulated, a call to pmp_update_rule() creates a negative
range for pmp4 as pmp4cfg is still set to TOR. And when (3) is emulated,
a call to tlb_flush() is performed, causing pmp_get_tlb_size() to return
a very creatively large TLB size for pmp4. This, in turn, may result in
accesses to non-existent/uninitialized memory regions and a fault, so that
(4) ends up never being executed.

This is in m-mode with MPRV unset, meaning that unlocked PMP entries
should have no effect. Therefore such a behavior based on PMP content
is very unexpected.

Make sure no negative PMP range can be created, whether explicitly by
the emulated code or implicitly like the above.

Signed-off-by: Nicolas Pitre <nico@fluxnic.net>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <3oq0sqs1-67o0-145-5n1s-453o118804q@syhkavp.arg>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/pmp.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -XXX,XX +XXX,XX @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
     case PMP_AMATCH_TOR:
         sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
         ea = (this_addr << 2) - 1u;
+        if (sa > ea) {
+            sa = ea = 0u;
+        }
         break;
 
     case PMP_AMATCH_NA4:
--
2.36.1

From: Leon Schuermann <leons@opentitan.org>

When the rule-lock bypass (RLB) bit is set in the mseccfg CSR, the PMP
configuration lock bits must not apply. While this behavior is
implemented for the pmpcfgX CSRs, this bit is not respected for
changes to the pmpaddrX CSRs. This patch ensures that pmpaddrX CSR
writes work even on locked regions when the global rule-lock bypass is
enabled.

Signed-off-by: Leon Schuermann <leons@opentitan.org>
Reviewed-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20230829215046.1430463-1-leon@is.currently.online>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/pmp.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -XXX,XX +XXX,XX @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
  */
 static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
 {
+    /* mseccfg.RLB is set */
+    if (MSECCFG_RLB_ISSET(env)) {
+        return 0;
+    }
 
     if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
         return 1;
--
2.41.0
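
To restate the rule that Leon Schuermann's patch above implements, here is a
minimal standalone model (the struct and names are simplified stand-ins, not
QEMU's pmp_state): when mseccfg.RLB is set, the per-entry lock bit is ignored,
so pmpcfgX and pmpaddrX writes are both allowed to go through.

    #include <stdbool.h>
    #include <stdint.h>

    #define PMP_LOCK 0x80

    struct pmp_model {
        bool mseccfg_rlb;    /* mseccfg.RLB (rule-lock bypass) */
        uint8_t cfg[16];     /* pmpNcfg bytes */
    };

    /* With RLB set, the lock bit no longer blocks pmpcfgX or pmpaddrX writes */
    static bool pmp_entry_locked(const struct pmp_model *s, int i)
    {
        if (s->mseccfg_rlb) {
            return false;
        }
        return (s->cfg[i] & PMP_LOCK) != 0;
    }

    int main(void)
    {
        struct pmp_model s = { .mseccfg_rlb = true, .cfg = { [3] = PMP_LOCK } };
        return pmp_entry_locked(&s, 3) ? 1 : 0; /* 0: write allowed despite the lock bit */
    }
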
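
The scenario in Nicolas Pitre's commit message above can be reproduced with a
small standalone model of the TOR start/end computation (illustrative only,
not QEMU code): with pmpaddr3 = 0x40ff and pmpaddr4 = 0x32ff while pmp4cfg is
still TOR, the start address ends up above the end address, and the added
clamp collapses the would-be negative range to an empty one instead of handing
a bogus size to pmp_get_tlb_size().

    #include <inttypes.h>
    #include <stdio.h>

    /* Standalone model of the TOR start/end computation with the new clamp */
    static void tor_range(uint64_t prev_addr, uint64_t this_addr,
                          uint64_t *sa, uint64_t *ea)
    {
        *sa = prev_addr << 2;         /* shift up from [xx:0] to [xx+2:2] */
        *ea = (this_addr << 2) - 1u;
        if (*sa > *ea) {
            /* a "negative" range matches nothing: collapse it to zero */
            *sa = *ea = 0u;
        }
    }

    int main(void)
    {
        uint64_t sa, ea;

        /* step (2) of the scenario: pmpaddr3 = 0x40ff, pmpaddr4 = 0x32ff, still TOR */
        tor_range(0x40ff, 0x32ff, &sa, &ea);
        printf("sa=0x%" PRIx64 " ea=0x%" PRIx64 "\n", sa, ea);
        return 0;
    }
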
From: Alistair Francis <alistair.francis@wdc.com>

There is nothing in the RISC-V spec that mandates version 1.12 is
required for ePMP and there is currently hardware [1] that implements
ePMP (a draft version though) with the 1.11 priv spec.

1: https://ibex-core.readthedocs.io/en/latest/01_overview/compliance.html

Fixes: a4b2fa433125 ("target/riscv: Introduce privilege version field in the CSR ops.")
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Message-Id: <20220629233102.275181-2-alistair.francis@opensource.wdc.com>
---
 target/riscv/csr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
 
     /* Physical Memory Protection */
     [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
-                      .min_priv_ver = PRIV_VERSION_1_12_0 },
+                      .min_priv_ver = PRIV_VERSION_1_11_0 },
     [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
     [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
     [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
--
2.36.1

From: Tommy Wu <tommy.wu@sifive.com>

According to the new spec, when vsiselect has a reserved value, attempts
from M-mode or HS-mode to access vsireg, or from VS-mode to access
sireg, should preferably raise an illegal instruction exception.

Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Message-ID: <20230816061647.600672-1-tommy.wu@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/csr.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
 {
-    bool virt;
+    bool virt, isel_reserved;
     uint8_t *iprio;
     int ret = -EINVAL;
     target_ulong priv, isel, vgein;
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
 
     /* Decode register details from CSR number */
     virt = false;
+    isel_reserved = false;
     switch (csrno) {
     case CSR_MIREG:
         iprio = env->miprio;
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
                                   riscv_cpu_mxl_bits(env)),
                         val, new_val, wr_mask);
         }
+    } else {
+        isel_reserved = true;
     }
 
 done:
     if (ret) {
-        return (env->virt_enabled && virt) ?
+        return (env->virt_enabled && virt && !isel_reserved) ?
                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
     }
     return RISCV_EXCP_NONE;
--
2.41.0
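
The effect of Tommy Wu's change on the error path can be summarised with a
standalone sketch (the names are illustrative, not the actual QEMU helper): on
a failed access, a reserved *iselect value forces an illegal-instruction
exception even for a virtualised access that would otherwise be reported as a
virtual-instruction fault.

    #include <stdbool.h>

    typedef enum {
        EXCP_ILLEGAL_INST,
        EXCP_VIRT_INST_FAULT,
    } riscv_excp;

    /*
     * virt: the access targeted a VS-level indirect CSR on behalf of a guest
     * isel_reserved: the *iselect value decoded to a reserved register number
     */
    static riscv_excp xireg_fail_excp(bool virt_enabled, bool virt,
                                      bool isel_reserved)
    {
        return (virt_enabled && virt && !isel_reserved) ?
               EXCP_VIRT_INST_FAULT : EXCP_ILLEGAL_INST;
    }

    int main(void)
    {
        /* reserved vsiselect from a guest: illegal instruction, not a virt fault */
        return xireg_fail_excp(true, true, true) == EXCP_ILLEGAL_INST ? 0 : 1;
    }
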
From: Atish Patra <atish.patra@wdc.com>

The predicate function calculates the counter index incorrectly for
hpmcounterx. Fix the counter index to reflect correct CSR number.

Fixes: e39a8320b088 ("target/riscv: Support the Virtual Instruction fault")
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Message-Id: <20220620231603.2547260-2-atishp@rivosinc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/csr.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException ctr(CPURISCVState *env, int csrno)
 #if !defined(CONFIG_USER_ONLY)
     CPUState *cs = env_cpu(env);
     RISCVCPU *cpu = RISCV_CPU(cs);
+    int ctr_index;
 
     if (!cpu->cfg.ext_counters) {
         /* The Counters extensions is not enabled */
@@ -XXX,XX +XXX,XX @@ static RISCVException ctr(CPURISCVState *env, int csrno)
             }
             break;
         case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
-            if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
-                get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
+            ctr_index = csrno - CSR_CYCLE;
+            if (!get_field(env->hcounteren, 1 << ctr_index) &&
+                get_field(env->mcounteren, 1 << ctr_index)) {
                 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
             }
             break;
@@ -XXX,XX +XXX,XX @@ static RISCVException ctr(CPURISCVState *env, int csrno)
             }
             break;
         case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
-            if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
-                get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
+            ctr_index = csrno - CSR_CYCLEH;
+            if (!get_field(env->hcounteren, 1 << ctr_index) &&
+                get_field(env->mcounteren, 1 << ctr_index)) {
                 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
             }
             break;
--
2.36.1

From: Nikita Shubin <n.shubin@yadro.com>

As per ISA:

"For CSRRWI, if rd=x0, then the instruction shall not read the CSR and
shall not cause any of the side effects that might occur on a CSR read."

trans_csrrwi() and trans_csrrw() call do_csrw() if rd=x0; do_csrw() then
calls riscv_csrrw_do64() via helper_csrw(), passing NULL as *ret_value.

Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20230808090914.17634-1-nikita.shubin@maquefel.me>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/csr.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                        target_ulong write_mask)
 {
     RISCVException ret;
-    target_ulong old_value;
+    target_ulong old_value = 0;
 
     /* execute combined read/write operation if it exists */
     if (csr_ops[csrno].op) {
         return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
     }
 
-    /* if no accessor exists then return failure */
-    if (!csr_ops[csrno].read) {
-        return RISCV_EXCP_ILLEGAL_INST;
-    }
-    /* read old value */
-    ret = csr_ops[csrno].read(env, csrno, &old_value);
-    if (ret != RISCV_EXCP_NONE) {
-        return ret;
+    /*
+     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
+     * and we can't throw side effects caused by CSR reads.
+     */
+    if (ret_value) {
+        /* if no accessor exists then return failure */
+        if (!csr_ops[csrno].read) {
+            return RISCV_EXCP_ILLEGAL_INST;
+        }
+        /* read old value */
+        ret = csr_ops[csrno].read(env, csrno, &old_value);
+        if (ret != RISCV_EXCP_NONE) {
+            return ret;
+        }
     }
 
     /* write value if writable and write mask set, otherwise drop writes */
--
2.41.0
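
A reduced model of the behaviour Nikita Shubin's patch above gives
riscv_csrrw_do64() (a sketch under simplified types; the real function
dispatches through the csr_ops table): when the caller passes no destination
for the old value, i.e. rd=x0, the read accessor is never invoked, so read
side effects cannot fire.

    #include <stdint.h>

    typedef int (*csr_read_fn)(void *env, int csrno, uint64_t *val);

    /* -1 stands in for RISCV_EXCP_ILLEGAL_INST, 0 for RISCV_EXCP_NONE */
    static int csrrw_read_sketch(void *env, int csrno, uint64_t *ret_value,
                                 csr_read_fn read)
    {
        uint64_t old_value = 0;

        /* ret_value == NULL means rd=x0: skip the read and its side effects */
        if (ret_value) {
            if (!read) {
                return -1;
            }
            int ret = read(env, csrno, &old_value);
            if (ret) {
                return ret;
            }
            *ret_value = old_value;
        }

        /* ... the write path stays the same ... */
        return 0;
    }

    int main(void)
    {
        /* rd=x0: no read callback is needed and none is called */
        return csrrw_read_sketch(0, 0xC00, 0, 0);
    }
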
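
For Atish Patra's predicate fix above, the arithmetic is easy to check by hand
(a standalone example; the CSR numbers and the mcounteren/hcounteren bit
layout are architectural): hpmcounter5 is CSR 0xC05, and the bit that enables
it is bit 5, i.e. csrno - CSR_CYCLE, whereas the old expression
csrno - CSR_HPMCOUNTER3 lands on bit 2, which is the IR (instret) enable.

    #include <stdio.h>

    #define CSR_CYCLE        0xC00
    #define CSR_HPMCOUNTER3  0xC03
    #define CSR_HPMCOUNTER5  0xC05

    int main(void)
    {
        /* mcounteren/hcounteren: bit 0 = CY, bit 1 = TM, bit 2 = IR, bits 3..31 = hpmcounter3..31 */
        int old_bit = CSR_HPMCOUNTER5 - CSR_HPMCOUNTER3;  /* 2: the IR bit, wrong counter  */
        int new_bit = CSR_HPMCOUNTER5 - CSR_CYCLE;        /* 5: the hpmcounter5 enable bit */

        printf("old=%d new=%d\n", old_bit, new_bit);
        return 0;
    }
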