1
From: Alistair Francis <alistair.francis@wdc.com>
1
The following changes since commit 661c2e1ab29cd9c4d268ae3f44712e8d421c0e56:
2
2
3
The following changes since commit 64ada298b98a51eb2512607f6e6180cb330c47b1:
3
scripts/checkpatch: Fix a typo (2025-03-04 09:30:26 +0800)
4
5
Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220302' into staging (2022-03-02 12:38:46 +0000)
6
4
7
are available in the Git repository at:
5
are available in the Git repository at:
8
6
9
git@github.com:alistair23/qemu.git tags/pull-riscv-to-apply-20220303
7
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20250305-1
10
8
11
for you to fetch changes up to 6b1accefd4876ea5475d55454c7d5b52c02cb73c:
9
for you to fetch changes up to 4db19d5b21e058e6eb3474b6be470d1184afaa9e:
12
10
13
target/riscv: expose zfinx, zdinx, zhinx{min} properties (2022-03-03 13:14:50 +1000)
11
target/riscv/kvm: add missing KVM CSRs (2025-03-04 15:42:54 +1000)
14
12
15
----------------------------------------------------------------
13
----------------------------------------------------------------
16
Fifth RISC-V PR for QEMU 7.0
14
Third RISC-V PR for 10.0
17
15
18
* Fixup checks for ext_zb[abcs]
16
* CSR coverity fixes
19
* Add AIA support for virt machine
17
* Fix unexpected behavior of vector reduction instructions when vl is 0
20
* Increase maximum number of CPUs in virt machine
18
* Fix incorrect vlen comparison in prop_vlen_set
21
* Fixup OpenTitan SPI address
19
* Throw debug exception before page fault
22
* Add support for zfinx, zdinx and zhinx{min} extensions
20
* Remove redundant "hart_idx" masking from APLIC
21
* Add support for Control Transfer Records Ext
22
* Remove redundant struct members from the IOMMU
23
* Remove duplicate definitions from the IOMMU
24
* Fix tick_offset migration for Goldfish RTC
25
* Add serial alias in virt machine DTB
26
* Remove Bin Meng from RISC-V maintainers
27
* Add support for Control Transfer Records Ext
28
* Log guest errors when reserved bits are set in PTEs
29
* Add missing Sdtrig disas CSRs
30
* Correct the hpmevent sscofpmf mask
31
* Mask upper sscofpmf bits during validation
32
* Remove warnings about Smdbltrp/Smrnmi being disabled
33
* Respect mseccfg.RLB bit for TOR mode PMP entry
34
* Update KVM support to Linux 6.14-rc3
35
* IOMMU HPM support
36
* Support Sscofpmf/Svade/Svadu/Smnpm/Ssnpm extensions in KVM
37
* Add --ignore-family option to binfmt
38
* Refinement for AIA with KVM acceleration
39
* Reset time changes for KVM
23
40
24
----------------------------------------------------------------
41
----------------------------------------------------------------
25
Anup Patel (5):
42
Alistair Francis (1):
26
hw/riscv: virt: Add optional AIA APLIC support to virt machine
43
MAINTAINERS: Remove Bin Meng from RISC-V maintainers
27
hw/intc: Add RISC-V AIA IMSIC device emulation
28
hw/riscv: virt: Add optional AIA IMSIC support to virt machine
29
docs/system: riscv: Document AIA options for virt machine
30
hw/riscv: virt: Increase maximum number of allowed CPUs
31
44
32
Philipp Tomsich (1):
45
Andrea Bolognani (3):
33
target/riscv: fix inverted checks for ext_zb[abcs]
46
binfmt: Shuffle things around
47
binfmt: Normalize host CPU architecture
48
binfmt: Add --ignore-family option
34
49
35
Weiwei Li (6):
50
Atish Patra (2):
36
target/riscv: add cfg properties for zfinx, zdinx and zhinx{min}
51
target/riscv: Fix the hpmevent mask
37
target/riscv: hardwire mstatus.FS to zero when enable zfinx
52
target/riscv: Mask out upper sscofpmf bits during validation
38
target/riscv: add support for zfinx
39
target/riscv: add support for zdinx
40
target/riscv: add support for zhinx/zhinxmin
41
target/riscv: expose zfinx, zdinx, zhinx{min} properties
42
53
43
Wilfred Mallawa (1):
54
Clément Léger (1):
44
hw: riscv: opentitan: fixup SPI addresses
55
target/riscv: remove warnings about Smdbltrp/Smrnmi being disabled
45
56
46
docs/system/riscv/virt.rst | 16 +
57
Daniel Henrique Barboza (22):
47
include/hw/intc/riscv_imsic.h | 68 +++
58
target/riscv/csr.c: fix deadcode in rmw_xireg()
48
include/hw/riscv/opentitan.h | 4 +-
59
target/riscv/csr.c: fix 'ret' deadcode in rmw_xireg()
49
include/hw/riscv/virt.h | 41 +-
60
target/riscv/csr.c: fix deadcode in rmw_xiregi()
50
target/riscv/cpu.h | 4 +
61
target/riscv/csr.c: fix deadcode in aia_smode32()
51
target/riscv/helper.h | 4 +-
62
target/riscv/cpu_helper.c: fix bad_shift in riscv_cpu_interrupt()
52
target/riscv/internals.h | 32 +-
63
target/riscv/debug.c: use wp size = 4 for 32-bit CPUs
53
hw/intc/riscv_imsic.c | 448 +++++++++++++++++++
64
target/riscv: throw debug exception before page fault
54
hw/riscv/opentitan.c | 12 +-
65
target/riscv: add ssu64xl
55
hw/riscv/virt.c | 698 +++++++++++++++++++++++++-----
66
target/riscv: use RVB in RVA22U64
56
target/riscv/cpu.c | 17 +
67
target/riscv: add profile u_parent and s_parent
57
target/riscv/cpu_helper.c | 6 +-
68
target/riscv: change priv_ver check in validate_profile()
58
target/riscv/csr.c | 25 +-
69
target/riscv: add RVA23U64 profile
59
target/riscv/fpu_helper.c | 178 ++++----
70
target/riscv: add RVA23S64 profile
60
target/riscv/translate.c | 149 ++++++-
71
linux-headers: Update to Linux v6.14-rc3
61
target/riscv/insn_trans/trans_rvb.c.inc | 8 +-
72
target/riscv/cpu.c: create flag for ziccrse
62
target/riscv/insn_trans/trans_rvd.c.inc | 285 ++++++++----
73
target/riscv/kvm: add extensions after 6.14-rc3 update
63
target/riscv/insn_trans/trans_rvf.c.inc | 314 ++++++++++----
74
hw/riscv/riscv-iommu.h: add missing headers
64
target/riscv/insn_trans/trans_rvzfh.c.inc | 332 ++++++++++----
75
hw/riscv: add IOMMU HPM trace events
65
hw/intc/Kconfig | 3 +
76
docs/specs/riscv-iommu.rst: add HPM support info
66
hw/intc/meson.build | 1 +
77
target/riscv/cpu: remove unneeded !kvm_enabled() check
67
hw/riscv/Kconfig | 2 +
78
target/riscv/kvm: add kvm_riscv_reset_regs_csr()
68
22 files changed, 2146 insertions(+), 501 deletions(-)
79
target/riscv/kvm: add missing KVM CSRs
69
create mode 100644 include/hw/intc/riscv_imsic.h
80
70
create mode 100644 hw/intc/riscv_imsic.c
81
Huang Borong (1):
82
hw/intc/riscv_aplic: Remove redundant "hart_idx" masking
83
84
Jason Chien (2):
85
hw/riscv/riscv-iommu: Remove redundant struct members
86
hw/riscv/riscv-iommu-bits: Remove duplicate definitions
87
88
Max Chou (2):
89
target/riscv: rvv: Fix unexpected behavior of vector reduction instructions when vl is 0
90
target/riscv: rvv: Fix incorrect vlen comparison in prop_vlen_set
91
92
Quan Zhou (1):
93
target/riscv/kvm: Add some exts support
94
95
Rajnesh Kanwal (7):
96
target/riscv: Remove obsolete sfence.vm instruction
97
target/riscv: Add Control Transfer Records CSR definitions.
98
target/riscv: Add support for Control Transfer Records extension CSRs.
99
target/riscv: Add support to record CTR entries.
100
target/riscv: Add CTR sctrclr instruction.
101
target/riscv: machine: Add Control Transfer Record state description
102
target/riscv: Add support to access ctrsource, ctrtarget, ctrdata regs.
103
104
Rob Bradford (3):
105
disas/riscv: Fix minor whitespace issues
106
disas/riscv: Add missing Sdtrig CSRs
107
target/riscv: Respect mseccfg.RLB bit for TOR mode PMP entry
108
109
Rodrigo Dias Correa (1):
110
goldfish_rtc: Fix tick_offset migration
111
112
Tomasz Jeznach (8):
113
hw/riscv/riscv-iommu-bits.h: HPM bits
114
hw/riscv/riscv-iommu: add riscv-iommu-hpm file
115
hw/riscv/riscv-iommu: add riscv_iommu_hpm_incr_ctr()
116
hw/riscv/riscv-iommu: instantiate hpm_timer
117
hw/riscv/riscv-iommu: add IOCOUNTINH mmio writes
118
hw/riscv/riscv-iommu: add IOHPMCYCLES mmio write
119
hw/riscv/riscv-iommu: add hpm events mmio write
120
hw/riscv/riscv-iommu.c: add RISCV_IOMMU_CAP_HPM cap
121
122
Vasilis Liaskovitis (1):
123
hw/riscv/virt: Add serial alias in DTB
124
125
Yong-Xuan Wang (3):
126
hw/intc/imsic: refine the IMSIC realize
127
hw/intc/aplic: refine the APLIC realize
128
hw/intc/aplic: refine kvm_msicfgaddr
129
130
julia (1):
131
target/riscv: log guest errors when reserved bits are set in PTEs
132
133
MAINTAINERS | 5 +-
134
docs/specs/riscv-iommu.rst | 2 +
135
hw/riscv/riscv-iommu-bits.h | 69 +++-
136
hw/riscv/riscv-iommu-hpm.h | 33 ++
137
hw/riscv/riscv-iommu.h | 32 +-
138
include/standard-headers/linux/ethtool.h | 4 +
139
include/standard-headers/linux/fuse.h | 76 +++-
140
include/standard-headers/linux/input-event-codes.h | 1 +
141
include/standard-headers/linux/pci_regs.h | 16 +-
142
include/standard-headers/linux/virtio_pci.h | 14 +
143
linux-headers/asm-arm64/kvm.h | 3 -
144
linux-headers/asm-loongarch/kvm_para.h | 1 +
145
linux-headers/asm-riscv/kvm.h | 7 +-
146
linux-headers/asm-x86/kvm.h | 1 +
147
linux-headers/linux/iommufd.h | 35 +-
148
linux-headers/linux/kvm.h | 8 +-
149
linux-headers/linux/stddef.h | 13 +-
150
linux-headers/linux/vduse.h | 2 +-
151
target/riscv/cpu-qom.h | 2 +
152
target/riscv/cpu.h | 16 +-
153
target/riscv/cpu_bits.h | 150 +++++++-
154
target/riscv/cpu_cfg.h | 5 +
155
target/riscv/helper.h | 2 +
156
target/riscv/insn32.decode | 2 +-
157
disas/riscv.c | 16 +-
158
hw/intc/riscv_aplic.c | 74 ++--
159
hw/intc/riscv_imsic.c | 47 +--
160
hw/riscv/riscv-iommu-hpm.c | 381 +++++++++++++++++++++
161
hw/riscv/riscv-iommu.c | 131 ++++++-
162
hw/riscv/virt.c | 3 +
163
hw/rtc/goldfish_rtc.c | 43 +--
164
target/riscv/cpu.c | 115 ++++++-
165
target/riscv/cpu_helper.c | 315 ++++++++++++++++-
166
target/riscv/csr.c | 318 +++++++++++++++--
167
target/riscv/debug.c | 6 +-
168
target/riscv/kvm/kvm-cpu.c | 40 ++-
169
target/riscv/machine.c | 25 ++
170
target/riscv/op_helper.c | 48 +++
171
target/riscv/pmp.c | 2 +-
172
target/riscv/pmu.c | 2 +-
173
target/riscv/tcg/tcg-cpu.c | 58 +++-
174
target/riscv/translate.c | 46 +++
175
target/riscv/vector_helper.c | 8 +-
176
target/riscv/insn_trans/trans_privileged.c.inc | 18 +-
177
target/riscv/insn_trans/trans_rvi.c.inc | 75 ++++
178
target/riscv/insn_trans/trans_rvzce.c.inc | 21 ++
179
hw/riscv/meson.build | 3 +-
180
hw/riscv/trace-events | 5 +
181
scripts/qemu-binfmt-conf.sh | 78 +++--
182
tests/data/acpi/riscv64/virt/RHCT | Bin 390 -> 400 bytes
183
50 files changed, 2106 insertions(+), 271 deletions(-)
184
create mode 100644 hw/riscv/riscv-iommu-hpm.h
185
create mode 100644 hw/riscv/riscv-iommu-hpm.c
186
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Coverity found a DEADCODE issue in rmw_xireg() claiming that we can't
4
reach 'RISCV_EXCP_VIRT_INSTRUCTION_FAULT' at the 'done' label:
5
6
done:
7
if (ret) {
8
return (env->virt_enabled && virt) ?
9
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
10
}
11
return RISCV_EXCP_NONE;
12
13
This happens because the 'virt' flag, which is only used by 'done', is
14
set to 'false' and it will always remain 'false' in any condition where
15
we'll jump to 'done':
16
17
switch (csrno) {
18
(...)
19
case CSR_VSIREG:
20
isel = env->vsiselect;
21
virt = true;
22
break;
23
default:
24
goto done;
25
};
26
27
'virt = true' will never reach 'done' because we have a if/else-if/else
28
block right before the label that will always return:
29
30
if (xiselect_aia_range(isel)) {
31
return ...
32
} else if (...) {
33
return ...
34
} else {
35
return RISCV_EXCP_ILLEGAL_INST;
36
}
37
38
All this means that we can preserve the current logic by reducing the
39
'done' label to:
40
41
done:
42
if (ret) {
43
return RISCV_EXCP_ILLEGAL_INST;
44
}
45
return RISCV_EXCP_NONE;
46
47
The flag 'virt' is now unused. Remove it.
48
49
Fix the 'goto done' identation while we're at it.
50
51
Resolves: Coverity CID 1590359
52
Fixes: dc0280723d ("target/riscv: Decouple AIA processing from xiselect and xireg")
53
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
54
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
55
Message-ID: <20250121184847.2109128-2-dbarboza@ventanamicro.com>
56
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
57
---
58
target/riscv/csr.c | 7 ++-----
59
1 file changed, 2 insertions(+), 5 deletions(-)
60
61
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/riscv/csr.c
64
+++ b/target/riscv/csr.c
65
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
66
target_ulong *val, target_ulong new_val,
67
target_ulong wr_mask)
68
{
69
- bool virt = false;
70
int ret = -EINVAL;
71
target_ulong isel;
72
73
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
74
break;
75
case CSR_VSIREG:
76
isel = env->vsiselect;
77
- virt = true;
78
break;
79
default:
80
- goto done;
81
+ goto done;
82
};
83
84
/*
85
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
86
87
done:
88
if (ret) {
89
- return (env->virt_enabled && virt) ?
90
- RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
91
+ return RISCV_EXCP_ILLEGAL_INST;
92
}
93
return RISCV_EXCP_NONE;
94
}
95
--
96
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Coverity found a second DEADCODE issue in rmw_xireg() claiming that we can't
4
reach 'RISCV_EXCP_NONE' at the 'done' label:
5
6
> 2706 done:
7
> 2707 if (ret) {
8
> 2708 return (env->virt_enabled && virt) ?
9
> 2709 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
10
> 2710 }
11
>>>> CID 1590356: Control flow issues (DEADCODE)
12
>>>> Execution cannot reach this statement: "return RISCV_EXCP_NONE;".
13
> 2711 return RISCV_EXCP_NONE;
14
15
Our label is now reduced after fixing another deadcode in the previous
16
patch but the problem reported here still remains:
17
18
done:
19
if (ret) {
20
return RISCV_EXCP_ILLEGAL_INST;
21
}
22
return RISCV_EXCP_NONE;
23
24
This happens because 'ret' changes only once at the start of the
25
function:
26
27
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
28
if (ret != RISCV_EXCP_NONE) {
29
return ret;
30
}
31
32
So it's a guarantee that ret will be RISCV_EXCP_NONE (-1) if we ever
33
reach the label, i.e. "if (ret)" will always be true, and the label can
34
be even further reduced to:
35
36
done:
37
return RISCV_EXCP_ILLEGAL_INST;
38
39
To make a better use of the label, remove the 'else' from the
40
xiselect_aia_range() chain and let it fall-through to the 'done' label
41
since they are now both returning RISCV_EXCP_ILLEGAL_INST.
42
43
Resolves: Coverity CID 1590356
44
Fixes: dc0280723d ("target/riscv: Decouple AIA processing from xiselect and xireg")
45
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
46
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
47
Message-ID: <20250121184847.2109128-3-dbarboza@ventanamicro.com>
48
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
49
---
50
target/riscv/csr.c | 7 +------
51
1 file changed, 1 insertion(+), 6 deletions(-)
52
53
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/riscv/csr.c
56
+++ b/target/riscv/csr.c
57
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
58
} else if (riscv_cpu_cfg(env)->ext_smcsrind ||
59
riscv_cpu_cfg(env)->ext_sscsrind) {
60
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
61
- } else {
62
- return RISCV_EXCP_ILLEGAL_INST;
63
}
64
65
done:
66
- if (ret) {
67
- return RISCV_EXCP_ILLEGAL_INST;
68
- }
69
- return RISCV_EXCP_NONE;
70
+ return RISCV_EXCP_ILLEGAL_INST;
71
}
72
73
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
74
--
75
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Coverity found a DEADCODE issue in rmw_xiregi() claiming that we can't
4
reach 'RISCV_EXCP_VIRT_INSTRUCTION_FAULT' at the 'done' label:
5
6
> 2652 done:
7
>>>> CID 1590357: Control flow issues (DEADCODE)
8
>>>> Execution cannot reach the expression "RISCV_EXCP_VIRT_INSTRUCTION_FAULT"
9
inside this statement: "return (env->virt_enabled &...".
10
> 2653 return (env->virt_enabled && virt) ?
11
> 2654 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
12
13
This happens because 'virt' is being set to 'false' and it will remain
14
as 'false' in any code path where 'done' will be called. The label can
15
be safely reduced to:
16
17
done:
18
return RISCV_EXCP_ILLEGAL_INST;
19
20
And that will leave us with the following usage of a 'goto' skipping a
21
single 'return' to do another single 'return':
22
23
} else {
24
goto done;
25
}
26
27
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
28
29
done:
30
return RISCV_EXCP_ILLEGAL_INST;
31
32
Which we will eliminate it and just do 'return RISCV_EXCP_ILLEGAL_INST'
33
instead.
34
35
Resolves: Coverity CID 1590357
36
Fixes: 5e33a20827 ("target/riscv: Support generic CSR indirect access")
37
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
38
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
39
Message-ID: <20250121184847.2109128-4-dbarboza@ventanamicro.com>
40
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
41
---
42
target/riscv/csr.c | 8 +-------
43
1 file changed, 1 insertion(+), 7 deletions(-)
44
45
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/riscv/csr.c
48
+++ b/target/riscv/csr.c
49
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
50
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
51
target_ulong new_val, target_ulong wr_mask)
52
{
53
- bool virt = false;
54
int ret = -EINVAL;
55
target_ulong isel;
56
57
@@ -XXX,XX +XXX,XX @@ static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
58
} else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
59
csrno != CSR_VSIREG4 - 1) {
60
isel = env->vsiselect;
61
- virt = true;
62
} else {
63
- goto done;
64
+ return RISCV_EXCP_ILLEGAL_INST;
65
}
66
67
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
68
-
69
-done:
70
- return (env->virt_enabled && virt) ?
71
- RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
72
}
73
74
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
75
--
76
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Coverity reported a DEADCODE ticket in this function, as follows:
4
5
>>>> CID 1590358: Control flow issues (DEADCODE)
6
>>>> Execution cannot reach this statement: "return ret;".
7
> 380 return ret;
8
> 381 }
9
10
The cause is that the 'if (ret != RISCV_EXCP_NONE)' conditional is
11
duplicated:
12
13
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
14
if (ret != RISCV_EXCP_NONE) {
15
return ret;
16
}
17
18
if (ret != RISCV_EXCP_NONE) {
19
return ret;
20
}
21
22
Remove the duplication to fix the deadcode.
23
24
Resolves: Coverity CID 1590358
25
Fixes: dbcb6e1ccf ("target/riscv: Enable S*stateen bits for AIA")
26
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
27
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
28
Message-ID: <20250121184847.2109128-5-dbarboza@ventanamicro.com>
29
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
---
31
target/riscv/csr.c | 4 ----
32
1 file changed, 4 deletions(-)
33
34
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/riscv/csr.c
37
+++ b/target/riscv/csr.c
38
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
39
return ret;
40
}
41
42
- if (ret != RISCV_EXCP_NONE) {
43
- return ret;
44
- }
45
-
46
return smode32(env, csrno);
47
}
48
49
--
50
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Coverity reported a BAD_SHIFT issue in the following code:
4
5
> 2097
6
>>>> CID 1590355: Integer handling issues (BAD_SHIFT)
7
>>>> In expression "hdeleg >> cause", right shifting by more than 63
8
bits has undefined behavior. The shift amount, "cause", is at least 64.
9
> 2098 vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
10
> 2099 /*
11
12
It is not clear to me how the tool guarantees that '"cause" is at least
13
64', but indeed there's no guarantees that it would be < 64 in the
14
'async = true' code path.
15
16
A simple fix to avoid a potential UB is to add a 'cause < 64' guard like
17
'mode' is already doing right before 'vsmode_exc'.
18
19
Resolves: Coverity CID 1590355
20
Fixes: 967760f62c ("target/riscv: Implement Ssdbltrp exception handling")
21
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
22
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
23
Message-ID: <20250121184847.2109128-6-dbarboza@ventanamicro.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
25
---
26
target/riscv/cpu_helper.c | 4 +++-
27
1 file changed, 3 insertions(+), 1 deletion(-)
28
29
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu_helper.c
32
+++ b/target/riscv/cpu_helper.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
34
mode = env->priv <= PRV_S && cause < 64 &&
35
(((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
36
37
- vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
38
+ vsmode_exc = env->virt_enabled && cause < 64 &&
39
+ (((hdeleg >> cause) & 1) || vs_injected);
40
+
41
/*
42
* Check double trap condition only if already in S-mode and targeting
43
* S-mode
44
--
45
2.48.1
diff view generated by jsdifflib
New patch
1
From: Max Chou <max.chou@sifive.com>
1
2
3
According to the Vector Reduction Operations section in the RISC-V "V"
4
Vector Extension spec,
5
"If vl=0, no operation is performed and the destination register is not
6
updated."
7
8
The vd should be updated when vl is larger than 0.
9
10
Fixes: fe5c9ab1fc ("target/riscv: vector single-width integer reduction instructions")
11
Fixes: f714361ed7 ("target/riscv: rvv-1.0: implement vstart CSR")
12
Signed-off-by: Max Chou <max.chou@sifive.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250124101452.2519171-1-max.chou@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/vector_helper.c | 8 ++++++--
18
1 file changed, 6 insertions(+), 2 deletions(-)
19
20
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/vector_helper.c
23
+++ b/target/riscv/vector_helper.c
24
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
25
} \
26
s1 = OP(s1, (TD)s2); \
27
} \
28
- *((TD *)vd + HD(0)) = s1; \
29
+ if (vl > 0) { \
30
+ *((TD *)vd + HD(0)) = s1; \
31
+ } \
32
env->vstart = 0; \
33
/* set tail elements to 1s */ \
34
vext_set_elems_1s(vd, vta, esz, vlenb); \
35
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
36
} \
37
s1 = OP(s1, (TD)s2, &env->fp_status); \
38
} \
39
- *((TD *)vd + HD(0)) = s1; \
40
+ if (vl > 0) { \
41
+ *((TD *)vd + HD(0)) = s1; \
42
+ } \
43
env->vstart = 0; \
44
/* set tail elements to 1s */ \
45
vext_set_elems_1s(vd, vta, esz, vlenb); \
46
--
47
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
3
In prop_vlen_set function, there is an incorrect comparison between
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
4
vlen(bit) and vlenb(byte).
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
5
This will cause unexpected error when user applies the `vlen=1024` cpu
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
option with a vendor predefined cpu type that the default vlen is
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
1024(vlenb=128).
8
Message-Id: <20220211043920.28981-7-liweiwei@iscas.ac.cn>
8
9
Fixes: 4f6d036ccc ("target/riscv/cpu.c: remove cpu->cfg.vlen")
10
Signed-off-by: Max Chou <max.chou@sifive.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-ID: <20250124090539.2506448-1-max.chou@sifive.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
14
---
11
target/riscv/cpu.c | 5 +++++
15
target/riscv/cpu.c | 5 +++--
12
1 file changed, 5 insertions(+)
16
1 file changed, 3 insertions(+), 2 deletions(-)
13
17
14
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
18
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu.c
20
--- a/target/riscv/cpu.c
17
+++ b/target/riscv/cpu.c
21
+++ b/target/riscv/cpu.c
18
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_properties[] = {
22
@@ -XXX,XX +XXX,XX @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
19
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
23
void *opaque, Error **errp)
20
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
24
{
21
25
RISCVCPU *cpu = RISCV_CPU(obj);
22
+ DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
26
+ uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
23
+ DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
27
uint16_t value;
24
+ DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
28
25
+ DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
29
if (!visit_type_uint16(v, name, &value, errp)) {
26
+
30
@@ -XXX,XX +XXX,XX @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
27
/* Vendor-specific custom extensions */
31
return;
28
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
32
}
33
34
- if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
35
+ if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
36
cpu_set_prop_err(cpu, name, errp);
37
error_append_hint(errp, "Current '%s' val: %u\n",
38
- name, cpu->cfg.vlenb << 3);
39
+ name, cpu_vlen);
40
return;
41
}
29
42
30
--
43
--
31
2.35.1
44
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
The mcontrol select bit (19) is always zero, meaning our triggers will
4
always match virtual addresses. In this condition, if the user does not
5
specify a size for the trigger, the access size defaults to XLEN.
6
7
At this moment we're using def_size = 8 regardless of CPU XLEN. Use
8
def_size = 4 in case we're running 32 bits.
9
10
Fixes: 95799e36c1 ("target/riscv: Add initial support for the Sdtrig extension")
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Message-ID: <20250121170626.1992570-2-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/debug.c | 6 ++++--
18
1 file changed, 4 insertions(+), 2 deletions(-)
19
20
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/debug.c
23
+++ b/target/riscv/debug.c
24
@@ -XXX,XX +XXX,XX @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
25
bool enabled = type2_breakpoint_enabled(ctrl);
26
CPUState *cs = env_cpu(env);
27
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
28
- uint32_t size;
29
+ uint32_t size, def_size;
30
31
if (!enabled) {
32
return;
33
@@ -XXX,XX +XXX,XX @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
34
cpu_watchpoint_insert(cs, addr, size, flags,
35
&env->cpu_watchpoint[index]);
36
} else {
37
- cpu_watchpoint_insert(cs, addr, 8, flags,
38
+ def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 8 : 4;
39
+
40
+ cpu_watchpoint_insert(cs, addr, def_size, flags,
41
&env->cpu_watchpoint[index]);
42
}
43
}
44
--
45
2.48.1
46
47
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
In the RISC-V privileged ISA section 3.1.15 table 15, it is determined
4
that a debug exception that is triggered from a load/store has a higher
5
priority than a possible fault that this access might trigger.
6
7
This is not the case ATM as shown in [1]. Adding a breakpoint in an
8
address that deliberately will fault is causing a load page fault
9
instead of a debug exception. The reason is that we're throwing in the
10
page fault as soon as the fault occurs (end of riscv_cpu_tlb_fill(),
11
raise_mmu_exception()), not allowing the installed watchpoints to
12
trigger.
13
14
Call cpu_check_watchpoint() in the page fault path to search and execute
15
any watchpoints that might exist for the address, never returning back
16
to the fault path. If no watchpoints are found cpu_check_watchpoint()
17
will return and we'll fall-through the regular path to
18
raise_mmu_exception().
19
20
[1] https://gitlab.com/qemu-project/qemu/-/issues/2627
21
22
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2627
23
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-ID: <20250121170626.1992570-3-dbarboza@ventanamicro.com>
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
28
---
29
target/riscv/cpu_helper.c | 18 ++++++++++++++++++
30
1 file changed, 18 insertions(+)
31
32
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_helper.c
35
+++ b/target/riscv/cpu_helper.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "exec/page-protection.h"
38
#include "instmap.h"
39
#include "tcg/tcg-op.h"
40
+#include "hw/core/tcg-cpu-ops.h"
41
#include "trace.h"
42
#include "semihosting/common-semi.h"
43
#include "system/cpu-timers.h"
44
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
45
} else if (probe) {
46
return false;
47
} else {
48
+ int wp_access = 0;
49
+
50
+ if (access_type == MMU_DATA_LOAD) {
51
+ wp_access |= BP_MEM_READ;
52
+ } else if (access_type == MMU_DATA_STORE) {
53
+ wp_access |= BP_MEM_WRITE;
54
+ }
55
+
56
+ /*
57
+ * If a watchpoint isn't found for 'addr' this will
58
+ * be a no-op and we'll resume the mmu_exception path.
59
+ * Otherwise we'll throw a debug exception and execution
60
+ * will continue elsewhere.
61
+ */
62
+ cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
63
+ wp_access, retaddr);
64
+
65
raise_mmu_exception(env, address, access_type, pmp_violation,
66
first_stage_error, two_stage_lookup,
67
two_stage_indirect_error);
68
--
69
2.48.1
diff view generated by jsdifflib
New patch
1
From: Huang Borong <huangborong@bosc.ac.cn>
1
2
3
Remove the redundant masking of "hart_idx", as the same operation is
4
performed later during address calculation.
5
6
This change impacts the "hart_idx" value in the final qemu_log_mask()
7
call. The original "hart_idx" parameter should be used for logging to
8
ensure accuracy, rather than the masked value.
9
10
Signed-off-by: Huang Borong <huangborong@bosc.ac.cn>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
13
Message-ID: <20250115035105.19600-1-huangborong@bosc.ac.cn>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
hw/intc/riscv_aplic.c | 1 -
17
1 file changed, 1 deletion(-)
18
19
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/intc/riscv_aplic.c
22
+++ b/hw/intc/riscv_aplic.c
23
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
24
APLIC_xMSICFGADDRH_HHXW_MASK;
25
26
group_idx = hart_idx >> lhxw;
27
- hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);
28
29
addr = msicfgaddr;
30
addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32;
31
--
32
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
ssu64xl is defined in RVA22 as:
4
5
"sstatus.UXL must be capable of holding the value 2 (i.e., UXLEN=64 must
6
be supported)."
7
8
This is always true in TCG and it's mandatory for RVA23, so claim
9
support for it.
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-ID: <20250115184316.2344583-2-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/cpu.c | 1 +
18
tests/data/acpi/riscv64/virt/RHCT | Bin 390 -> 398 bytes
19
2 files changed, 1 insertion(+)
20
21
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/cpu.c
24
+++ b/target/riscv/cpu.c
25
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
26
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
27
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
28
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
29
+ ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
30
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
31
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
32
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
33
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
34
index XXXXXXX..XXXXXXX 100644
35
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
36
--
37
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
From the time we added RVA22U64 until now the spec didn't declare 'RVB'
4
as a dependency, using zba/zbb/zbs instead. Since then the RVA22 spec
5
[1] added the following in the 'RVA22U64 Mandatory Extensions' section:
6
7
"B Bit-manipulation instructions
8
9
Note: The B extension comprises the Zba, Zbb, and Zbs extensions. At the
10
time of RVA22U64's ratification, the B extension had not yet been
11
defined, and so RVA22U64 explicitly mandated Zba, Zbb, and Zbs instead.
12
Mandating B is equivalent."
13
14
It is also equivalent to QEMU (see riscv_cpu_validate_b() in
15
target/riscv/tcg/tcg-cpu.c).
16
17
Finally, RVA23U64 [2] directly mentions RVB as a mandatory extension,
18
not citing zba/zbb/zbs.
19
20
To make it clear that RVA23U64 will extend RVA22U64 (i.e. RVA22 is a
21
parent of RVA23), use RVB in RVA22U64 as well.
22
23
(bios-tables-test change: RVB added to riscv,isa)
24
25
[1] https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#61-rva22u64-profile
26
[2] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc#rva23u64-profile
27
28
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
29
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
30
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
31
Message-ID: <20250115184316.2344583-3-dbarboza@ventanamicro.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
---
34
target/riscv/cpu.c | 2 +-
35
tests/data/acpi/riscv64/virt/RHCT | Bin 398 -> 400 bytes
36
2 files changed, 1 insertion(+), 1 deletion(-)
37
38
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/cpu.c
41
+++ b/target/riscv/cpu.c
42
@@ -XXX,XX +XXX,XX @@ static const PropertyInfo prop_marchid = {
43
static RISCVCPUProfile RVA22U64 = {
44
.parent = NULL,
45
.name = "rva22u64",
46
- .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
47
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
48
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
49
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
50
.ext_offsets = {
51
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
52
index XXXXXXX..XXXXXXX 100644
53
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
54
--
55
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
3
The current 'parent' mechanic for profiles allows for one profile to be
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
4
a child of a previous/older profile, enabling all its extensions (and
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
5
the parent profile itself) and sparing us from tediously listing all
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
extensions for every profile.
7
8
This works fine for u-mode profiles. For s-mode profiles this is not
9
enough: a s-mode profile extends not only his equivalent u-mode profile
10
but also the previous s-mode profile. This means, for example, that
11
RVA23S64 extends both RVA23U64 and RVA22S64.
12
13
To fit this usage, rename the existing 'parent' to 'u_parent' and add a
14
new 's_parent' attribute for profiles. Handle both like we were doing
15
with the previous 'parent' attribute, i.e. if set, enable it. This
16
change does nothing for the existing profiles but will make RVA23S64
17
simpler.
18
19
Suggested-by: Andrew Jones <ajones@ventanamicro.com>
20
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
21
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
22
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-Id: <20220211043920.28981-2-liweiwei@iscas.ac.cn>
23
Message-ID: <20250115184316.2344583-4-dbarboza@ventanamicro.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
25
---
11
target/riscv/cpu.h | 4 ++++
26
target/riscv/cpu.h | 3 ++-
12
target/riscv/cpu.c | 12 ++++++++++++
27
target/riscv/cpu.c | 6 ++++--
13
2 files changed, 16 insertions(+)
28
target/riscv/tcg/tcg-cpu.c | 35 ++++++++++++++++++++++++++---------
29
3 files changed, 32 insertions(+), 12 deletions(-)
14
30
15
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
31
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
16
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.h
33
--- a/target/riscv/cpu.h
18
+++ b/target/riscv/cpu.h
34
+++ b/target/riscv/cpu.h
19
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
35
@@ -XXX,XX +XXX,XX @@ const char *riscv_get_misa_ext_description(uint32_t bit);
20
bool ext_svinval;
36
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
21
bool ext_svnapot;
37
22
bool ext_svpbmt;
38
typedef struct riscv_cpu_profile {
23
+ bool ext_zdinx;
39
- struct riscv_cpu_profile *parent;
24
bool ext_zfh;
40
+ struct riscv_cpu_profile *u_parent;
25
bool ext_zfhmin;
41
+ struct riscv_cpu_profile *s_parent;
26
+ bool ext_zfinx;
42
const char *name;
27
+ bool ext_zhinx;
43
uint32_t misa_ext;
28
+ bool ext_zhinxmin;
44
bool enabled;
29
bool ext_zve32f;
30
bool ext_zve64f;
31
32
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
45
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
33
index XXXXXXX..XXXXXXX 100644
46
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu.c
47
--- a/target/riscv/cpu.c
35
+++ b/target/riscv/cpu.c
48
+++ b/target/riscv/cpu.c
36
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
49
@@ -XXX,XX +XXX,XX @@ static const PropertyInfo prop_marchid = {
37
cpu->cfg.ext_d = true;
50
* doesn't need to be manually enabled by the profile.
38
}
51
*/
39
52
static RISCVCPUProfile RVA22U64 = {
40
+ if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
53
- .parent = NULL,
41
+ cpu->cfg.ext_zhinxmin) {
54
+ .u_parent = NULL,
42
+ cpu->cfg.ext_zfinx = true;
55
+ .s_parent = NULL,
43
+ }
56
.name = "rva22u64",
57
.misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
58
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
59
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA22U64 = {
60
* The remaining features/extensions comes from RVA22U64.
61
*/
62
static RISCVCPUProfile RVA22S64 = {
63
- .parent = &RVA22U64,
64
+ .u_parent = &RVA22U64,
65
+ .s_parent = NULL,
66
.name = "rva22s64",
67
.misa_ext = RVS,
68
.priv_spec = PRIV_VERSION_1_12_0,
69
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/riscv/tcg/tcg-cpu.c
72
+++ b/target/riscv/tcg/tcg-cpu.c
73
@@ -XXX,XX +XXX,XX @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
74
}
75
#endif
76
77
+static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
78
+ RISCVCPUProfile *profile,
79
+ RISCVCPUProfile *parent)
80
+{
81
+ const char *parent_name;
82
+ bool parent_enabled;
44
+
83
+
45
/* Set the ISA extensions, checks should have happened above */
84
+ if (!profile->enabled || !parent) {
46
if (cpu->cfg.ext_i) {
85
+ return;
47
ext |= RVI;
86
+ }
48
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
87
+
49
if (cpu->cfg.ext_j) {
88
+ parent_name = parent->name;
50
ext |= RVJ;
89
+ parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
51
}
90
+ profile->enabled = parent_enabled;
52
+ if (cpu->cfg.ext_zfinx && ((ext & (RVF | RVD)) || cpu->cfg.ext_zfh ||
91
+}
53
+ cpu->cfg.ext_zfhmin)) {
92
+
54
+ error_setg(errp,
93
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
55
+ "'Zfinx' cannot be supported together with 'F', 'D', 'Zfh',"
94
RISCVCPUProfile *profile)
56
+ " 'Zfhmin'");
95
{
57
+ return;
96
CPURISCVState *env = &cpu->env;
58
+ }
97
const char *warn_msg = "Profile %s mandates disabled extension %s";
59
98
bool send_warn = profile->user_set && profile->enabled;
60
set_misa(env, env->misa_mxl, ext);
99
- bool parent_enabled, profile_impl = true;
100
+ bool profile_impl = true;
101
int i;
102
103
#ifndef CONFIG_USER_ONLY
104
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
105
106
profile->enabled = profile_impl;
107
108
- if (profile->parent != NULL) {
109
- parent_enabled = object_property_get_bool(OBJECT(cpu),
110
- profile->parent->name,
111
- NULL);
112
- profile->enabled = profile->enabled && parent_enabled;
113
- }
114
+ riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
115
+ riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
116
}
117
118
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
119
@@ -XXX,XX +XXX,XX @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
120
profile->user_set = true;
121
profile->enabled = value;
122
123
- if (profile->parent != NULL) {
124
- object_property_set_bool(obj, profile->parent->name,
125
+ if (profile->u_parent != NULL) {
126
+ object_property_set_bool(obj, profile->u_parent->name,
127
+ profile->enabled, NULL);
128
+ }
129
+
130
+ if (profile->s_parent != NULL) {
131
+ object_property_set_bool(obj, profile->s_parent->name,
132
profile->enabled, NULL);
61
}
133
}
134
62
--
135
--
63
2.35.1
136
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
The S profiles do a priv_ver check during validation to see if the
4
running priv_ver is compatible with it. This check is done by comparing
5
if the running priv_ver is equal to the priv_ver the profile specifies.
6
7
There is an universe where we added RVA23S64 support based on both
8
RVA23U64 and RVA22S64 and this error is being thrown:
9
10
qemu-system-riscv64: warning: Profile rva22s64 requires
11
priv spec v1.12.0, but priv ver v1.13.0 was set
12
13
We're enabling RVA22S64 (priv_ver 1.12) as a dependency of RVA23S64
14
(priv_ver 1.13) and complaining to users about what we did ourselves.
15
16
There's no drawback in allowing a profile to run in an env that has a
17
priv_ver newer than it's required by it. So, like Hiro Nakamura saves
18
the future by changing the past, change the priv_ver check now to allow
19
profiles to run in a newer priv_ver. This universe will have one less
20
warning to deal with.
21
22
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
23
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Message-ID: <20250115184316.2344583-5-dbarboza@ventanamicro.com>
26
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
27
---
28
target/riscv/tcg/tcg-cpu.c | 2 +-
29
1 file changed, 1 insertion(+), 1 deletion(-)
30
31
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/tcg/tcg-cpu.c
34
+++ b/target/riscv/tcg/tcg-cpu.c
35
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
36
#endif
37
38
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
39
- profile->priv_spec != env->priv_ver) {
40
+ profile->priv_spec > env->priv_ver) {
41
profile_impl = false;
42
43
if (send_warn) {
44
--
45
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Add RVA23U64 as described in [1]. Add it as a child of RVA22U64 since
4
all RVA22U64 mandatory extensions are also present in RVA23U64. What's
5
left then is to list the mandatory extensions that are RVA23 only.
6
7
A new "rva23u64" CPU is also added.
8
9
[1] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-ID: <20250115184316.2344583-6-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/cpu-qom.h | 1 +
18
target/riscv/cpu.c | 33 +++++++++++++++++++++++++++++++++
19
2 files changed, 34 insertions(+)
20
21
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/cpu-qom.h
24
+++ b/target/riscv/cpu-qom.h
25
@@ -XXX,XX +XXX,XX @@
26
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
27
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
28
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
29
+#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
30
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
31
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
32
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
33
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/cpu.c
36
+++ b/target/riscv/cpu.c
37
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA22S64 = {
38
}
39
};
40
41
+/*
42
+ * All mandatory extensions from RVA22U64 are present
43
+ * in RVA23U64 so set RVA22 as a parent. We need to
44
+ * declare just the newly added mandatory extensions.
45
+ */
46
+static RISCVCPUProfile RVA23U64 = {
47
+ .u_parent = &RVA22U64,
48
+ .s_parent = NULL,
49
+ .name = "rva23u64",
50
+ .misa_ext = RVV,
51
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
52
+ .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
53
+ .ext_offsets = {
54
+ CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
55
+ CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
56
+ CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
57
+ CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
58
+ CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
59
+ CPU_CFG_OFFSET(ext_supm),
60
+
61
+ RISCV_PROFILE_EXT_LIST_END
62
+ }
63
+};
64
+
65
RISCVCPUProfile *riscv_profiles[] = {
66
&RVA22U64,
67
&RVA22S64,
68
+ &RVA23U64,
69
NULL,
70
};
71
72
@@ -XXX,XX +XXX,XX @@ static void rva22s64_profile_cpu_init(Object *obj)
73
74
RVA22S64.enabled = true;
75
}
76
+
77
+static void rva23u64_profile_cpu_init(Object *obj)
78
+{
79
+ rv64i_bare_cpu_init(obj);
80
+
81
+ RVA23U64.enabled = true;
82
+}
83
#endif
84
85
static const gchar *riscv_gdb_arch_name(CPUState *cs)
86
@@ -XXX,XX +XXX,XX @@ static const TypeInfo riscv_cpu_type_infos[] = {
87
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
88
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
89
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
90
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
91
#endif /* TARGET_RISCV64 */
92
};
93
94
--
95
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Add RVA23S64 as described in [1]. This profile inherits all mandatory
4
extensions of RVA23U64 and RVA22S64, making it a child of both profiles.
5
6
A new "rva23s64" profile CPU is also added. This is the generated
7
riscv,isa for it (taken via -M dumpdtb):
8
9
rv64imafdcbvh_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_
10
ziccrse_zicond_zicntr_zicsr_zifencei_zihintntl_zihintpause_zihpm_zimop_
11
zmmul_za64rs_zaamo_zalrsc_zawrs_zfa_zfhmin_zca_zcb_zcd_zcmop_zba_zbb_zbs_
12
zkt_zvbb_zve32f_zve32x_zve64f_zve64d_zve64x_zvfhmin_zvkb_zvkt_shcounterenw_
13
sha_shgatpa_shtvala_shvsatpa_shvstvala_shvstvecd_smnpm_smstateen_ssccptr_
14
sscofpmf_sscounterenw_ssnpm_ssstateen_sstc_sstvala_sstvecd_ssu64xl_
15
supm_svade_svinval_svnapot_svpbmt
16
17
[1] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc
18
19
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
20
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
21
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
22
Message-ID: <20250115184316.2344583-7-dbarboza@ventanamicro.com>
23
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
---
25
target/riscv/cpu-qom.h | 1 +
26
target/riscv/cpu.c | 39 +++++++++++++++++++++++++++++++++++++++
27
2 files changed, 40 insertions(+)
28
29
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu-qom.h
32
+++ b/target/riscv/cpu-qom.h
33
@@ -XXX,XX +XXX,XX @@
34
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
35
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
36
#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
37
+#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
38
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
39
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
40
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
41
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/riscv/cpu.c
44
+++ b/target/riscv/cpu.c
45
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA23U64 = {
46
}
47
};
48
49
+/*
50
+ * As with RVA23U64, RVA23S64 also defines 'named features'.
51
+ *
52
+ * Cache related features that we consider enabled since we don't
53
+ * implement cache: Ssccptr
54
+ *
55
+ * Other named features that we already implement: Sstvecd, Sstvala,
56
+ * Sscounterenw, Ssu64xl
57
+ *
58
+ * The remaining features/extensions comes from RVA23S64.
59
+ */
60
+static RISCVCPUProfile RVA23S64 = {
61
+ .u_parent = &RVA23U64,
62
+ .s_parent = &RVA22S64,
63
+ .name = "rva23s64",
64
+ .misa_ext = RVS,
65
+ .priv_spec = PRIV_VERSION_1_13_0,
66
+ .satp_mode = VM_1_10_SV39,
67
+ .ext_offsets = {
68
+ /* New in RVA23S64 */
69
+ CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
70
+ CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
71
+
72
+ /* Named features: Sha */
73
+ CPU_CFG_OFFSET(ext_sha),
74
+
75
+ RISCV_PROFILE_EXT_LIST_END
76
+ }
77
+};
78
+
79
RISCVCPUProfile *riscv_profiles[] = {
80
&RVA22U64,
81
&RVA22S64,
82
&RVA23U64,
83
+ &RVA23S64,
84
NULL,
85
};
86
87
@@ -XXX,XX +XXX,XX @@ static void rva23u64_profile_cpu_init(Object *obj)
88
89
RVA23U64.enabled = true;
90
}
91
+
92
+static void rva23s64_profile_cpu_init(Object *obj)
93
+{
94
+ rv64i_bare_cpu_init(obj);
95
+
96
+ RVA23S64.enabled = true;
97
+}
98
#endif
99
100
static const gchar *riscv_gdb_arch_name(CPUState *cs)
101
@@ -XXX,XX +XXX,XX @@ static const TypeInfo riscv_cpu_type_infos[] = {
102
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
103
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
104
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
105
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
106
#endif /* TARGET_RISCV64 */
107
};
108
109
--
110
2.48.1
diff view generated by jsdifflib
New patch
1
From: Jason Chien <jason.chien@sifive.com>
1
2
3
Initially, the IOMMU would create a thread, but this thread was removed in
4
the merged version. The struct members for thread control should have been
5
removed as well, but they were not removed in commit 0c54acb8243
6
("hw/riscv: add RISC-V IOMMU base emulation").
7
8
Signed-off-by: Jason Chien <jason.chien@sifive.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250115141730.30858-1-jason.chien@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
hw/riscv/riscv-iommu.h | 5 -----
16
1 file changed, 5 deletions(-)
17
18
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu.h
21
+++ b/hw/riscv/riscv-iommu.h
22
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
23
/* interrupt notifier */
24
void (*notify)(RISCVIOMMUState *iommu, unsigned vector);
25
26
- /* IOMMU State Machine */
27
- QemuThread core_proc; /* Background processing thread */
28
- QemuCond core_cond; /* Background processing wake up signal */
29
- unsigned core_exec; /* Processing thread execution actions */
30
-
31
/* IOMMU target address space */
32
AddressSpace *target_as;
33
MemoryRegion *target_mr;
34
--
35
2.48.1
diff view generated by jsdifflib
New patch
1
From: Jason Chien <jason.chien@sifive.com>
1
2
3
The header contains duplicate macro definitions.
4
This commit eliminates the duplicate part.
5
6
Signed-off-by: Jason Chien <jason.chien@sifive.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250115141730.30858-2-jason.chien@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
hw/riscv/riscv-iommu-bits.h | 22 ++++++----------------
14
1 file changed, 6 insertions(+), 16 deletions(-)
15
16
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/riscv/riscv-iommu-bits.h
19
+++ b/hw/riscv/riscv-iommu-bits.h
20
@@ -XXX,XX +XXX,XX @@ struct riscv_iommu_pq_record {
21
#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
22
#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
23
#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
24
+
25
/* Payload fields */
26
+#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
27
+#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
28
+#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
29
#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
30
+#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
31
+#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
32
33
/* Common field positions */
34
#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10)
35
@@ -XXX,XX +XXX,XX @@ enum riscv_iommu_fq_ttypes {
36
RISCV_IOMMU_FW_TTYPE_PCIE_MSG_REQ = 9,
37
};
38
39
-/* Header fields */
40
-#define RISCV_IOMMU_PREQ_HDR_PID GENMASK_ULL(31, 12)
41
-#define RISCV_IOMMU_PREQ_HDR_PV BIT_ULL(32)
42
-#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
43
-#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
44
-#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
45
-
46
-/* Payload fields */
47
-#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
48
-#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
49
-#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
50
-#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
51
-#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
52
-#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
53
-
54
-
55
/*
56
* struct riscv_iommu_msi_pte - MSI Page Table Entry
57
*/
58
--
59
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rodrigo Dias Correa <r@drigo.nl>
1
2
3
Instead of migrating the raw tick_offset, goldfish_rtc migrates a
4
recalculated value based on QEMU_CLOCK_VIRTUAL. As QEMU_CLOCK_VIRTUAL
5
stands still across a save-and-restore cycle, the guest RTC becomes out
6
of sync with the host RTC when the VM is restored.
7
8
As described in the bug description, it looks like this calculation was
9
copied from pl031 RTC, which had its tick_offset migration fixed by
10
Commit 032cfe6a79c8 ("pl031: Correctly migrate state when using -rtc
11
clock=host").
12
13
Migrate the tick_offset directly, adding it as a version-dependent field
14
to VMState. Keep the old behavior when migrating from previous versions.
15
16
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2033
17
Signed-off-by: Rodrigo Dias Correa <r@drigo.nl>
18
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20250114212150.228241-1-r@drigo.nl>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
22
hw/rtc/goldfish_rtc.c | 43 +++++++++++++------------------------------
23
1 file changed, 13 insertions(+), 30 deletions(-)
24
25
diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/rtc/goldfish_rtc.c
28
+++ b/hw/rtc/goldfish_rtc.c
29
@@ -XXX,XX +XXX,XX @@ static void goldfish_rtc_write(void *opaque, hwaddr offset,
30
trace_goldfish_rtc_write(offset, value);
31
}
32
33
-static int goldfish_rtc_pre_save(void *opaque)
34
-{
35
- uint64_t delta;
36
- GoldfishRTCState *s = opaque;
37
-
38
- /*
39
- * We want to migrate this offset, which sounds straightforward.
40
- * Unfortunately, we cannot directly pass tick_offset because
41
- * rtc_clock on destination Host might not be same source Host.
42
- *
43
- * To tackle, this we pass tick_offset relative to vm_clock from
44
- * source Host and make it relative to rtc_clock at destination Host.
45
- */
46
- delta = qemu_clock_get_ns(rtc_clock) -
47
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
48
- s->tick_offset_vmstate = s->tick_offset + delta;
49
-
50
- return 0;
51
-}
52
-
53
static int goldfish_rtc_post_load(void *opaque, int version_id)
54
{
55
- uint64_t delta;
56
GoldfishRTCState *s = opaque;
57
58
- /*
59
- * We extract tick_offset from tick_offset_vmstate by doing
60
- * reverse math compared to pre_save() function.
61
- */
62
- delta = qemu_clock_get_ns(rtc_clock) -
63
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
64
- s->tick_offset = s->tick_offset_vmstate - delta;
65
+ if (version_id < 3) {
66
+ /*
67
+ * Previous versions didn't migrate tick_offset directly. Instead, they
68
+ * migrated tick_offset_vmstate, which is a recalculation based on
69
+ * QEMU_CLOCK_VIRTUAL. We use tick_offset_vmstate when migrating from
70
+ * older versions.
71
+ */
72
+ uint64_t delta = qemu_clock_get_ns(rtc_clock) -
73
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
74
+ s->tick_offset = s->tick_offset_vmstate - delta;
75
+ }
76
77
goldfish_rtc_set_alarm(s);
78
79
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps goldfish_rtc_ops[2] = {
80
81
static const VMStateDescription goldfish_rtc_vmstate = {
82
.name = TYPE_GOLDFISH_RTC,
83
- .version_id = 2,
84
- .pre_save = goldfish_rtc_pre_save,
85
+ .version_id = 3,
86
.post_load = goldfish_rtc_post_load,
87
.fields = (const VMStateField[]) {
88
VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState),
89
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription goldfish_rtc_vmstate = {
90
VMSTATE_UINT32(irq_pending, GoldfishRTCState),
91
VMSTATE_UINT32(irq_enabled, GoldfishRTCState),
92
VMSTATE_UINT32(time_high, GoldfishRTCState),
93
+ VMSTATE_UINT64_V(tick_offset, GoldfishRTCState, 3),
94
VMSTATE_END_OF_LIST()
95
}
96
};
97
--
98
2.48.1
diff view generated by jsdifflib
1
From: Anup Patel <anup.patel@wdc.com>
1
From: Vasilis Liaskovitis <vliaskovitis@suse.com>
2
2
3
To facilitate software development of RISC-V systems with large number
3
Add an "aliases" node with a "serial0" entry for the single UART
4
of HARTs, we increase the maximum number of allowed CPUs to 512 (2^9).
4
in the riscv virt machine.
5
5
6
We also add a detailed source level comments about limit defines which
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2774
7
impact the physical address space utilization.
7
Signed-off-by: Vasilis Liaskovitis <vliaskovitis@suse.com>
8
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Signed-off-by: Anup Patel <anup.patel@wdc.com>
10
Signed-off-by: Anup Patel <anup@brainfault.org>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Frank Chang <frank.chang@sifive.com>
10
Message-ID: <20250116161007.39710-1-vliaskovitis@suse.com>
13
Message-Id: <20220220085526.808674-6-anup@brainfault.org>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
12
---
16
include/hw/riscv/virt.h | 2 +-
13
hw/riscv/virt.c | 3 +++
17
hw/riscv/virt.c | 10 ++++++++++
14
1 file changed, 3 insertions(+)
18
2 files changed, 11 insertions(+), 1 deletion(-)
19
15
20
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/riscv/virt.h
23
+++ b/include/hw/riscv/virt.h
24
@@ -XXX,XX +XXX,XX @@
25
#include "hw/block/flash.h"
26
#include "qom/object.h"
27
28
-#define VIRT_CPUS_MAX_BITS 3
29
+#define VIRT_CPUS_MAX_BITS 9
30
#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
31
#define VIRT_SOCKETS_MAX_BITS 2
32
#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
33
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
16
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
34
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
35
--- a/hw/riscv/virt.c
18
--- a/hw/riscv/virt.c
36
+++ b/hw/riscv/virt.c
19
+++ b/hw/riscv/virt.c
37
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
38
#include "hw/pci-host/gpex.h"
21
}
39
#include "hw/display/ramfb.h"
22
40
23
qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
41
+/*
24
+ qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name);
42
+ * The virt machine physical address space used by some of the devices
25
}
43
+ * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets,
26
44
+ * number of CPUs, and number of IMSIC guest files.
27
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
45
+ *
28
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
46
+ * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS,
29
qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed",
47
+ * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization
30
rng_seed, sizeof(rng_seed));
48
+ * of virt machine physical address space.
31
49
+ */
32
+ qemu_fdt_add_subnode(ms->fdt, "/aliases");
50
+
33
+
51
#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
34
create_fdt_flash(s, memmap);
52
#if VIRT_IMSIC_GROUP_MAX_SIZE < \
35
create_fdt_fw_cfg(s, memmap);
53
IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
36
create_fdt_pmu(s);
54
--
37
--
55
2.35.1
38
2.48.1
diff view generated by jsdifflib
New patch
1
From: Alistair Francis <alistair23@gmail.com>
1
2
3
Bin Meng has been a long time contributor and maintainer for QEMU RISC-V
4
and has been very beneficial to the RISC-V ecosystem.
5
6
Unfortunately his email has started to bounce so this patch is removing
7
them from MAINTAINERS. If in the future Bin Meng wants to return we will
8
happily re-add them.
9
10
Note that I'm not removing Bin Meng as a "SD (Secure Card)" maintainer.
11
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Acked-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250128060546.1374394-1-alistair.francis@wdc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
MAINTAINERS | 5 +----
18
1 file changed, 1 insertion(+), 4 deletions(-)
19
20
diff --git a/MAINTAINERS b/MAINTAINERS
21
index XXXXXXX..XXXXXXX 100644
22
--- a/MAINTAINERS
23
+++ b/MAINTAINERS
24
@@ -XXX,XX +XXX,XX @@ F: tests/functional/test_ppc_74xx.py
25
RISC-V TCG CPUs
26
M: Palmer Dabbelt <palmer@dabbelt.com>
27
M: Alistair Francis <alistair.francis@wdc.com>
28
-M: Bin Meng <bmeng.cn@gmail.com>
29
R: Weiwei Li <liwei1518@gmail.com>
30
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
32
@@ -XXX,XX +XXX,XX @@ F: include/hw/riscv/opentitan.h
33
F: include/hw/*/ibex_*.h
34
35
Microchip PolarFire SoC Icicle Kit
36
-M: Bin Meng <bmeng.cn@gmail.com>
37
L: qemu-riscv@nongnu.org
38
S: Supported
39
F: docs/system/riscv/microchip-icicle-kit.rst
40
@@ -XXX,XX +XXX,XX @@ F: include/hw/char/shakti_uart.h
41
42
SiFive Machines
43
M: Alistair Francis <Alistair.Francis@wdc.com>
44
-M: Bin Meng <bmeng.cn@gmail.com>
45
M: Palmer Dabbelt <palmer@dabbelt.com>
46
L: qemu-riscv@nongnu.org
47
S: Supported
48
@@ -XXX,XX +XXX,XX @@ S: Orphan
49
F: hw/i386/amd_iommu.?
50
51
OpenSBI Firmware
52
-M: Bin Meng <bmeng.cn@gmail.com>
53
+L: qemu-riscv@nongnu.org
54
S: Supported
55
F: pc-bios/opensbi-*
56
F: .gitlab-ci.d/opensbi.yml
57
--
58
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
2
3
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Jason Chien <jason.chien@sifive.com>
6
Message-ID: <20250205-b4-ctr_upstream_v6-v6-1-439d8e06c8ef@rivosinc.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/insn32.decode | 1 -
10
target/riscv/insn_trans/trans_privileged.c.inc | 5 -----
11
2 files changed, 6 deletions(-)
12
13
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/insn32.decode
16
+++ b/target/riscv/insn32.decode
17
@@ -XXX,XX +XXX,XX @@ sret 0001000 00010 00000 000 00000 1110011
18
mret 0011000 00010 00000 000 00000 1110011
19
wfi 0001000 00101 00000 000 00000 1110011
20
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
21
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
22
23
# *** NMI ***
24
mnret 0111000 00010 00000 000 00000 1110011
25
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/insn_trans/trans_privileged.c.inc
28
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
29
@@ -XXX,XX +XXX,XX @@ static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
30
#endif
31
return false;
32
}
33
-
34
-static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
35
-{
36
- return false;
37
-}
38
--
39
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
2
3
The Control Transfer Records (CTR) extension provides a method to
4
record a limited branch history in register-accessible internal chip
5
storage.
6
7
This extension is similar to Arch LBR in x86 and BRBE in ARM.
8
The Extension has been stable and the latest release can be found here
9
https://github.com/riscv/riscv-control-transfer-records/releases/tag/v1.0_rc5
10
11
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20250205-b4-ctr_upstream_v6-v6-2-439d8e06c8ef@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
target/riscv/cpu_bits.h | 145 ++++++++++++++++++++++++++++++++++++++++
17
1 file changed, 145 insertions(+)
18
19
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu_bits.h
22
+++ b/target/riscv/cpu_bits.h
23
@@ -XXX,XX +XXX,XX @@
24
#define CSR_SIEH 0x114
25
#define CSR_SIPH 0x154
26
27
+/* Machine-Level Control transfer records CSRs */
28
+#define CSR_MCTRCTL 0x34e
29
+
30
+/* Supervisor-Level Control transfer records CSRs */
31
+#define CSR_SCTRCTL 0x14e
32
+#define CSR_SCTRSTATUS 0x14f
33
+#define CSR_SCTRDEPTH 0x15f
34
+
35
+/* VS-Level Control transfer records CSRs */
36
+#define CSR_VSCTRCTL 0x24e
37
+
38
/* Hpervisor CSRs */
39
#define CSR_HSTATUS 0x600
40
#define CSR_HEDELEG 0x602
41
@@ -XXX,XX +XXX,XX @@
42
#define SMSTATEEN0_CS (1ULL << 0)
43
#define SMSTATEEN0_FCSR (1ULL << 1)
44
#define SMSTATEEN0_JVT (1ULL << 2)
45
+#define SMSTATEEN0_CTR (1ULL << 54)
46
#define SMSTATEEN0_P1P13 (1ULL << 56)
47
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
48
#define SMSTATEEN0_IMSIC (1ULL << 58)
49
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
50
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
51
#define HENVCFGH_STCE MENVCFGH_STCE
52
53
+/* Offsets for every pair of control bits per each priv level */
54
+#define XS_OFFSET 0ULL
55
+#define U_OFFSET 2ULL
56
+#define S_OFFSET 5ULL
57
+#define M_OFFSET 8ULL
58
+
59
+#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
60
+#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
61
+#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
62
+#define U_PM_INSN (PM_INSN << U_OFFSET)
63
+#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
64
+#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
65
+#define S_PM_INSN (PM_INSN << S_OFFSET)
66
+#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
67
+#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
68
+#define M_PM_INSN (PM_INSN << M_OFFSET)
69
+
70
+/* mmte CSR bits */
71
+#define MMTE_PM_XS_BITS PM_XS_BITS
72
+#define MMTE_U_PM_ENABLE U_PM_ENABLE
73
+#define MMTE_U_PM_CURRENT U_PM_CURRENT
74
+#define MMTE_U_PM_INSN U_PM_INSN
75
+#define MMTE_S_PM_ENABLE S_PM_ENABLE
76
+#define MMTE_S_PM_CURRENT S_PM_CURRENT
77
+#define MMTE_S_PM_INSN S_PM_INSN
78
+#define MMTE_M_PM_ENABLE M_PM_ENABLE
79
+#define MMTE_M_PM_CURRENT M_PM_CURRENT
80
+#define MMTE_M_PM_INSN M_PM_INSN
81
+#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
82
+ MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
83
+ MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
84
+ MMTE_PM_XS_BITS)
85
+
86
+/* (v)smte CSR bits */
87
+#define SMTE_PM_XS_BITS PM_XS_BITS
88
+#define SMTE_U_PM_ENABLE U_PM_ENABLE
89
+#define SMTE_U_PM_CURRENT U_PM_CURRENT
90
+#define SMTE_U_PM_INSN U_PM_INSN
91
+#define SMTE_S_PM_ENABLE S_PM_ENABLE
92
+#define SMTE_S_PM_CURRENT S_PM_CURRENT
93
+#define SMTE_S_PM_INSN S_PM_INSN
94
+#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
95
+ SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
96
+ SMTE_PM_XS_BITS)
97
+
98
+/* umte CSR bits */
99
+#define UMTE_U_PM_ENABLE U_PM_ENABLE
100
+#define UMTE_U_PM_CURRENT U_PM_CURRENT
101
+#define UMTE_U_PM_INSN U_PM_INSN
102
+#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
103
+
104
+/* CTR control register commom fields */
105
+#define XCTRCTL_U BIT_ULL(0)
106
+#define XCTRCTL_S BIT_ULL(1)
107
+#define XCTRCTL_RASEMU BIT_ULL(7)
108
+#define XCTRCTL_STE BIT_ULL(8)
109
+#define XCTRCTL_BPFRZ BIT_ULL(11)
110
+#define XCTRCTL_LCOFIFRZ BIT_ULL(12)
111
+#define XCTRCTL_EXCINH BIT_ULL(33)
112
+#define XCTRCTL_INTRINH BIT_ULL(34)
113
+#define XCTRCTL_TRETINH BIT_ULL(35)
114
+#define XCTRCTL_NTBREN BIT_ULL(36)
115
+#define XCTRCTL_TKBRINH BIT_ULL(37)
116
+#define XCTRCTL_INDCALLINH BIT_ULL(40)
117
+#define XCTRCTL_DIRCALLINH BIT_ULL(41)
118
+#define XCTRCTL_INDJMPINH BIT_ULL(42)
119
+#define XCTRCTL_DIRJMPINH BIT_ULL(43)
120
+#define XCTRCTL_CORSWAPINH BIT_ULL(44)
121
+#define XCTRCTL_RETINH BIT_ULL(45)
122
+#define XCTRCTL_INDLJMPINH BIT_ULL(46)
123
+#define XCTRCTL_DIRLJMPINH BIT_ULL(47)
124
+
125
+#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \
126
+ XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \
127
+ XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \
128
+ XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \
129
+ XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \
130
+ XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \
131
+ XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH)
132
+
133
+#define XCTRCTL_INH_START 32U
134
+
135
+/* CTR mctrctl bits */
136
+#define MCTRCTL_M BIT_ULL(2)
137
+#define MCTRCTL_MTE BIT_ULL(9)
138
+
139
+#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE)
140
+#define SCTRCTL_MASK XCTRCTL_MASK
141
+#define VSCTRCTL_MASK XCTRCTL_MASK
142
+
143
+/* sctrstatus CSR bits. */
144
+#define SCTRSTATUS_WRPTR_MASK 0xFF
145
+#define SCTRSTATUS_FROZEN BIT(31)
146
+#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN)
147
+
148
+/* sctrdepth CSR bits. */
149
+#define SCTRDEPTH_MASK 0x7
150
+#define SCTRDEPTH_MIN 0U /* 16 Entries. */
151
+#define SCTRDEPTH_MAX 4U /* 256 Entries. */
152
+
153
+#define CTR_ENTRIES_FIRST 0x200
154
+#define CTR_ENTRIES_LAST 0x2ff
155
+
156
+#define CTRSOURCE_VALID BIT(0)
157
+#define CTRTARGET_MISP BIT(0)
158
+
159
+#define CTRDATA_TYPE_MASK 0xF
160
+#define CTRDATA_CCV BIT(15)
161
+#define CTRDATA_CCM_MASK 0xFFF0000
162
+#define CTRDATA_CCE_MASK 0xF0000000
163
+
164
+#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \
165
+ CTRDATA_CCM_MASK | CTRDATA_CCE_MASK)
166
+
167
+typedef enum CTRType {
168
+ CTRDATA_TYPE_NONE = 0,
169
+ CTRDATA_TYPE_EXCEPTION = 1,
170
+ CTRDATA_TYPE_INTERRUPT = 2,
171
+ CTRDATA_TYPE_EXCEP_INT_RET = 3,
172
+ CTRDATA_TYPE_NONTAKEN_BRANCH = 4,
173
+ CTRDATA_TYPE_TAKEN_BRANCH = 5,
174
+ CTRDATA_TYPE_RESERVED_0 = 6,
175
+ CTRDATA_TYPE_RESERVED_1 = 7,
176
+ CTRDATA_TYPE_INDIRECT_CALL = 8,
177
+ CTRDATA_TYPE_DIRECT_CALL = 9,
178
+ CTRDATA_TYPE_INDIRECT_JUMP = 10,
179
+ CTRDATA_TYPE_DIRECT_JUMP = 11,
180
+ CTRDATA_TYPE_CO_ROUTINE_SWAP = 12,
181
+ CTRDATA_TYPE_RETURN = 13,
182
+ CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14,
183
+ CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15,
184
+} CTRType;
185
+
186
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
187
#define ISELECT_IPRIO0 0x30
188
#define ISELECT_IPRIO15 0x3f
189
--
190
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
2
3
This commit adds support for [m|s|vs]ctrcontrol, sctrstatus and
4
sctrdepth CSRs handling.
5
6
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-ID: <20250205-b4-ctr_upstream_v6-v6-3-439d8e06c8ef@rivosinc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/cpu.h | 5 ++
12
target/riscv/cpu_cfg.h | 2 +
13
target/riscv/csr.c | 144 +++++++++++++++++++++++++++++++++++++++++
14
3 files changed, 151 insertions(+)
15
16
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu.h
19
+++ b/target/riscv/cpu.h
20
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
21
target_ulong mcause;
22
target_ulong mtval; /* since: priv-1.10.0 */
23
24
+ uint64_t mctrctl;
25
+ uint32_t sctrdepth;
26
+ uint32_t sctrstatus;
27
+ uint64_t vsctrctl;
28
+
29
/* Machine and Supervisor interrupt priorities */
30
uint8_t miprio[64];
31
uint8_t siprio[64];
32
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_cfg.h
35
+++ b/target/riscv/cpu_cfg.h
36
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
37
bool ext_zvfhmin;
38
bool ext_smaia;
39
bool ext_ssaia;
40
+ bool ext_smctr;
41
+ bool ext_ssctr;
42
bool ext_sscofpmf;
43
bool ext_smepmp;
44
bool ext_smrnmi;
45
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/riscv/csr.c
48
+++ b/target/riscv/csr.c
49
@@ -XXX,XX +XXX,XX @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
50
return hmode(env, csrno);
51
}
52
53
+/*
54
+ * M-mode:
55
+ * Without ext_smctr raise illegal inst excep.
56
+ * Otherwise everything is accessible to m-mode.
57
+ *
58
+ * S-mode:
59
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
60
+ * Otherwise everything other than mctrctl is accessible.
61
+ *
62
+ * VS-mode:
63
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
64
+ * Without hstateen.ctr raise virtual illegal inst excep.
65
+ * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
66
+ * Always raise illegal instruction exception for sctrdepth.
67
+ */
68
+static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
69
+{
70
+ /* Check if smctr-ext is present */
71
+ if (riscv_cpu_cfg(env)->ext_smctr) {
72
+ return RISCV_EXCP_NONE;
73
+ }
74
+
75
+ return RISCV_EXCP_ILLEGAL_INST;
76
+}
77
+
78
+static RISCVException ctr_smode(CPURISCVState *env, int csrno)
79
+{
80
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
81
+
82
+ if (!cfg->ext_smctr && !cfg->ext_ssctr) {
83
+ return RISCV_EXCP_ILLEGAL_INST;
84
+ }
85
+
86
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
87
+ if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
88
+ env->virt_enabled) {
89
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
90
+ }
91
+
92
+ return ret;
93
+}
94
+
95
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
96
{
97
int ret;
98
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
99
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
100
}
101
102
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
103
+ wr_mask |= SMSTATEEN0_CTR;
104
+ }
105
+
106
return write_mstateen(env, csrno, wr_mask, new_val);
107
}
108
109
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
110
wr_mask |= SMSTATEEN0_P1P13;
111
}
112
113
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
114
+ wr_mask |= SMSTATEEN0_CTR;
115
+ }
116
+
117
return write_mstateenh(env, csrno, wr_mask, new_val);
118
}
119
120
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
121
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
122
}
123
124
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
125
+ wr_mask |= SMSTATEEN0_CTR;
126
+ }
127
+
128
return write_hstateen(env, csrno, wr_mask, new_val);
129
}
130
131
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
132
{
133
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
134
135
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
136
+ wr_mask |= SMSTATEEN0_CTR;
137
+ }
138
+
139
return write_hstateenh(env, csrno, wr_mask, new_val);
140
}
141
142
@@ -XXX,XX +XXX,XX @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
143
return RISCV_EXCP_NONE;
144
}
145
146
+static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
147
+ target_ulong *ret_val,
148
+ target_ulong new_val, target_ulong wr_mask)
149
+{
150
+ uint64_t mask = wr_mask & SCTRDEPTH_MASK;
151
+
152
+ if (ret_val) {
153
+ *ret_val = env->sctrdepth;
154
+ }
155
+
156
+ env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
157
+
158
+ /* Correct depth. */
159
+ if (mask) {
160
+ uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
161
+
162
+ if (depth > SCTRDEPTH_MAX) {
163
+ depth = SCTRDEPTH_MAX;
164
+ env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
165
+ }
166
+
167
+ /* Update sctrstatus.WRPTR with a legal value */
168
+ depth = 16 << depth;
169
+ env->sctrstatus =
170
+ env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
171
+ }
172
+
173
+ return RISCV_EXCP_NONE;
174
+}
175
+
176
+static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
177
+ target_ulong *ret_val,
178
+ target_ulong new_val, target_ulong wr_mask)
179
+{
180
+ uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
181
+ uint32_t mask = wr_mask & SCTRSTATUS_MASK;
182
+
183
+ if (ret_val) {
184
+ *ret_val = env->sctrstatus;
185
+ }
186
+
187
+ env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
188
+
189
+ /* Update sctrstatus.WRPTR with a legal value */
190
+ env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
191
+
192
+ return RISCV_EXCP_NONE;
193
+}
194
+
195
+static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
196
+ target_ulong *ret_val,
197
+ target_ulong new_val, target_ulong wr_mask)
198
+{
199
+ uint64_t csr_mask, mask = wr_mask;
200
+ uint64_t *ctl_ptr = &env->mctrctl;
201
+
202
+ if (csrno == CSR_MCTRCTL) {
203
+ csr_mask = MCTRCTL_MASK;
204
+ } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
205
+ csr_mask = SCTRCTL_MASK;
206
+ } else {
207
+ /*
208
+ * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
209
+ * or csrno == CSR_VSCTRCTL.
210
+ */
211
+ csr_mask = VSCTRCTL_MASK;
212
+ ctl_ptr = &env->vsctrctl;
213
+ }
214
+
215
+ mask &= csr_mask;
216
+
217
+ if (ret_val) {
218
+ *ret_val = *ctl_ptr & csr_mask;
219
+ }
220
+
221
+ *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
222
+
223
+ return RISCV_EXCP_NONE;
224
+}
225
+
226
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
227
target_ulong *val)
228
{
229
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
230
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
231
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
232
233
+ [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
234
+ [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
235
+ [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
236
+ [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
237
+ [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
238
+
239
/* Performance Counters */
240
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
241
[CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
242
--
243
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
2
2
3
- update extension check REQUIRE_ZFINX_OR_F
3
This commit adds logic to records CTR entries of different types
4
- update single float point register read/write
4
and adds required hooks in TCG and interrupt/Exception logic to
5
- disable nanbox_s check
5
record events.
6
6
7
Co-authored-by: ardxwe <ardxwe@gmail.com>
7
This commit also adds support to invoke freeze CTR logic for breakpoint
8
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
exceptions and counter overflow interrupts.
9
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-Id: <20220211043920.28981-4-liweiwei@iscas.ac.cn>
12
Message-ID: <20250205-b4-ctr_upstream_v6-v6-4-439d8e06c8ef@rivosinc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
14
---
15
target/riscv/helper.h | 2 +-
15
target/riscv/cpu.h | 7 +
16
target/riscv/internals.h | 16 +-
16
target/riscv/helper.h | 1 +
17
target/riscv/fpu_helper.c | 89 +++----
17
target/riscv/cpu_helper.c | 259 ++++++++++++++++++
18
target/riscv/translate.c | 93 ++++++-
18
target/riscv/op_helper.c | 19 ++
19
target/riscv/insn_trans/trans_rvf.c.inc | 314 ++++++++++++++++--------
19
target/riscv/translate.c | 46 ++++
20
5 files changed, 369 insertions(+), 145 deletions(-)
20
.../riscv/insn_trans/trans_privileged.c.inc | 2 +
21
target/riscv/insn_trans/trans_rvi.c.inc | 75 +++++
22
target/riscv/insn_trans/trans_rvzce.c.inc | 21 ++
23
8 files changed, 430 insertions(+)
21
24
25
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/cpu.h
28
+++ b/target/riscv/cpu.h
29
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
30
uint32_t sctrstatus;
31
uint64_t vsctrctl;
32
33
+ uint64_t ctr_src[16 << SCTRDEPTH_MAX];
34
+ uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
35
+ uint64_t ctr_data[16 << SCTRDEPTH_MAX];
36
+
37
/* Machine and Supervisor interrupt priorities */
38
uint8_t miprio[64];
39
uint8_t siprio[64];
40
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
41
42
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
43
44
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
45
+ enum CTRType type, target_ulong prev_priv, bool prev_virt);
46
+
47
void riscv_translate_init(void);
48
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
49
int *max_insns, vaddr pc, void *host_pc);
22
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
50
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
23
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/helper.h
52
--- a/target/riscv/helper.h
25
+++ b/target/riscv/helper.h
53
+++ b/target/riscv/helper.h
26
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
54
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(wfi, void, env)
27
DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
55
DEF_HELPER_1(wrs_nto, void, env)
28
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
56
DEF_HELPER_1(tlb_flush, void, env)
29
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
57
DEF_HELPER_1(tlb_flush_all, void, env)
30
-DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
58
+DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl)
31
+DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64)
59
/* Native Debug */
32
60
DEF_HELPER_1(itrigger_match, void, env)
33
/* Floating Point - Double Precision */
61
#endif
34
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
62
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
35
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
63
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
64
--- a/target/riscv/cpu_helper.c
37
--- a/target/riscv/internals.h
65
+++ b/target/riscv/cpu_helper.c
38
+++ b/target/riscv/internals.h
66
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
39
@@ -XXX,XX +XXX,XX @@ enum {
67
}
40
RISCV_FRM_ROD = 8, /* Round to Odd */
68
}
41
};
69
42
70
+static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
43
-static inline uint64_t nanbox_s(float32 f)
71
+ bool virt)
44
+static inline uint64_t nanbox_s(CPURISCVState *env, float32 f)
72
+{
73
+ uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
74
+
75
+ assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
76
+
77
+ if (ctl & freeze_mask) {
78
+ env->sctrstatus |= SCTRSTATUS_FROZEN;
79
+ }
80
+}
81
+
82
+static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
83
+{
84
+ switch (priv) {
85
+ case PRV_M:
86
+ return MCTRCTL_M;
87
+ case PRV_S:
88
+ if (virt) {
89
+ return XCTRCTL_S;
90
+ }
91
+ return XCTRCTL_S;
92
+ case PRV_U:
93
+ if (virt) {
94
+ return XCTRCTL_U;
95
+ }
96
+ return XCTRCTL_U;
97
+ }
98
+
99
+ g_assert_not_reached();
100
+}
101
+
102
+static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
103
+ bool virt)
104
+{
105
+ switch (priv) {
106
+ case PRV_M:
107
+ return env->mctrctl;
108
+ case PRV_S:
109
+ case PRV_U:
110
+ if (virt) {
111
+ return env->vsctrctl;
112
+ }
113
+ return env->mctrctl;
114
+ }
115
+
116
+ g_assert_not_reached();
117
+}
118
+
119
+/*
120
+ * This function assumes that src privilege and target privilege are not same
121
+ * and src privilege is less than target privilege. This includes the virtual
122
+ * state as well.
123
+ */
124
+static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
125
+ bool src_virt)
126
+{
127
+ target_long tgt_prv = env->priv;
128
+ bool res = true;
129
+
130
+ /*
131
+ * VS and U mode are same in terms of xTE bits required to record an
132
+ * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
133
+ * Requirements. This changes VS to U to simplify the logic a bit.
134
+ */
135
+ if (src_virt && src_prv == PRV_S) {
136
+ src_prv = PRV_U;
137
+ } else if (env->virt_enabled && tgt_prv == PRV_S) {
138
+ tgt_prv = PRV_U;
139
+ }
140
+
141
+ /* VU mode is an outlier here. */
142
+ if (src_virt && src_prv == PRV_U) {
143
+ res &= !!(env->vsctrctl & XCTRCTL_STE);
144
+ }
145
+
146
+ switch (src_prv) {
147
+ case PRV_U:
148
+ if (tgt_prv == PRV_U) {
149
+ break;
150
+ }
151
+ res &= !!(env->mctrctl & XCTRCTL_STE);
152
+ /* fall-through */
153
+ case PRV_S:
154
+ if (tgt_prv == PRV_S) {
155
+ break;
156
+ }
157
+ res &= !!(env->mctrctl & MCTRCTL_MTE);
158
+ /* fall-through */
159
+ case PRV_M:
160
+ break;
161
+ }
162
+
163
+ return res;
164
+}
165
+
166
+/*
167
+ * Special cases for traps and trap returns:
168
+ *
169
+ * 1- Traps, and trap returns, between enabled modes are recorded as normal.
170
+ * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
171
+ * enabled mode back to an inhibited mode, are partially recorded. In such
172
+ * cases, the PC from the inhibited mode (source PC for traps, and target PC
173
+ * for trap returns) is 0.
174
+ *
175
+ * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
176
+ * Traps from an enabled mode to an inhibited mode, known as external traps,
177
+ * receive special handling.
178
+ * By default external traps are not recorded, but a handshake mechanism exists
179
+ * to allow partial recording. Software running in the target mode of the trap
180
+ * can opt-in to allowing CTR to record traps into that mode even when the mode
181
+ * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
182
+ * respectively, to opt-in. When an External Trap occurs, and xTE=1, such that
183
+ * x is the target privilege mode of the trap, will CTR record the trap. In such
184
+ * cases, the target PC is 0.
185
+ */
186
+/*
187
+ * CTR arrays are implemented as circular buffers and new entry is stored at
188
+ * sctrstatus.WRPTR, but they are presented to software as moving circular
189
+ * buffers. Which means, software get's the illusion that whenever a new entry
190
+ * is added the whole buffer is moved by one place and the new entry is added at
191
+ * the start keeping new entry at idx 0 and older ones follow.
192
+ *
193
+ * Depth = 16.
194
+ *
195
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
196
+ * WRPTR W
197
+ * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
198
+ *
199
+ * When a new entry is added:
200
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
201
+ * WRPTR W
202
+ * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
203
+ *
204
+ * entry here denotes the logical entry number that software can access
205
+ * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
206
+ * will return entry 0 i-e buffer[8] and 0x201 will return entry 1 i-e
207
+ * buffer[7]. Here is how we convert entry to buffer idx.
208
+ *
209
+ * entry = isel - CTR_ENTRIES_FIRST;
210
+ * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
211
+ */
212
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
213
+ enum CTRType type, target_ulong src_priv, bool src_virt)
214
+{
215
+ bool tgt_virt = env->virt_enabled;
216
+ uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
217
+ uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
218
+ uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
219
+ uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
220
+ uint64_t depth, head;
221
+ bool ext_trap = false;
222
+
223
+ /*
224
+ * Return immediately if both target and src recording is disabled or if
225
+ * CTR is in frozen state.
226
+ */
227
+ if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
228
+ env->sctrstatus & SCTRSTATUS_FROZEN) {
229
+ return;
230
+ }
231
+
232
+ /*
233
+ * With RAS Emul enabled, only allow Indirect, direct calls, Function
234
+ * returns and Co-routine swap types.
235
+ */
236
+ if (tgt_ctrl & XCTRCTL_RASEMU &&
237
+ type != CTRDATA_TYPE_INDIRECT_CALL &&
238
+ type != CTRDATA_TYPE_DIRECT_CALL &&
239
+ type != CTRDATA_TYPE_RETURN &&
240
+ type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
241
+ return;
242
+ }
243
+
244
+ if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
245
+ /* Case 2 for traps. */
246
+ if (!(src_ctrl & src_mask)) {
247
+ src = 0;
248
+ } else if (!(tgt_ctrl & tgt_mask)) {
249
+ /* Check if target priv-mode has allowed external trap recording. */
250
+ if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
251
+ return;
252
+ }
253
+
254
+ ext_trap = true;
255
+ dst = 0;
256
+ }
257
+ } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
258
+ /*
259
+ * Case 3 for trap returns. Trap returns from inhibited mode are not
260
+ * recorded.
261
+ */
262
+ if (!(src_ctrl & src_mask)) {
263
+ return;
264
+ }
265
+
266
+ /* Case 2 for trap returns. */
267
+ if (!(tgt_ctrl & tgt_mask)) {
268
+ dst = 0;
269
+ }
270
+ }
271
+
272
+ /* Ignore filters in case of RASEMU mode or External trap. */
273
+ if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
274
+ /*
275
+ * Check if the specific type is inhibited. Not taken branch filter is
276
+ * an enable bit and needs to be checked separatly.
277
+ */
278
+ bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
279
+ if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
280
+ (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
281
+ return;
282
+ }
283
+ }
284
+
285
+ head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
286
+
287
+ depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
288
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
289
+ head = (head - 1) & (depth - 1);
290
+
291
+ env->ctr_src[head] &= ~CTRSOURCE_VALID;
292
+ env->sctrstatus =
293
+ set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
294
+ return;
295
+ }
296
+
297
+ /* In case of Co-routine SWAP we overwrite latest entry. */
298
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
299
+ head = (head - 1) & (depth - 1);
300
+ }
301
+
302
+ env->ctr_src[head] = src | CTRSOURCE_VALID;
303
+ env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
304
+ env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
305
+
306
+ head = (head + 1) & (depth - 1);
307
+
308
+ env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
309
+}
310
+
311
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
45
{
312
{
46
- return f | MAKE_64BIT_MASK(32, 32);
313
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
47
+ /* the value is sign-extended instead of NaN-boxing for zfinx */
314
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
48
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
315
!(env->mip & (1ULL << cause));
49
+ return (int32_t)f;
316
bool smode_double_trap = false;
50
+ } else {
317
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
51
+ return f | MAKE_64BIT_MASK(32, 32);
318
+ const bool prev_virt = env->virt_enabled;
52
+ }
319
+ const target_ulong prev_priv = env->priv;
320
target_ulong tval = 0;
321
target_ulong tinst = 0;
322
target_ulong htval = 0;
323
target_ulong mtval2 = 0;
324
+ target_ulong src;
325
int sxlen = 0;
326
int mxlen = 16 << riscv_cpu_mxl(env);
327
bool nnmi_excep = false;
328
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
329
env->pc = (env->stvec >> 2 << 2) +
330
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
331
riscv_cpu_set_mode(env, PRV_S, virt);
332
+
333
+ src = env->sepc;
334
} else {
335
/*
336
* If the hart encounters an exception while executing in M-mode
337
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
338
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
339
}
340
riscv_cpu_set_mode(env, PRV_M, virt);
341
+ src = env->mepc;
342
+ }
343
+
344
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
345
+ if (async && cause == IRQ_PMU_OVF) {
346
+ riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
347
+ } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
348
+ riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
349
+ }
350
+
351
+ riscv_ctr_add_entry(env, src, env->pc,
352
+ async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
353
+ prev_priv, prev_virt);
354
}
355
356
/*
357
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/riscv/op_helper.c
360
+++ b/target/riscv/op_helper.c
361
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
362
{
363
uint64_t mstatus;
364
target_ulong prev_priv, prev_virt = env->virt_enabled;
365
+ const target_ulong src_priv = env->priv;
366
+ const bool src_virt = env->virt_enabled;
367
368
if (!(env->priv >= PRV_S)) {
369
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
370
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
371
}
372
env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
373
374
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
375
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
376
+ src_priv, src_virt);
377
+ }
378
+
379
return retpc;
53
}
380
}
54
381
55
-static inline float32 check_nanbox_s(uint64_t f)
382
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mret(CPURISCVState *env)
56
+static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
383
}
384
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
385
386
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
387
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
388
+ PRV_M, false);
389
+ }
390
+
391
return retpc;
392
}
393
394
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mnret(CPURISCVState *env)
395
return retpc;
396
}
397
398
+void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
399
+ target_ulong dest, target_ulong type)
400
+{
401
+ riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
402
+ env->priv, env->virt_enabled);
403
+}
404
+
405
void helper_wfi(CPURISCVState *env)
57
{
406
{
58
+ /* Disable NaN-boxing check when enable zfinx */
407
CPUState *cs = env_cpu(env);
59
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
60
+ return (uint32_t)f;
61
+ }
62
+
63
uint64_t mask = MAKE_64BIT_MASK(32, 32);
64
65
if (likely((f & mask) == mask)) {
66
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/riscv/fpu_helper.c
69
+++ b/target/riscv/fpu_helper.c
70
@@ -XXX,XX +XXX,XX @@ static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
71
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
72
uint64_t rs3, int flags)
73
{
74
- float32 frs1 = check_nanbox_s(rs1);
75
- float32 frs2 = check_nanbox_s(rs2);
76
- float32 frs3 = check_nanbox_s(rs3);
77
- return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status));
78
+ float32 frs1 = check_nanbox_s(env, rs1);
79
+ float32 frs2 = check_nanbox_s(env, rs2);
80
+ float32 frs3 = check_nanbox_s(env, rs3);
81
+ return nanbox_s(env, float32_muladd(frs1, frs2, frs3, flags,
82
+ &env->fp_status));
83
}
84
85
uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
86
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fnmadd_h(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
87
88
uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
89
{
90
- float32 frs1 = check_nanbox_s(rs1);
91
- float32 frs2 = check_nanbox_s(rs2);
92
- return nanbox_s(float32_add(frs1, frs2, &env->fp_status));
93
+ float32 frs1 = check_nanbox_s(env, rs1);
94
+ float32 frs2 = check_nanbox_s(env, rs2);
95
+ return nanbox_s(env, float32_add(frs1, frs2, &env->fp_status));
96
}
97
98
uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
99
{
100
- float32 frs1 = check_nanbox_s(rs1);
101
- float32 frs2 = check_nanbox_s(rs2);
102
- return nanbox_s(float32_sub(frs1, frs2, &env->fp_status));
103
+ float32 frs1 = check_nanbox_s(env, rs1);
104
+ float32 frs2 = check_nanbox_s(env, rs2);
105
+ return nanbox_s(env, float32_sub(frs1, frs2, &env->fp_status));
106
}
107
108
uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
109
{
110
- float32 frs1 = check_nanbox_s(rs1);
111
- float32 frs2 = check_nanbox_s(rs2);
112
- return nanbox_s(float32_mul(frs1, frs2, &env->fp_status));
113
+ float32 frs1 = check_nanbox_s(env, rs1);
114
+ float32 frs2 = check_nanbox_s(env, rs2);
115
+ return nanbox_s(env, float32_mul(frs1, frs2, &env->fp_status));
116
}
117
118
uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
119
{
120
- float32 frs1 = check_nanbox_s(rs1);
121
- float32 frs2 = check_nanbox_s(rs2);
122
- return nanbox_s(float32_div(frs1, frs2, &env->fp_status));
123
+ float32 frs1 = check_nanbox_s(env, rs1);
124
+ float32 frs2 = check_nanbox_s(env, rs2);
125
+ return nanbox_s(env, float32_div(frs1, frs2, &env->fp_status));
126
}
127
128
uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
129
{
130
- float32 frs1 = check_nanbox_s(rs1);
131
- float32 frs2 = check_nanbox_s(rs2);
132
- return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
133
+ float32 frs1 = check_nanbox_s(env, rs1);
134
+ float32 frs2 = check_nanbox_s(env, rs2);
135
+ return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
136
float32_minnum(frs1, frs2, &env->fp_status) :
137
float32_minimum_number(frs1, frs2, &env->fp_status));
138
}
139
140
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
141
{
142
- float32 frs1 = check_nanbox_s(rs1);
143
- float32 frs2 = check_nanbox_s(rs2);
144
- return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ?
145
+ float32 frs1 = check_nanbox_s(env, rs1);
146
+ float32 frs2 = check_nanbox_s(env, rs2);
147
+ return nanbox_s(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
148
float32_maxnum(frs1, frs2, &env->fp_status) :
149
float32_maximum_number(frs1, frs2, &env->fp_status));
150
}
151
152
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
153
{
154
- float32 frs1 = check_nanbox_s(rs1);
155
- return nanbox_s(float32_sqrt(frs1, &env->fp_status));
156
+ float32 frs1 = check_nanbox_s(env, rs1);
157
+ return nanbox_s(env, float32_sqrt(frs1, &env->fp_status));
158
}
159
160
target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
161
{
162
- float32 frs1 = check_nanbox_s(rs1);
163
- float32 frs2 = check_nanbox_s(rs2);
164
+ float32 frs1 = check_nanbox_s(env, rs1);
165
+ float32 frs2 = check_nanbox_s(env, rs2);
166
return float32_le(frs1, frs2, &env->fp_status);
167
}
168
169
target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
170
{
171
- float32 frs1 = check_nanbox_s(rs1);
172
- float32 frs2 = check_nanbox_s(rs2);
173
+ float32 frs1 = check_nanbox_s(env, rs1);
174
+ float32 frs2 = check_nanbox_s(env, rs2);
175
return float32_lt(frs1, frs2, &env->fp_status);
176
}
177
178
target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
179
{
180
- float32 frs1 = check_nanbox_s(rs1);
181
- float32 frs2 = check_nanbox_s(rs2);
182
+ float32 frs1 = check_nanbox_s(env, rs1);
183
+ float32 frs2 = check_nanbox_s(env, rs2);
184
return float32_eq_quiet(frs1, frs2, &env->fp_status);
185
}
186
187
target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1)
188
{
189
- float32 frs1 = check_nanbox_s(rs1);
190
+ float32 frs1 = check_nanbox_s(env, rs1);
191
return float32_to_int32(frs1, &env->fp_status);
192
}
193
194
target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1)
195
{
196
- float32 frs1 = check_nanbox_s(rs1);
197
+ float32 frs1 = check_nanbox_s(env, rs1);
198
return (int32_t)float32_to_uint32(frs1, &env->fp_status);
199
}
200
201
target_ulong helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1)
202
{
203
- float32 frs1 = check_nanbox_s(rs1);
204
+ float32 frs1 = check_nanbox_s(env, rs1);
205
return float32_to_int64(frs1, &env->fp_status);
206
}
207
208
target_ulong helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1)
209
{
210
- float32 frs1 = check_nanbox_s(rs1);
211
+ float32 frs1 = check_nanbox_s(env, rs1);
212
return float32_to_uint64(frs1, &env->fp_status);
213
}
214
215
uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
216
{
217
- return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status));
218
+ return nanbox_s(env, int32_to_float32((int32_t)rs1, &env->fp_status));
219
}
220
221
uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
222
{
223
- return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status));
224
+ return nanbox_s(env, uint32_to_float32((uint32_t)rs1, &env->fp_status));
225
}
226
227
uint64_t helper_fcvt_s_l(CPURISCVState *env, target_ulong rs1)
228
{
229
- return nanbox_s(int64_to_float32(rs1, &env->fp_status));
230
+ return nanbox_s(env, int64_to_float32(rs1, &env->fp_status));
231
}
232
233
uint64_t helper_fcvt_s_lu(CPURISCVState *env, target_ulong rs1)
234
{
235
- return nanbox_s(uint64_to_float32(rs1, &env->fp_status));
236
+ return nanbox_s(env, uint64_to_float32(rs1, &env->fp_status));
237
}
238
239
-target_ulong helper_fclass_s(uint64_t rs1)
240
+target_ulong helper_fclass_s(CPURISCVState *env, uint64_t rs1)
241
{
242
- float32 frs1 = check_nanbox_s(rs1);
243
+ float32 frs1 = check_nanbox_s(env, rs1);
244
return fclass_s(frs1);
245
}
246
247
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
248
249
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
250
{
251
- return nanbox_s(float64_to_float32(rs1, &env->fp_status));
252
+ return nanbox_s(env, float64_to_float32(rs1, &env->fp_status));
253
}
254
255
uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
256
{
257
- float32 frs1 = check_nanbox_s(rs1);
258
+ float32 frs1 = check_nanbox_s(env, rs1);
259
return float32_to_float64(frs1, &env->fp_status);
260
}
261
262
@@ -XXX,XX +XXX,XX @@ uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
263
264
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
265
{
266
- float32 frs1 = check_nanbox_s(rs1);
267
+ float32 frs1 = check_nanbox_s(env, rs1);
268
return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
269
}
270
271
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
272
{
273
float16 frs1 = check_nanbox_h(rs1);
274
- return nanbox_s(float16_to_float32(frs1, true, &env->fp_status));
275
+ return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status));
276
}
277
278
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
279
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
408
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
280
index XXXXXXX..XXXXXXX 100644
409
index XXXXXXX..XXXXXXX 100644
281
--- a/target/riscv/translate.c
410
--- a/target/riscv/translate.c
282
+++ b/target/riscv/translate.c
411
+++ b/target/riscv/translate.c
283
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
412
@@ -XXX,XX +XXX,XX @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
284
TCGv zero;
285
/* Space for 3 operands plus 1 extra for address computation. */
286
TCGv temp[4];
287
+ /* Space for 4 operands(1 dest and <=3 src) for float point computation */
288
+ TCGv_i64 ftemp[4];
289
+ uint8_t nftemp;
290
/* PointerMasking extension */
291
bool pm_mask_enabled;
292
bool pm_base_enabled;
293
@@ -XXX,XX +XXX,XX @@ static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
294
}
413
}
295
}
414
}
296
415
297
+static TCGv_i64 ftemp_new(DisasContext *ctx)
416
+#ifndef CONFIG_USER_ONLY
298
+{
417
+/*
299
+ assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp));
418
+ * Direct calls
300
+ return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64();
419
+ * - jal x1;
301
+}
420
+ * - jal x5;
302
+
421
+ * - c.jal.
303
+static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
422
+ * - cm.jalt.
304
+{
423
+ *
305
+ if (!ctx->cfg_ptr->ext_zfinx) {
424
+ * Direct jumps
306
+ return cpu_fpr[reg_num];
425
+ * - jal x0;
307
+ }
426
+ * - c.j;
308
+
427
+ * - cm.jt.
309
+ if (reg_num == 0) {
428
+ *
310
+ return tcg_constant_i64(0);
429
+ * Other direct jumps
311
+ }
430
+ * - jal rd where rd != x1 and rd != x5 and rd != x0;
312
+ switch (get_xl(ctx)) {
431
+ */
313
+ case MXL_RV32:
432
+static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm)
314
+#ifdef TARGET_RISCV32
433
+{
315
+ {
434
+ TCGv dest = tcg_temp_new();
316
+ TCGv_i64 t = ftemp_new(ctx);
435
+ TCGv src = tcg_temp_new();
317
+ tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]);
436
+ TCGv type;
318
+ return t;
437
+
319
+ }
438
+ /*
320
+#else
439
+ * If rd is x1 or x5 link registers, treat this as direct call otherwise
321
+ /* fall through */
440
+ * its a direct jump.
322
+ case MXL_RV64:
441
+ */
323
+ return cpu_gpr[reg_num];
442
+ if (rd == 1 || rd == 5) {
324
+#endif
443
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
325
+ default:
444
+ } else if (rd == 0) {
326
+ g_assert_not_reached();
445
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
327
+ }
446
+ } else {
328
+}
447
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP);
329
+
448
+ }
330
+static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
449
+
331
+{
450
+ gen_pc_plus_diff(dest, ctx, imm);
332
+ if (!ctx->cfg_ptr->ext_zfinx) {
451
+ gen_pc_plus_diff(src, ctx, 0);
333
+ return cpu_fpr[reg_num];
452
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
334
+ }
453
+}
335
+
454
+#endif
336
+ if (reg_num == 0) {
337
+ return ftemp_new(ctx);
338
+ }
339
+
340
+ switch (get_xl(ctx)) {
341
+ case MXL_RV32:
342
+ return ftemp_new(ctx);
343
+#ifdef TARGET_RISCV64
344
+ case MXL_RV64:
345
+ return cpu_gpr[reg_num];
346
+#endif
347
+ default:
348
+ g_assert_not_reached();
349
+ }
350
+}
351
+
352
+/* assume t is nanboxing (for normal) or sign-extended (for zfinx) */
353
+static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
354
+{
355
+ if (!ctx->cfg_ptr->ext_zfinx) {
356
+ tcg_gen_mov_i64(cpu_fpr[reg_num], t);
357
+ return;
358
+ }
359
+ if (reg_num != 0) {
360
+ switch (get_xl(ctx)) {
361
+ case MXL_RV32:
362
+#ifdef TARGET_RISCV32
363
+ tcg_gen_extrl_i64_i32(cpu_gpr[reg_num], t);
364
+ break;
365
+#else
366
+ /* fall through */
367
+ case MXL_RV64:
368
+ tcg_gen_mov_i64(cpu_gpr[reg_num], t);
369
+ break;
370
+#endif
371
+ default:
372
+ g_assert_not_reached();
373
+ }
374
+ }
375
+}
376
+
455
+
377
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
456
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
378
{
457
{
379
target_ulong next_pc;
458
TCGv succ_pc = dest_gpr(ctx, rd);
380
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
459
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
381
ctx->cs = cs;
460
}
382
ctx->ntemp = 0;
461
}
383
memset(ctx->temp, 0, sizeof(ctx->temp));
462
384
+ ctx->nftemp = 0;
463
+#ifndef CONFIG_USER_ONLY
385
+ memset(ctx->ftemp, 0, sizeof(ctx->ftemp));
464
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
386
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
465
+ gen_ctr_jal(ctx, rd, imm);
387
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
466
+ }
388
ctx->zero = tcg_constant_tl(0);
467
+#endif
389
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
468
+
390
DisasContext *ctx = container_of(dcbase, DisasContext, base);
469
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
391
CPURISCVState *env = cpu->env_ptr;
470
gen_set_gpr(ctx, rd, succ_pc);
392
uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);
471
393
+ int i;
472
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
394
473
index XXXXXXX..XXXXXXX 100644
395
ctx->ol = ctx->xl;
474
--- a/target/riscv/insn_trans/trans_privileged.c.inc
396
decode_opc(env, ctx, opcode16);
475
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
397
ctx->base.pc_next = ctx->pc_succ_insn;
476
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
398
477
if (has_ext(ctx, RVS)) {
399
- for (int i = ctx->ntemp - 1; i >= 0; --i) {
478
decode_save_opc(ctx, 0);
400
+ for (i = ctx->ntemp - 1; i >= 0; --i) {
479
translator_io_start(&ctx->base);
401
tcg_temp_free(ctx->temp[i]);
480
+ gen_update_pc(ctx, 0);
402
ctx->temp[i] = NULL;
481
gen_helper_sret(cpu_pc, tcg_env);
403
}
482
exit_tb(ctx); /* no chaining */
404
ctx->ntemp = 0;
483
ctx->base.is_jmp = DISAS_NORETURN;
405
+ for (i = ctx->nftemp - 1; i >= 0; --i) {
484
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
406
+ tcg_temp_free_i64(ctx->ftemp[i]);
485
#ifndef CONFIG_USER_ONLY
407
+ ctx->ftemp[i] = NULL;
486
decode_save_opc(ctx, 0);
408
+ }
487
translator_io_start(&ctx->base);
409
+ ctx->nftemp = 0;
488
+ gen_update_pc(ctx, 0);
410
489
gen_helper_mret(cpu_pc, tcg_env);
411
if (ctx->base.is_jmp == DISAS_NEXT) {
490
exit_tb(ctx); /* no chaining */
412
target_ulong page_start;
491
ctx->base.is_jmp = DISAS_NORETURN;
413
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
492
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
414
index XXXXXXX..XXXXXXX 100644
493
index XXXXXXX..XXXXXXX 100644
415
--- a/target/riscv/insn_trans/trans_rvf.c.inc
494
--- a/target/riscv/insn_trans/trans_rvi.c.inc
416
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
495
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
417
@@ -XXX,XX +XXX,XX @@
496
@@ -XXX,XX +XXX,XX @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
418
419
#define REQUIRE_FPU do {\
420
if (ctx->mstatus_fs == 0) \
421
- return false; \
422
+ if (!ctx->cfg_ptr->ext_zfinx) \
423
+ return false; \
424
+} while (0)
425
+
426
+#define REQUIRE_ZFINX_OR_F(ctx) do {\
427
+ if (!ctx->cfg_ptr->ext_zfinx) { \
428
+ REQUIRE_EXT(ctx, RVF); \
429
+ } \
430
} while (0)
431
432
static bool trans_flw(DisasContext *ctx, arg_flw *a)
433
@@ -XXX,XX +XXX,XX @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
434
static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
435
{
436
REQUIRE_FPU;
437
- REQUIRE_EXT(ctx, RVF);
438
+ REQUIRE_ZFINX_OR_F(ctx);
439
+
440
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
441
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
442
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
443
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
444
+
445
gen_set_rm(ctx, a->rm);
446
- gen_helper_fmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
447
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
448
+ gen_helper_fmadd_s(dest, cpu_env, src1, src2, src3);
449
+ gen_set_fpr_hs(ctx, a->rd, dest);
450
mark_fs_dirty(ctx);
451
return true;
497
return true;
452
}
498
}
453
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_s(DisasContext *ctx, arg_fmadd_s *a)
499
454
static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
500
+#ifndef CONFIG_USER_ONLY
501
+/*
502
+ * Indirect calls
503
+ * - jalr x1, rs where rs != x5;
504
+ * - jalr x5, rs where rs != x1;
505
+ * - c.jalr rs1 where rs1 != x5;
506
+ *
507
+ * Indirect jumps
508
+ * - jalr x0, rs where rs != x1 and rs != x5;
509
+ * - c.jr rs1 where rs1 != x1 and rs1 != x5.
510
+ *
511
+ * Returns
512
+ * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
513
+ * - c.jr rs1 where rs1 == x1 or rs1 == x5.
514
+ *
515
+ * Co-routine swap
516
+ * - jalr x1, x5;
517
+ * - jalr x5, x1;
518
+ * - c.jalr x5.
519
+ *
520
+ * Other indirect jumps
521
+ * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
522
+ */
523
+static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
524
+{
525
+ TCGv src = tcg_temp_new();
526
+ TCGv type;
527
+
528
+ if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
529
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
530
+ } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
531
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
532
+ } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
533
+ type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
534
+ } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
535
+ type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
536
+ } else {
537
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
538
+ }
539
+
540
+ gen_pc_plus_diff(src, ctx, 0);
541
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
542
+}
543
+#endif
544
+
545
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
455
{
546
{
456
REQUIRE_FPU;
547
TCGLabel *misaligned = NULL;
457
- REQUIRE_EXT(ctx, RVF);
548
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
458
+ REQUIRE_ZFINX_OR_F(ctx);
549
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
459
+
550
gen_set_gpr(ctx, a->rd, succ_pc);
460
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
551
461
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
552
+#ifndef CONFIG_USER_ONLY
462
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
553
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
463
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
554
+ gen_ctr_jalr(ctx, a, target_pc);
464
+
555
+ }
465
gen_set_rm(ctx, a->rm);
556
+#endif
466
- gen_helper_fmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
557
+
467
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
558
tcg_gen_mov_tl(cpu_pc, target_pc);
468
+ gen_helper_fmsub_s(dest, cpu_env, src1, src2, src3);
559
if (ctx->fcfi_enabled) {
469
+ gen_set_fpr_hs(ctx, a->rd, dest);
560
/*
470
mark_fs_dirty(ctx);
561
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
471
return true;
562
} else {
472
}
563
tcg_gen_brcond_tl(cond, src1, src2, l);
473
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_s(DisasContext *ctx, arg_fmsub_s *a)
564
}
474
static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
565
+
475
{
566
+#ifndef CONFIG_USER_ONLY
476
REQUIRE_FPU;
567
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
477
- REQUIRE_EXT(ctx, RVF);
568
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
478
+ REQUIRE_ZFINX_OR_F(ctx);
569
+ TCGv dest = tcg_temp_new();
479
+
570
+ TCGv src = tcg_temp_new();
480
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
571
+
481
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
572
+ gen_pc_plus_diff(src, ctx, 0);
482
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
573
+ gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
483
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
574
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
484
+
575
+ }
485
gen_set_rm(ctx, a->rm);
576
+#endif
486
- gen_helper_fnmsub_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
577
+
487
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
578
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
488
+ gen_helper_fnmsub_s(dest, cpu_env, src1, src2, src3);
579
ctx->pc_save = orig_pc_save;
489
+ gen_set_fpr_hs(ctx, a->rd, dest);
580
490
mark_fs_dirty(ctx);
581
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
491
return true;
582
gen_pc_plus_diff(target_pc, ctx, a->imm);
492
}
583
gen_exception_inst_addr_mis(ctx, target_pc);
493
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_s(DisasContext *ctx, arg_fnmsub_s *a)
584
} else {
494
static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
585
+#ifndef CONFIG_USER_ONLY
495
{
586
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
496
REQUIRE_FPU;
587
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
497
- REQUIRE_EXT(ctx, RVF);
588
+ TCGv dest = tcg_temp_new();
498
+ REQUIRE_ZFINX_OR_F(ctx);
589
+ TCGv src = tcg_temp_new();
499
+
590
+
500
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
591
+ gen_pc_plus_diff(src, ctx, 0);
501
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
592
+ gen_pc_plus_diff(dest, ctx, a->imm);
502
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
593
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
503
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
594
+ }
504
+
595
+#endif
505
gen_set_rm(ctx, a->rm);
596
gen_goto_tb(ctx, 0, a->imm);
506
- gen_helper_fnmadd_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
597
}
507
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
598
ctx->pc_save = -1;
508
+ gen_helper_fnmadd_s(dest, cpu_env, src1, src2, src3);
599
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
509
+ gen_set_fpr_hs(ctx, a->rd, dest);
600
index XXXXXXX..XXXXXXX 100644
510
mark_fs_dirty(ctx);
601
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
511
return true;
602
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
512
}
603
@@ -XXX,XX +XXX,XX @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
513
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_s(DisasContext *ctx, arg_fnmadd_s *a)
604
514
static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
605
if (ret) {
515
{
606
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
516
REQUIRE_FPU;
607
+#ifndef CONFIG_USER_ONLY
517
- REQUIRE_EXT(ctx, RVF);
608
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
518
+ REQUIRE_ZFINX_OR_F(ctx);
609
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
519
+
610
+ TCGv src = tcg_temp_new();
520
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
611
+ gen_pc_plus_diff(src, ctx, 0);
521
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
612
+ gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type);
522
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
613
+ }
523
614
+#endif
524
gen_set_rm(ctx, a->rm);
615
tcg_gen_mov_tl(cpu_pc, ret_addr);
525
- gen_helper_fadd_s(cpu_fpr[a->rd], cpu_env,
616
tcg_gen_lookup_and_goto_ptr();
526
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
617
ctx->base.is_jmp = DISAS_NORETURN;
527
+ gen_helper_fadd_s(dest, cpu_env, src1, src2);
618
@@ -XXX,XX +XXX,XX @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
528
+ gen_set_fpr_hs(ctx, a->rd, dest);
619
gen_set_gpr(ctx, xRA, succ_pc);
529
mark_fs_dirty(ctx);
620
}
530
return true;
621
531
}
622
+#ifndef CONFIG_USER_ONLY
532
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
623
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
533
static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
624
+ if (a->index >= 32) {
534
{
625
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
535
REQUIRE_FPU;
626
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
536
- REQUIRE_EXT(ctx, RVF);
537
+ REQUIRE_ZFINX_OR_F(ctx);
538
+
539
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
540
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
541
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
542
543
gen_set_rm(ctx, a->rm);
544
- gen_helper_fsub_s(cpu_fpr[a->rd], cpu_env,
545
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
546
+ gen_helper_fsub_s(dest, cpu_env, src1, src2);
547
+ gen_set_fpr_hs(ctx, a->rd, dest);
548
mark_fs_dirty(ctx);
549
return true;
550
}
551
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
552
static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
553
{
554
REQUIRE_FPU;
555
- REQUIRE_EXT(ctx, RVF);
556
+ REQUIRE_ZFINX_OR_F(ctx);
557
+
558
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
559
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
560
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
561
562
gen_set_rm(ctx, a->rm);
563
- gen_helper_fmul_s(cpu_fpr[a->rd], cpu_env,
564
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
565
+ gen_helper_fmul_s(dest, cpu_env, src1, src2);
566
+ gen_set_fpr_hs(ctx, a->rd, dest);
567
mark_fs_dirty(ctx);
568
return true;
569
}
570
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
571
static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
572
{
573
REQUIRE_FPU;
574
- REQUIRE_EXT(ctx, RVF);
575
+ REQUIRE_ZFINX_OR_F(ctx);
576
+
577
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
578
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
579
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
580
581
gen_set_rm(ctx, a->rm);
582
- gen_helper_fdiv_s(cpu_fpr[a->rd], cpu_env,
583
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
584
+ gen_helper_fdiv_s(dest, cpu_env, src1, src2);
585
+ gen_set_fpr_hs(ctx, a->rd, dest);
586
mark_fs_dirty(ctx);
587
return true;
588
}
589
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
590
static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
591
{
592
REQUIRE_FPU;
593
- REQUIRE_EXT(ctx, RVF);
594
+ REQUIRE_ZFINX_OR_F(ctx);
595
+
596
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
597
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
598
599
gen_set_rm(ctx, a->rm);
600
- gen_helper_fsqrt_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
601
+ gen_helper_fsqrt_s(dest, cpu_env, src1);
602
+ gen_set_fpr_hs(ctx, a->rd, dest);
603
mark_fs_dirty(ctx);
604
return true;
605
}
606
@@ -XXX,XX +XXX,XX @@ static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
607
static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
608
{
609
REQUIRE_FPU;
610
- REQUIRE_EXT(ctx, RVF);
611
+ REQUIRE_ZFINX_OR_F(ctx);
612
+
613
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
614
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
615
616
if (a->rs1 == a->rs2) { /* FMOV */
617
- gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
618
+ if (!ctx->cfg_ptr->ext_zfinx) {
619
+ gen_check_nanbox_s(dest, src1);
620
+ } else {
627
+ } else {
621
+ tcg_gen_ext32s_i64(dest, src1);
628
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
622
+ }
629
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
623
} else { /* FSGNJ */
630
+ }
624
- TCGv_i64 rs1 = tcg_temp_new_i64();
631
+ }
625
- TCGv_i64 rs2 = tcg_temp_new_i64();
632
+#endif
626
-
633
+
627
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
634
+
628
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
635
tcg_gen_mov_tl(cpu_pc, addr);
629
-
636
630
- /* This formulation retains the nanboxing of rs2. */
637
tcg_gen_lookup_and_goto_ptr();
631
- tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
632
- tcg_temp_free_i64(rs1);
633
- tcg_temp_free_i64(rs2);
634
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
635
+
636
+ if (!ctx->cfg_ptr->ext_zfinx) {
637
+ TCGv_i64 rs1 = tcg_temp_new_i64();
638
+ TCGv_i64 rs2 = tcg_temp_new_i64();
639
+ gen_check_nanbox_s(rs1, src1);
640
+ gen_check_nanbox_s(rs2, src2);
641
+
642
+ /* This formulation retains the nanboxing of rs2 in normal 'F'. */
643
+ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31);
644
+
645
+ tcg_temp_free_i64(rs1);
646
+ tcg_temp_free_i64(rs2);
647
+ } else {
648
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 31);
649
+ tcg_gen_ext32s_i64(dest, dest);
650
+ }
651
}
652
+ gen_set_fpr_hs(ctx, a->rd, dest);
653
mark_fs_dirty(ctx);
654
return true;
655
}
656
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
657
TCGv_i64 rs1, rs2, mask;
658
659
REQUIRE_FPU;
660
- REQUIRE_EXT(ctx, RVF);
661
+ REQUIRE_ZFINX_OR_F(ctx);
662
663
- rs1 = tcg_temp_new_i64();
664
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
665
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
666
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
667
668
+ rs1 = tcg_temp_new_i64();
669
+ if (!ctx->cfg_ptr->ext_zfinx) {
670
+ gen_check_nanbox_s(rs1, src1);
671
+ } else {
672
+ tcg_gen_mov_i64(rs1, src1);
673
+ }
674
if (a->rs1 == a->rs2) { /* FNEG */
675
- tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
676
+ tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(31, 1));
677
} else {
678
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
679
rs2 = tcg_temp_new_i64();
680
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
681
+ if (!ctx->cfg_ptr->ext_zfinx) {
682
+ gen_check_nanbox_s(rs2, src2);
683
+ } else {
684
+ tcg_gen_mov_i64(rs2, src2);
685
+ }
686
687
/*
688
* Replace bit 31 in rs1 with inverse in rs2.
689
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
690
*/
691
mask = tcg_constant_i64(~MAKE_64BIT_MASK(31, 1));
692
tcg_gen_nor_i64(rs2, rs2, mask);
693
- tcg_gen_and_i64(rs1, mask, rs1);
694
- tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
695
+ tcg_gen_and_i64(dest, mask, rs1);
696
+ tcg_gen_or_i64(dest, dest, rs2);
697
698
tcg_temp_free_i64(rs2);
699
}
700
+ /* signed-extended intead of nanboxing for result if enable zfinx */
701
+ if (ctx->cfg_ptr->ext_zfinx) {
702
+ tcg_gen_ext32s_i64(dest, dest);
703
+ }
704
+ gen_set_fpr_hs(ctx, a->rd, dest);
705
tcg_temp_free_i64(rs1);
706
-
707
mark_fs_dirty(ctx);
708
return true;
709
}
710
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
711
TCGv_i64 rs1, rs2;
712
713
REQUIRE_FPU;
714
- REQUIRE_EXT(ctx, RVF);
715
+ REQUIRE_ZFINX_OR_F(ctx);
716
717
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
718
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
719
rs1 = tcg_temp_new_i64();
720
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
721
+
722
+ if (!ctx->cfg_ptr->ext_zfinx) {
723
+ gen_check_nanbox_s(rs1, src1);
724
+ } else {
725
+ tcg_gen_mov_i64(rs1, src1);
726
+ }
727
728
if (a->rs1 == a->rs2) { /* FABS */
729
- tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
730
+ tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(31, 1));
731
} else {
732
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
733
rs2 = tcg_temp_new_i64();
734
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
735
+
736
+ if (!ctx->cfg_ptr->ext_zfinx) {
737
+ gen_check_nanbox_s(rs2, src2);
738
+ } else {
739
+ tcg_gen_mov_i64(rs2, src2);
740
+ }
741
742
/*
743
* Xor bit 31 in rs1 with that in rs2.
744
* This formulation retains the nanboxing of rs1.
745
*/
746
- tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
747
- tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
748
+ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1));
749
+ tcg_gen_xor_i64(dest, rs1, dest);
750
751
tcg_temp_free_i64(rs2);
752
}
753
+ /* signed-extended intead of nanboxing for result if enable zfinx */
754
+ if (ctx->cfg_ptr->ext_zfinx) {
755
+ tcg_gen_ext32s_i64(dest, dest);
756
+ }
757
tcg_temp_free_i64(rs1);
758
-
759
+ gen_set_fpr_hs(ctx, a->rd, dest);
760
mark_fs_dirty(ctx);
761
return true;
762
}
763
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
764
static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
765
{
766
REQUIRE_FPU;
767
- REQUIRE_EXT(ctx, RVF);
768
+ REQUIRE_ZFINX_OR_F(ctx);
769
+
770
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
771
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
772
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
773
774
- gen_helper_fmin_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
775
- cpu_fpr[a->rs2]);
776
+ gen_helper_fmin_s(dest, cpu_env, src1, src2);
777
+ gen_set_fpr_hs(ctx, a->rd, dest);
778
mark_fs_dirty(ctx);
779
return true;
780
}
781
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
782
static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
783
{
784
REQUIRE_FPU;
785
- REQUIRE_EXT(ctx, RVF);
786
+ REQUIRE_ZFINX_OR_F(ctx);
787
+
788
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
789
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
790
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
791
792
- gen_helper_fmax_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
793
- cpu_fpr[a->rs2]);
794
+ gen_helper_fmax_s(dest, cpu_env, src1, src2);
795
+ gen_set_fpr_hs(ctx, a->rd, dest);
796
mark_fs_dirty(ctx);
797
return true;
798
}
799
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
800
static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
801
{
802
REQUIRE_FPU;
803
- REQUIRE_EXT(ctx, RVF);
804
+ REQUIRE_ZFINX_OR_F(ctx);
805
806
TCGv dest = dest_gpr(ctx, a->rd);
807
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
808
809
gen_set_rm(ctx, a->rm);
810
- gen_helper_fcvt_w_s(dest, cpu_env, cpu_fpr[a->rs1]);
811
+ gen_helper_fcvt_w_s(dest, cpu_env, src1);
812
gen_set_gpr(ctx, a->rd, dest);
813
return true;
814
}
815
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_s(DisasContext *ctx, arg_fcvt_w_s *a)
816
static bool trans_fcvt_wu_s(DisasContext *ctx, arg_fcvt_wu_s *a)
817
{
818
REQUIRE_FPU;
819
- REQUIRE_EXT(ctx, RVF);
820
+ REQUIRE_ZFINX_OR_F(ctx);
821
822
TCGv dest = dest_gpr(ctx, a->rd);
823
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
824
825
gen_set_rm(ctx, a->rm);
826
- gen_helper_fcvt_wu_s(dest, cpu_env, cpu_fpr[a->rs1]);
827
+ gen_helper_fcvt_wu_s(dest, cpu_env, src1);
828
gen_set_gpr(ctx, a->rd, dest);
829
return true;
830
}
831
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
832
{
833
/* NOTE: This was FMV.X.S in an earlier version of the ISA spec! */
834
REQUIRE_FPU;
835
- REQUIRE_EXT(ctx, RVF);
836
+ REQUIRE_ZFINX_OR_F(ctx);
837
838
TCGv dest = dest_gpr(ctx, a->rd);
839
-
840
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
841
#if defined(TARGET_RISCV64)
842
- tcg_gen_ext32s_tl(dest, cpu_fpr[a->rs1]);
843
+ tcg_gen_ext32s_tl(dest, src1);
844
#else
845
- tcg_gen_extrl_i64_i32(dest, cpu_fpr[a->rs1]);
846
+ tcg_gen_extrl_i64_i32(dest, src1);
847
#endif
848
849
gen_set_gpr(ctx, a->rd, dest);
850
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_x_w(DisasContext *ctx, arg_fmv_x_w *a)
851
static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
852
{
853
REQUIRE_FPU;
854
- REQUIRE_EXT(ctx, RVF);
855
+ REQUIRE_ZFINX_OR_F(ctx);
856
857
TCGv dest = dest_gpr(ctx, a->rd);
858
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
859
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
860
861
- gen_helper_feq_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
862
+ gen_helper_feq_s(dest, cpu_env, src1, src2);
863
gen_set_gpr(ctx, a->rd, dest);
864
return true;
865
}
866
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_s(DisasContext *ctx, arg_feq_s *a)
867
static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
868
{
869
REQUIRE_FPU;
870
- REQUIRE_EXT(ctx, RVF);
871
+ REQUIRE_ZFINX_OR_F(ctx);
872
873
TCGv dest = dest_gpr(ctx, a->rd);
874
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
875
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
876
877
- gen_helper_flt_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
878
+ gen_helper_flt_s(dest, cpu_env, src1, src2);
879
gen_set_gpr(ctx, a->rd, dest);
880
return true;
881
}
882
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_s(DisasContext *ctx, arg_flt_s *a)
883
static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
884
{
885
REQUIRE_FPU;
886
- REQUIRE_EXT(ctx, RVF);
887
+ REQUIRE_ZFINX_OR_F(ctx);
888
889
TCGv dest = dest_gpr(ctx, a->rd);
890
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
891
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
892
893
- gen_helper_fle_s(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
894
+ gen_helper_fle_s(dest, cpu_env, src1, src2);
895
gen_set_gpr(ctx, a->rd, dest);
896
return true;
897
}
898
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_s(DisasContext *ctx, arg_fle_s *a)
899
static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
900
{
901
REQUIRE_FPU;
902
- REQUIRE_EXT(ctx, RVF);
903
+ REQUIRE_ZFINX_OR_F(ctx);
904
905
TCGv dest = dest_gpr(ctx, a->rd);
906
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
907
908
- gen_helper_fclass_s(dest, cpu_fpr[a->rs1]);
909
+ gen_helper_fclass_s(dest, cpu_env, src1);
910
gen_set_gpr(ctx, a->rd, dest);
911
return true;
912
}
913
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
914
static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
915
{
916
REQUIRE_FPU;
917
- REQUIRE_EXT(ctx, RVF);
918
+ REQUIRE_ZFINX_OR_F(ctx);
919
920
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
921
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
922
923
gen_set_rm(ctx, a->rm);
924
- gen_helper_fcvt_s_w(cpu_fpr[a->rd], cpu_env, src);
925
-
926
+ gen_helper_fcvt_s_w(dest, cpu_env, src);
927
+ gen_set_fpr_hs(ctx, a->rd, dest);
928
mark_fs_dirty(ctx);
929
return true;
930
}
931
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_w(DisasContext *ctx, arg_fcvt_s_w *a)
932
static bool trans_fcvt_s_wu(DisasContext *ctx, arg_fcvt_s_wu *a)
933
{
934
REQUIRE_FPU;
935
- REQUIRE_EXT(ctx, RVF);
936
+ REQUIRE_ZFINX_OR_F(ctx);
937
938
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
939
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
940
941
gen_set_rm(ctx, a->rm);
942
- gen_helper_fcvt_s_wu(cpu_fpr[a->rd], cpu_env, src);
943
-
944
+ gen_helper_fcvt_s_wu(dest, cpu_env, src);
945
+ gen_set_fpr_hs(ctx, a->rd, dest);
946
mark_fs_dirty(ctx);
947
return true;
948
}
949
@@ -XXX,XX +XXX,XX @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
950
{
951
/* NOTE: This was FMV.S.X in an earlier version of the ISA spec! */
952
REQUIRE_FPU;
953
- REQUIRE_EXT(ctx, RVF);
954
+ REQUIRE_ZFINX_OR_F(ctx);
955
956
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
957
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
958
959
- tcg_gen_extu_tl_i64(cpu_fpr[a->rd], src);
960
- gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
961
-
962
+ tcg_gen_extu_tl_i64(dest, src);
963
+ gen_nanbox_s(dest, dest);
964
+ gen_set_fpr_hs(ctx, a->rd, dest);
965
mark_fs_dirty(ctx);
966
return true;
967
}
968
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_s(DisasContext *ctx, arg_fcvt_l_s *a)
969
{
970
REQUIRE_64BIT(ctx);
971
REQUIRE_FPU;
972
- REQUIRE_EXT(ctx, RVF);
973
+ REQUIRE_ZFINX_OR_F(ctx);
974
975
TCGv dest = dest_gpr(ctx, a->rd);
976
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
977
978
gen_set_rm(ctx, a->rm);
979
- gen_helper_fcvt_l_s(dest, cpu_env, cpu_fpr[a->rs1]);
980
+ gen_helper_fcvt_l_s(dest, cpu_env, src1);
981
gen_set_gpr(ctx, a->rd, dest);
982
return true;
983
}
984
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_s(DisasContext *ctx, arg_fcvt_lu_s *a)
985
{
986
REQUIRE_64BIT(ctx);
987
REQUIRE_FPU;
988
- REQUIRE_EXT(ctx, RVF);
989
+ REQUIRE_ZFINX_OR_F(ctx);
990
991
TCGv dest = dest_gpr(ctx, a->rd);
992
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
993
994
gen_set_rm(ctx, a->rm);
995
- gen_helper_fcvt_lu_s(dest, cpu_env, cpu_fpr[a->rs1]);
996
+ gen_helper_fcvt_lu_s(dest, cpu_env, src1);
997
gen_set_gpr(ctx, a->rd, dest);
998
return true;
999
}
1000
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_l(DisasContext *ctx, arg_fcvt_s_l *a)
1001
{
1002
REQUIRE_64BIT(ctx);
1003
REQUIRE_FPU;
1004
- REQUIRE_EXT(ctx, RVF);
1005
+ REQUIRE_ZFINX_OR_F(ctx);
1006
1007
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
1008
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
1009
1010
gen_set_rm(ctx, a->rm);
1011
- gen_helper_fcvt_s_l(cpu_fpr[a->rd], cpu_env, src);
1012
-
1013
+ gen_helper_fcvt_s_l(dest, cpu_env, src);
1014
+ gen_set_fpr_hs(ctx, a->rd, dest);
1015
mark_fs_dirty(ctx);
1016
return true;
1017
}
1018
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_lu(DisasContext *ctx, arg_fcvt_s_lu *a)
1019
{
1020
REQUIRE_64BIT(ctx);
1021
REQUIRE_FPU;
1022
- REQUIRE_EXT(ctx, RVF);
1023
+ REQUIRE_ZFINX_OR_F(ctx);
1024
1025
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
1026
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
1027
1028
gen_set_rm(ctx, a->rm);
1029
- gen_helper_fcvt_s_lu(cpu_fpr[a->rd], cpu_env, src);
1030
-
1031
+ gen_helper_fcvt_s_lu(dest, cpu_env, src);
1032
+ gen_set_fpr_hs(ctx, a->rd, dest);
1033
mark_fs_dirty(ctx);
1034
return true;
1035
}
1036
--
638
--
1037
2.35.1
639
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
2
2
3
- update extension check REQUIRE_ZHINX_OR_ZFH and REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN
3
CTR extension adds a new instruction sctrclr to quickly
4
- update half float point register read/write
4
clear the recorded entries buffer.
5
- disable nanbox_h check
6
5
7
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
6
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
8
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-Id: <20220211043920.28981-6-liweiwei@iscas.ac.cn>
8
Message-ID: <20250205-b4-ctr_upstream_v6-v6-5-439d8e06c8ef@rivosinc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
10
---
14
target/riscv/helper.h | 2 +-
11
target/riscv/cpu.h | 1 +
15
target/riscv/internals.h | 16 +-
12
target/riscv/helper.h | 1 +
16
target/riscv/fpu_helper.c | 89 +++---
13
target/riscv/insn32.decode | 1 +
17
target/riscv/insn_trans/trans_rvzfh.c.inc | 332 +++++++++++++++-------
14
target/riscv/cpu_helper.c | 7 +++++
18
4 files changed, 296 insertions(+), 143 deletions(-)
15
target/riscv/op_helper.c | 29 +++++++++++++++++++
16
.../riscv/insn_trans/trans_privileged.c.inc | 11 +++++++
17
6 files changed, 50 insertions(+)
19
18
19
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.h
22
+++ b/target/riscv/cpu.h
23
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
24
25
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
26
enum CTRType type, target_ulong prev_priv, bool prev_virt);
27
+void riscv_ctr_clear(CPURISCVState *env);
28
29
void riscv_translate_init(void);
30
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
20
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
31
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
21
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/helper.h
33
--- a/target/riscv/helper.h
23
+++ b/target/riscv/helper.h
34
+++ b/target/riscv/helper.h
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_2(fcvt_h_w, TCG_CALL_NO_RWG, i64, env, tl)
35
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
25
DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
36
DEF_HELPER_1(sret, tl, env)
26
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
37
DEF_HELPER_1(mret, tl, env)
27
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
38
DEF_HELPER_1(mnret, tl, env)
28
-DEF_HELPER_FLAGS_1(fclass_h, TCG_CALL_NO_RWG_SE, tl, i64)
39
+DEF_HELPER_1(ctr_clear, void, env)
29
+DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
40
DEF_HELPER_1(wfi, void, env)
30
41
DEF_HELPER_1(wrs_nto, void, env)
31
/* Special functions */
42
DEF_HELPER_1(tlb_flush, void, env)
32
DEF_HELPER_2(csrr, tl, env, int)
43
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
33
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
34
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/internals.h
45
--- a/target/riscv/insn32.decode
36
+++ b/target/riscv/internals.h
46
+++ b/target/riscv/insn32.decode
37
@@ -XXX,XX +XXX,XX @@ static inline float32 check_nanbox_s(CPURISCVState *env, uint64_t f)
47
@@ -XXX,XX +XXX,XX @@
48
# *** Privileged Instructions ***
49
ecall 000000000000 00000 000 00000 1110011
50
ebreak 000000000001 00000 000 00000 1110011
51
+sctrclr 000100000100 00000 000 00000 1110011
52
uret 0000000 00010 00000 000 00000 1110011
53
sret 0001000 00010 00000 000 00000 1110011
54
mret 0011000 00010 00000 000 00000 1110011
55
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/cpu_helper.c
58
+++ b/target/riscv/cpu_helper.c
59
@@ -XXX,XX +XXX,XX @@ static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
38
}
60
}
39
}
61
}
40
62
41
-static inline uint64_t nanbox_h(float16 f)
63
+void riscv_ctr_clear(CPURISCVState *env)
42
+static inline uint64_t nanbox_h(CPURISCVState *env, float16 f)
64
+{
65
+ memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
66
+ memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
67
+ memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
68
+}
69
+
70
static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
43
{
71
{
44
- return f | MAKE_64BIT_MASK(16, 48);
72
switch (priv) {
45
+ /* the value is sign-extended instead of NaN-boxing for zfinx */
73
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
46
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
74
index XXXXXXX..XXXXXXX 100644
47
+ return (int16_t)f;
75
--- a/target/riscv/op_helper.c
48
+ } else {
76
+++ b/target/riscv/op_helper.c
49
+ return f | MAKE_64BIT_MASK(16, 48);
77
@@ -XXX,XX +XXX,XX @@ void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
50
+ }
78
env->priv, env->virt_enabled);
51
}
79
}
52
80
53
-static inline float16 check_nanbox_h(uint64_t f)
81
+void helper_ctr_clear(CPURISCVState *env)
54
+static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
82
+{
55
{
83
+ /*
56
+ /* Disable nanbox check when enable zfinx */
84
+ * It's safe to call smstateen_acc_ok() for umode access regardless of the
57
+ if (RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
85
+ * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit
58
+ return (uint16_t)f;
86
+ * is zero, smstateen_acc_ok() will return the correct exception code and
87
+ * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that
88
+ * scenario the U-mode check below will handle that case.
89
+ */
90
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
91
+ if (ret != RISCV_EXCP_NONE) {
92
+ riscv_raise_exception(env, ret, GETPC());
59
+ }
93
+ }
60
+
94
+
61
uint64_t mask = MAKE_64BIT_MASK(16, 48);
95
+ if (env->priv == PRV_U) {
62
96
+ /*
63
if (likely((f & mask) == mask)) {
97
+ * One corner case is when sctrclr is executed from VU-mode and
64
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
98
+ * mstateen.CTR = 0, in which case we are supposed to raise
99
+ * RISCV_EXCP_ILLEGAL_INST. This case is already handled in
100
+ * smstateen_acc_ok().
101
+ */
102
+ uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
103
+ RISCV_EXCP_ILLEGAL_INST;
104
+ riscv_raise_exception(env, excep, GETPC());
105
+ }
106
+
107
+ riscv_ctr_clear(env);
108
+}
109
+
110
void helper_wfi(CPURISCVState *env)
111
{
112
CPUState *cs = env_cpu(env);
113
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
65
index XXXXXXX..XXXXXXX 100644
114
index XXXXXXX..XXXXXXX 100644
66
--- a/target/riscv/fpu_helper.c
115
--- a/target/riscv/insn_trans/trans_privileged.c.inc
67
+++ b/target/riscv/fpu_helper.c
116
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
68
@@ -XXX,XX +XXX,XX @@ void helper_set_rod_rounding_mode(CPURISCVState *env)
117
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
69
static uint64_t do_fmadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
70
uint64_t rs3, int flags)
71
{
72
- float16 frs1 = check_nanbox_h(rs1);
73
- float16 frs2 = check_nanbox_h(rs2);
74
- float16 frs3 = check_nanbox_h(rs3);
75
- return nanbox_h(float16_muladd(frs1, frs2, frs3, flags, &env->fp_status));
76
+ float16 frs1 = check_nanbox_h(env, rs1);
77
+ float16 frs2 = check_nanbox_h(env, rs2);
78
+ float16 frs3 = check_nanbox_h(env, rs3);
79
+ return nanbox_h(env, float16_muladd(frs1, frs2, frs3, flags,
80
+ &env->fp_status));
81
}
82
83
static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
84
@@ -XXX,XX +XXX,XX @@ target_ulong helper_fclass_d(uint64_t frs1)
85
86
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
87
{
88
- float16 frs1 = check_nanbox_h(rs1);
89
- float16 frs2 = check_nanbox_h(rs2);
90
- return nanbox_h(float16_add(frs1, frs2, &env->fp_status));
91
+ float16 frs1 = check_nanbox_h(env, rs1);
92
+ float16 frs2 = check_nanbox_h(env, rs2);
93
+ return nanbox_h(env, float16_add(frs1, frs2, &env->fp_status));
94
}
95
96
uint64_t helper_fsub_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
97
{
98
- float16 frs1 = check_nanbox_h(rs1);
99
- float16 frs2 = check_nanbox_h(rs2);
100
- return nanbox_h(float16_sub(frs1, frs2, &env->fp_status));
101
+ float16 frs1 = check_nanbox_h(env, rs1);
102
+ float16 frs2 = check_nanbox_h(env, rs2);
103
+ return nanbox_h(env, float16_sub(frs1, frs2, &env->fp_status));
104
}
105
106
uint64_t helper_fmul_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
107
{
108
- float16 frs1 = check_nanbox_h(rs1);
109
- float16 frs2 = check_nanbox_h(rs2);
110
- return nanbox_h(float16_mul(frs1, frs2, &env->fp_status));
111
+ float16 frs1 = check_nanbox_h(env, rs1);
112
+ float16 frs2 = check_nanbox_h(env, rs2);
113
+ return nanbox_h(env, float16_mul(frs1, frs2, &env->fp_status));
114
}
115
116
uint64_t helper_fdiv_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
117
{
118
- float16 frs1 = check_nanbox_h(rs1);
119
- float16 frs2 = check_nanbox_h(rs2);
120
- return nanbox_h(float16_div(frs1, frs2, &env->fp_status));
121
+ float16 frs1 = check_nanbox_h(env, rs1);
122
+ float16 frs2 = check_nanbox_h(env, rs2);
123
+ return nanbox_h(env, float16_div(frs1, frs2, &env->fp_status));
124
}
125
126
uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
127
{
128
- float16 frs1 = check_nanbox_h(rs1);
129
- float16 frs2 = check_nanbox_h(rs2);
130
- return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
131
+ float16 frs1 = check_nanbox_h(env, rs1);
132
+ float16 frs2 = check_nanbox_h(env, rs2);
133
+ return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
134
float16_minnum(frs1, frs2, &env->fp_status) :
135
float16_minimum_number(frs1, frs2, &env->fp_status));
136
}
137
138
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
139
{
140
- float16 frs1 = check_nanbox_h(rs1);
141
- float16 frs2 = check_nanbox_h(rs2);
142
- return nanbox_h(env->priv_ver < PRIV_VERSION_1_11_0 ?
143
+ float16 frs1 = check_nanbox_h(env, rs1);
144
+ float16 frs2 = check_nanbox_h(env, rs2);
145
+ return nanbox_h(env, env->priv_ver < PRIV_VERSION_1_11_0 ?
146
float16_maxnum(frs1, frs2, &env->fp_status) :
147
float16_maximum_number(frs1, frs2, &env->fp_status));
148
}
149
150
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
151
{
152
- float16 frs1 = check_nanbox_h(rs1);
153
- return nanbox_h(float16_sqrt(frs1, &env->fp_status));
154
+ float16 frs1 = check_nanbox_h(env, rs1);
155
+ return nanbox_h(env, float16_sqrt(frs1, &env->fp_status));
156
}
157
158
target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
159
{
160
- float16 frs1 = check_nanbox_h(rs1);
161
- float16 frs2 = check_nanbox_h(rs2);
162
+ float16 frs1 = check_nanbox_h(env, rs1);
163
+ float16 frs2 = check_nanbox_h(env, rs2);
164
return float16_le(frs1, frs2, &env->fp_status);
165
}
166
167
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
168
{
169
- float16 frs1 = check_nanbox_h(rs1);
170
- float16 frs2 = check_nanbox_h(rs2);
171
+ float16 frs1 = check_nanbox_h(env, rs1);
172
+ float16 frs2 = check_nanbox_h(env, rs2);
173
return float16_lt(frs1, frs2, &env->fp_status);
174
}
175
176
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
177
{
178
- float16 frs1 = check_nanbox_h(rs1);
179
- float16 frs2 = check_nanbox_h(rs2);
180
+ float16 frs1 = check_nanbox_h(env, rs1);
181
+ float16 frs2 = check_nanbox_h(env, rs2);
182
return float16_eq_quiet(frs1, frs2, &env->fp_status);
183
}
184
185
-target_ulong helper_fclass_h(uint64_t rs1)
186
+target_ulong helper_fclass_h(CPURISCVState *env, uint64_t rs1)
187
{
188
- float16 frs1 = check_nanbox_h(rs1);
189
+ float16 frs1 = check_nanbox_h(env, rs1);
190
return fclass_h(frs1);
191
}
192
193
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
194
{
195
- float16 frs1 = check_nanbox_h(rs1);
196
+ float16 frs1 = check_nanbox_h(env, rs1);
197
return float16_to_int32(frs1, &env->fp_status);
198
}
199
200
target_ulong helper_fcvt_wu_h(CPURISCVState *env, uint64_t rs1)
201
{
202
- float16 frs1 = check_nanbox_h(rs1);
203
+ float16 frs1 = check_nanbox_h(env, rs1);
204
return (int32_t)float16_to_uint32(frs1, &env->fp_status);
205
}
206
207
target_ulong helper_fcvt_l_h(CPURISCVState *env, uint64_t rs1)
208
{
209
- float16 frs1 = check_nanbox_h(rs1);
210
+ float16 frs1 = check_nanbox_h(env, rs1);
211
return float16_to_int64(frs1, &env->fp_status);
212
}
213
214
target_ulong helper_fcvt_lu_h(CPURISCVState *env, uint64_t rs1)
215
{
216
- float16 frs1 = check_nanbox_h(rs1);
217
+ float16 frs1 = check_nanbox_h(env, rs1);
218
return float16_to_uint64(frs1, &env->fp_status);
219
}
220
221
uint64_t helper_fcvt_h_w(CPURISCVState *env, target_ulong rs1)
222
{
223
- return nanbox_h(int32_to_float16((int32_t)rs1, &env->fp_status));
224
+ return nanbox_h(env, int32_to_float16((int32_t)rs1, &env->fp_status));
225
}
226
227
uint64_t helper_fcvt_h_wu(CPURISCVState *env, target_ulong rs1)
228
{
229
- return nanbox_h(uint32_to_float16((uint32_t)rs1, &env->fp_status));
230
+ return nanbox_h(env, uint32_to_float16((uint32_t)rs1, &env->fp_status));
231
}
232
233
uint64_t helper_fcvt_h_l(CPURISCVState *env, target_ulong rs1)
234
{
235
- return nanbox_h(int64_to_float16(rs1, &env->fp_status));
236
+ return nanbox_h(env, int64_to_float16(rs1, &env->fp_status));
237
}
238
239
uint64_t helper_fcvt_h_lu(CPURISCVState *env, target_ulong rs1)
240
{
241
- return nanbox_h(uint64_to_float16(rs1, &env->fp_status));
242
+ return nanbox_h(env, uint64_to_float16(rs1, &env->fp_status));
243
}
244
245
uint64_t helper_fcvt_h_s(CPURISCVState *env, uint64_t rs1)
246
{
247
float32 frs1 = check_nanbox_s(env, rs1);
248
- return nanbox_h(float32_to_float16(frs1, true, &env->fp_status));
249
+ return nanbox_h(env, float32_to_float16(frs1, true, &env->fp_status));
250
}
251
252
uint64_t helper_fcvt_s_h(CPURISCVState *env, uint64_t rs1)
253
{
254
- float16 frs1 = check_nanbox_h(rs1);
255
+ float16 frs1 = check_nanbox_h(env, rs1);
256
return nanbox_s(env, float16_to_float32(frs1, true, &env->fp_status));
257
}
258
259
uint64_t helper_fcvt_h_d(CPURISCVState *env, uint64_t rs1)
260
{
261
- return nanbox_h(float64_to_float16(rs1, true, &env->fp_status));
262
+ return nanbox_h(env, float64_to_float16(rs1, true, &env->fp_status));
263
}
264
265
uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
266
{
267
- float16 frs1 = check_nanbox_h(rs1);
268
+ float16 frs1 = check_nanbox_h(env, rs1);
269
return float16_to_float64(frs1, true, &env->fp_status);
270
}
271
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
272
index XXXXXXX..XXXXXXX 100644
273
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
274
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
275
@@ -XXX,XX +XXX,XX @@
276
} \
277
} while (0)
278
279
+#define REQUIRE_ZHINX_OR_ZFH(ctx) do { \
280
+ if (!ctx->cfg_ptr->ext_zhinx && !ctx->cfg_ptr->ext_zfh) { \
281
+ return false; \
282
+ } \
283
+} while (0)
284
+
285
#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
286
if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
287
return false; \
288
} \
289
} while (0)
290
291
+#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
292
+ if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
293
+ ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
294
+ return false; \
295
+ } \
296
+} while (0)
297
+
298
static bool trans_flh(DisasContext *ctx, arg_flh *a)
299
{
300
TCGv_i64 dest;
301
@@ -XXX,XX +XXX,XX @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
302
static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
303
{
304
REQUIRE_FPU;
305
- REQUIRE_ZFH(ctx);
306
+ REQUIRE_ZHINX_OR_ZFH(ctx);
307
+
308
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
309
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
310
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
311
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
312
313
gen_set_rm(ctx, a->rm);
314
- gen_helper_fmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
315
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
316
+ gen_helper_fmadd_h(dest, cpu_env, src1, src2, src3);
317
+ gen_set_fpr_hs(ctx, a->rd, dest);
318
mark_fs_dirty(ctx);
319
return true;
118
return true;
320
}
119
}
321
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_h(DisasContext *ctx, arg_fmadd_h *a)
120
322
static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
121
+static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a)
122
+{
123
+#ifndef CONFIG_USER_ONLY
124
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
125
+ gen_helper_ctr_clear(tcg_env);
126
+ return true;
127
+ }
128
+#endif
129
+ return false;
130
+}
131
+
132
static bool trans_uret(DisasContext *ctx, arg_uret *a)
323
{
133
{
324
REQUIRE_FPU;
134
return false;
325
- REQUIRE_ZFH(ctx);
326
+ REQUIRE_ZHINX_OR_ZFH(ctx);
327
+
328
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
329
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
330
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
331
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
332
333
gen_set_rm(ctx, a->rm);
334
- gen_helper_fmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
335
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
336
+ gen_helper_fmsub_h(dest, cpu_env, src1, src2, src3);
337
+ gen_set_fpr_hs(ctx, a->rd, dest);
338
mark_fs_dirty(ctx);
339
return true;
340
}
341
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_h(DisasContext *ctx, arg_fmsub_h *a)
342
static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
343
{
344
REQUIRE_FPU;
345
- REQUIRE_ZFH(ctx);
346
+ REQUIRE_ZHINX_OR_ZFH(ctx);
347
+
348
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
349
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
350
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
351
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
352
353
gen_set_rm(ctx, a->rm);
354
- gen_helper_fnmsub_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
355
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
356
+ gen_helper_fnmsub_h(dest, cpu_env, src1, src2, src3);
357
+ gen_set_fpr_hs(ctx, a->rd, dest);
358
mark_fs_dirty(ctx);
359
return true;
360
}
361
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_h(DisasContext *ctx, arg_fnmsub_h *a)
362
static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
363
{
364
REQUIRE_FPU;
365
- REQUIRE_ZFH(ctx);
366
+ REQUIRE_ZHINX_OR_ZFH(ctx);
367
+
368
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
369
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
370
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
371
+ TCGv_i64 src3 = get_fpr_hs(ctx, a->rs3);
372
373
gen_set_rm(ctx, a->rm);
374
- gen_helper_fnmadd_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
375
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
376
+ gen_helper_fnmadd_h(dest, cpu_env, src1, src2, src3);
377
+ gen_set_fpr_hs(ctx, a->rd, dest);
378
mark_fs_dirty(ctx);
379
return true;
380
}
381
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_h(DisasContext *ctx, arg_fnmadd_h *a)
382
static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
383
{
384
REQUIRE_FPU;
385
- REQUIRE_ZFH(ctx);
386
+ REQUIRE_ZHINX_OR_ZFH(ctx);
387
+
388
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
389
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
390
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
391
392
gen_set_rm(ctx, a->rm);
393
- gen_helper_fadd_h(cpu_fpr[a->rd], cpu_env,
394
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
395
+ gen_helper_fadd_h(dest, cpu_env, src1, src2);
396
+ gen_set_fpr_hs(ctx, a->rd, dest);
397
mark_fs_dirty(ctx);
398
return true;
399
}
400
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_h(DisasContext *ctx, arg_fadd_h *a)
401
static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
402
{
403
REQUIRE_FPU;
404
- REQUIRE_ZFH(ctx);
405
+ REQUIRE_ZHINX_OR_ZFH(ctx);
406
+
407
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
408
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
409
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
410
411
gen_set_rm(ctx, a->rm);
412
- gen_helper_fsub_h(cpu_fpr[a->rd], cpu_env,
413
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
414
+ gen_helper_fsub_h(dest, cpu_env, src1, src2);
415
+ gen_set_fpr_hs(ctx, a->rd, dest);
416
mark_fs_dirty(ctx);
417
return true;
418
}
419
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_h(DisasContext *ctx, arg_fsub_h *a)
420
static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
421
{
422
REQUIRE_FPU;
423
- REQUIRE_ZFH(ctx);
424
+ REQUIRE_ZHINX_OR_ZFH(ctx);
425
+
426
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
427
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
428
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
429
430
gen_set_rm(ctx, a->rm);
431
- gen_helper_fmul_h(cpu_fpr[a->rd], cpu_env,
432
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
433
+ gen_helper_fmul_h(dest, cpu_env, src1, src2);
434
+ gen_set_fpr_hs(ctx, a->rd, dest);
435
mark_fs_dirty(ctx);
436
return true;
437
}
438
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_h(DisasContext *ctx, arg_fmul_h *a)
439
static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
440
{
441
REQUIRE_FPU;
442
- REQUIRE_ZFH(ctx);
443
+ REQUIRE_ZHINX_OR_ZFH(ctx);
444
+
445
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
446
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
447
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
448
449
gen_set_rm(ctx, a->rm);
450
- gen_helper_fdiv_h(cpu_fpr[a->rd], cpu_env,
451
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
452
+ gen_helper_fdiv_h(dest, cpu_env, src1, src2);
453
+ gen_set_fpr_hs(ctx, a->rd, dest);
454
mark_fs_dirty(ctx);
455
return true;
456
}
457
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_h(DisasContext *ctx, arg_fdiv_h *a)
458
static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
459
{
460
REQUIRE_FPU;
461
- REQUIRE_ZFH(ctx);
462
+ REQUIRE_ZHINX_OR_ZFH(ctx);
463
+
464
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
465
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
466
467
gen_set_rm(ctx, a->rm);
468
- gen_helper_fsqrt_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
469
+ gen_helper_fsqrt_h(dest, cpu_env, src1);
470
+ gen_set_fpr_hs(ctx, a->rd, dest);
471
mark_fs_dirty(ctx);
472
return true;
473
}
474
@@ -XXX,XX +XXX,XX @@ static bool trans_fsqrt_h(DisasContext *ctx, arg_fsqrt_h *a)
475
static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a)
476
{
477
REQUIRE_FPU;
478
- REQUIRE_ZFH(ctx);
479
+ REQUIRE_ZHINX_OR_ZFH(ctx);
480
+
481
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
482
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
483
484
if (a->rs1 == a->rs2) { /* FMOV */
485
- gen_check_nanbox_h(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
486
+ if (!ctx->cfg_ptr->ext_zfinx) {
487
+ gen_check_nanbox_h(dest, src1);
488
+ } else {
489
+ tcg_gen_ext16s_i64(dest, src1);
490
+ }
491
} else {
492
- TCGv_i64 rs1 = tcg_temp_new_i64();
493
- TCGv_i64 rs2 = tcg_temp_new_i64();
494
-
495
- gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
496
- gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
497
-
498
- /* This formulation retains the nanboxing of rs2. */
499
- tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 15);
500
- tcg_temp_free_i64(rs1);
501
- tcg_temp_free_i64(rs2);
502
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
503
+
504
+ if (!ctx->cfg_ptr->ext_zfinx) {
505
+ TCGv_i64 rs1 = tcg_temp_new_i64();
506
+ TCGv_i64 rs2 = tcg_temp_new_i64();
507
+ gen_check_nanbox_h(rs1, src1);
508
+ gen_check_nanbox_h(rs2, src2);
509
+
510
+ /* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */
511
+ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15);
512
+
513
+ tcg_temp_free_i64(rs1);
514
+ tcg_temp_free_i64(rs2);
515
+ } else {
516
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 15);
517
+ tcg_gen_ext16s_i64(dest, dest);
518
+ }
519
}
520
-
521
+ gen_set_fpr_hs(ctx, a->rd, dest);
522
mark_fs_dirty(ctx);
523
return true;
524
}
525
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
526
TCGv_i64 rs1, rs2, mask;
527
528
REQUIRE_FPU;
529
- REQUIRE_ZFH(ctx);
530
+ REQUIRE_ZHINX_OR_ZFH(ctx);
531
+
532
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
533
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
534
535
rs1 = tcg_temp_new_i64();
536
- gen_check_nanbox_h(rs1, cpu_fpr[a->rs1]);
537
+ if (!ctx->cfg_ptr->ext_zfinx) {
538
+ gen_check_nanbox_h(rs1, src1);
539
+ } else {
540
+ tcg_gen_mov_i64(rs1, src1);
541
+ }
542
543
if (a->rs1 == a->rs2) { /* FNEG */
544
- tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(15, 1));
545
+ tcg_gen_xori_i64(dest, rs1, MAKE_64BIT_MASK(15, 1));
546
} else {
547
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
548
rs2 = tcg_temp_new_i64();
549
- gen_check_nanbox_h(rs2, cpu_fpr[a->rs2]);
550
+
551
+ if (!ctx->cfg_ptr->ext_zfinx) {
552
+ gen_check_nanbox_h(rs2, src2);
553
+ } else {
554
+ tcg_gen_mov_i64(rs2, src2);
555
+ }
556
557
/*
558
* Replace bit 15 in rs1 with inverse in rs2.
559
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a)
560
mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1));
561
tcg_gen_not_i64(rs2, rs2);
562
tcg_gen_andc_i64(rs2, rs2, mask);
563
- tcg_gen_and_i64(rs1, mask, rs1);
564
- tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
565
+ tcg_gen_and_i64(dest, mask, rs1);
566
+ tcg_gen_or_i64(dest, dest, rs2);
567
568
tcg_temp_free_i64(mask);
569
tcg_temp_free_i64(rs2);
570
}
571
+ /* signed-extended intead of nanboxing for result if enable zfinx */
572
+ if (ctx->cfg_ptr->ext_zfinx) {
573
+ tcg_gen_ext16s_i64(dest, dest);
574
+ }
575
+ tcg_temp_free_i64(rs1);
576
mark_fs_dirty(ctx);
577
return true;
578
}
579
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
580
TCGv_i64 rs1, rs2;
581
582
REQUIRE_FPU;
583
- REQUIRE_ZFH(ctx);
584
+ REQUIRE_ZHINX_OR_ZFH(ctx);
585
+
586
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
587
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
588
589
rs1 = tcg_temp_new_i64();
590
- gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
591
+ if (!ctx->cfg_ptr->ext_zfinx) {
592
+ gen_check_nanbox_h(rs1, src1);
593
+ } else {
594
+ tcg_gen_mov_i64(rs1, src1);
595
+ }
596
597
if (a->rs1 == a->rs2) { /* FABS */
598
- tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(15, 1));
599
+ tcg_gen_andi_i64(dest, rs1, ~MAKE_64BIT_MASK(15, 1));
600
} else {
601
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
602
rs2 = tcg_temp_new_i64();
603
- gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
604
+
605
+ if (!ctx->cfg_ptr->ext_zfinx) {
606
+ gen_check_nanbox_h(rs2, src2);
607
+ } else {
608
+ tcg_gen_mov_i64(rs2, src2);
609
+ }
610
611
/*
612
* Xor bit 15 in rs1 with that in rs2.
613
* This formulation retains the nanboxing of rs1.
614
*/
615
- tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(15, 1));
616
- tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
617
+ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1));
618
+ tcg_gen_xor_i64(dest, rs1, dest);
619
620
tcg_temp_free_i64(rs2);
621
}
622
-
623
+ /* signed-extended intead of nanboxing for result if enable zfinx */
624
+ if (ctx->cfg_ptr->ext_zfinx) {
625
+ tcg_gen_ext16s_i64(dest, dest);
626
+ }
627
+ tcg_temp_free_i64(rs1);
628
mark_fs_dirty(ctx);
629
return true;
630
}
631
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a)
632
static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
633
{
634
REQUIRE_FPU;
635
- REQUIRE_ZFH(ctx);
636
+ REQUIRE_ZHINX_OR_ZFH(ctx);
637
+
638
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
639
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
640
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
641
642
- gen_helper_fmin_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
643
- cpu_fpr[a->rs2]);
644
+ gen_helper_fmin_h(dest, cpu_env, src1, src2);
645
+ gen_set_fpr_hs(ctx, a->rd, dest);
646
mark_fs_dirty(ctx);
647
return true;
648
}
649
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_h(DisasContext *ctx, arg_fmin_h *a)
650
static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
651
{
652
REQUIRE_FPU;
653
- REQUIRE_ZFH(ctx);
654
+ REQUIRE_ZHINX_OR_ZFH(ctx);
655
656
- gen_helper_fmax_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
657
- cpu_fpr[a->rs2]);
658
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
659
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
660
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
661
+
662
+ gen_helper_fmax_h(dest, cpu_env, src1, src2);
663
+ gen_set_fpr_hs(ctx, a->rd, dest);
664
mark_fs_dirty(ctx);
665
return true;
666
}
667
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
668
static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
669
{
670
REQUIRE_FPU;
671
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
672
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
673
+
674
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
675
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
676
677
gen_set_rm(ctx, a->rm);
678
- gen_helper_fcvt_s_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
679
+ gen_helper_fcvt_s_h(dest, cpu_env, src1);
680
+ gen_set_fpr_hs(ctx, a->rd, dest);
681
682
mark_fs_dirty(ctx);
683
684
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
685
static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
686
{
687
REQUIRE_FPU;
688
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
689
- REQUIRE_EXT(ctx, RVD);
690
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
691
+ REQUIRE_ZDINX_OR_D(ctx);
692
+
693
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
694
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
695
696
gen_set_rm(ctx, a->rm);
697
- gen_helper_fcvt_d_h(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
698
+ gen_helper_fcvt_d_h(dest, cpu_env, src1);
699
+ gen_set_fpr_d(ctx, a->rd, dest);
700
701
mark_fs_dirty(ctx);
702
703
-
704
return true;
705
}
706
707
static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
708
{
709
REQUIRE_FPU;
710
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
711
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
712
713
- gen_set_rm(ctx, a->rm);
714
- gen_helper_fcvt_h_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
715
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
716
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
717
718
+ gen_set_rm(ctx, a->rm);
719
+ gen_helper_fcvt_h_s(dest, cpu_env, src1);
720
+ gen_set_fpr_hs(ctx, a->rd, dest);
721
mark_fs_dirty(ctx);
722
723
return true;
724
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
725
static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
726
{
727
REQUIRE_FPU;
728
- REQUIRE_ZFH_OR_ZFHMIN(ctx);
729
- REQUIRE_EXT(ctx, RVD);
730
+ REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
731
+ REQUIRE_ZDINX_OR_D(ctx);
732
733
- gen_set_rm(ctx, a->rm);
734
- gen_helper_fcvt_h_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
735
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
736
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
737
738
+ gen_set_rm(ctx, a->rm);
739
+ gen_helper_fcvt_h_d(dest, cpu_env, src1);
740
+ gen_set_fpr_hs(ctx, a->rd, dest);
741
mark_fs_dirty(ctx);
742
743
return true;
744
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
745
static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
746
{
747
REQUIRE_FPU;
748
- REQUIRE_ZFH(ctx);
749
+ REQUIRE_ZHINX_OR_ZFH(ctx);
750
751
TCGv dest = dest_gpr(ctx, a->rd);
752
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
753
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
754
755
- gen_helper_feq_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
756
+ gen_helper_feq_h(dest, cpu_env, src1, src2);
757
gen_set_gpr(ctx, a->rd, dest);
758
return true;
759
}
760
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_h(DisasContext *ctx, arg_feq_h *a)
761
static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
762
{
763
REQUIRE_FPU;
764
- REQUIRE_ZFH(ctx);
765
+ REQUIRE_ZHINX_OR_ZFH(ctx);
766
767
TCGv dest = dest_gpr(ctx, a->rd);
768
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
769
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
770
771
- gen_helper_flt_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
772
+ gen_helper_flt_h(dest, cpu_env, src1, src2);
773
gen_set_gpr(ctx, a->rd, dest);
774
775
return true;
776
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_h(DisasContext *ctx, arg_flt_h *a)
777
static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
778
{
779
REQUIRE_FPU;
780
- REQUIRE_ZFH(ctx);
781
+ REQUIRE_ZHINX_OR_ZFH(ctx);
782
783
TCGv dest = dest_gpr(ctx, a->rd);
784
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
785
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
786
787
- gen_helper_fle_h(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
788
+ gen_helper_fle_h(dest, cpu_env, src1, src2);
789
gen_set_gpr(ctx, a->rd, dest);
790
return true;
791
}
792
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_h(DisasContext *ctx, arg_fle_h *a)
793
static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
794
{
795
REQUIRE_FPU;
796
- REQUIRE_ZFH(ctx);
797
+ REQUIRE_ZHINX_OR_ZFH(ctx);
798
799
TCGv dest = dest_gpr(ctx, a->rd);
800
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
801
802
- gen_helper_fclass_h(dest, cpu_fpr[a->rs1]);
803
+ gen_helper_fclass_h(dest, cpu_env, src1);
804
gen_set_gpr(ctx, a->rd, dest);
805
return true;
806
}
807
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_h(DisasContext *ctx, arg_fclass_h *a)
808
static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
809
{
810
REQUIRE_FPU;
811
- REQUIRE_ZFH(ctx);
812
+ REQUIRE_ZHINX_OR_ZFH(ctx);
813
814
TCGv dest = dest_gpr(ctx, a->rd);
815
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
816
817
gen_set_rm(ctx, a->rm);
818
- gen_helper_fcvt_w_h(dest, cpu_env, cpu_fpr[a->rs1]);
819
+ gen_helper_fcvt_w_h(dest, cpu_env, src1);
820
gen_set_gpr(ctx, a->rd, dest);
821
return true;
822
}
823
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_h(DisasContext *ctx, arg_fcvt_w_h *a)
824
static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
825
{
826
REQUIRE_FPU;
827
- REQUIRE_ZFH(ctx);
828
+ REQUIRE_ZHINX_OR_ZFH(ctx);
829
830
TCGv dest = dest_gpr(ctx, a->rd);
831
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
832
833
gen_set_rm(ctx, a->rm);
834
- gen_helper_fcvt_wu_h(dest, cpu_env, cpu_fpr[a->rs1]);
835
+ gen_helper_fcvt_wu_h(dest, cpu_env, src1);
836
gen_set_gpr(ctx, a->rd, dest);
837
return true;
838
}
839
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_h(DisasContext *ctx, arg_fcvt_wu_h *a)
840
static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
841
{
842
REQUIRE_FPU;
843
- REQUIRE_ZFH(ctx);
844
+ REQUIRE_ZHINX_OR_ZFH(ctx);
845
846
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
847
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
848
849
gen_set_rm(ctx, a->rm);
850
- gen_helper_fcvt_h_w(cpu_fpr[a->rd], cpu_env, t0);
851
+ gen_helper_fcvt_h_w(dest, cpu_env, t0);
852
+ gen_set_fpr_hs(ctx, a->rd, dest);
853
854
mark_fs_dirty(ctx);
855
return true;
856
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_w(DisasContext *ctx, arg_fcvt_h_w *a)
857
static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
858
{
859
REQUIRE_FPU;
860
- REQUIRE_ZFH(ctx);
861
+ REQUIRE_ZHINX_OR_ZFH(ctx);
862
863
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
864
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
865
866
gen_set_rm(ctx, a->rm);
867
- gen_helper_fcvt_h_wu(cpu_fpr[a->rd], cpu_env, t0);
868
+ gen_helper_fcvt_h_wu(dest, cpu_env, t0);
869
+ gen_set_fpr_hs(ctx, a->rd, dest);
870
871
mark_fs_dirty(ctx);
872
return true;
873
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_h(DisasContext *ctx, arg_fcvt_l_h *a)
874
{
875
REQUIRE_64BIT(ctx);
876
REQUIRE_FPU;
877
- REQUIRE_ZFH(ctx);
878
+ REQUIRE_ZHINX_OR_ZFH(ctx);
879
880
TCGv dest = dest_gpr(ctx, a->rd);
881
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
882
883
gen_set_rm(ctx, a->rm);
884
- gen_helper_fcvt_l_h(dest, cpu_env, cpu_fpr[a->rs1]);
885
+ gen_helper_fcvt_l_h(dest, cpu_env, src1);
886
gen_set_gpr(ctx, a->rd, dest);
887
return true;
888
}
889
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_h(DisasContext *ctx, arg_fcvt_lu_h *a)
890
{
891
REQUIRE_64BIT(ctx);
892
REQUIRE_FPU;
893
- REQUIRE_ZFH(ctx);
894
+ REQUIRE_ZHINX_OR_ZFH(ctx);
895
896
TCGv dest = dest_gpr(ctx, a->rd);
897
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
898
899
gen_set_rm(ctx, a->rm);
900
- gen_helper_fcvt_lu_h(dest, cpu_env, cpu_fpr[a->rs1]);
901
+ gen_helper_fcvt_lu_h(dest, cpu_env, src1);
902
gen_set_gpr(ctx, a->rd, dest);
903
return true;
904
}
905
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_l(DisasContext *ctx, arg_fcvt_h_l *a)
906
{
907
REQUIRE_64BIT(ctx);
908
REQUIRE_FPU;
909
- REQUIRE_ZFH(ctx);
910
+ REQUIRE_ZHINX_OR_ZFH(ctx);
911
912
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
913
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
914
915
gen_set_rm(ctx, a->rm);
916
- gen_helper_fcvt_h_l(cpu_fpr[a->rd], cpu_env, t0);
917
+ gen_helper_fcvt_h_l(dest, cpu_env, t0);
918
+ gen_set_fpr_hs(ctx, a->rd, dest);
919
920
mark_fs_dirty(ctx);
921
return true;
922
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_h_lu(DisasContext *ctx, arg_fcvt_h_lu *a)
923
{
924
REQUIRE_64BIT(ctx);
925
REQUIRE_FPU;
926
- REQUIRE_ZFH(ctx);
927
+ REQUIRE_ZHINX_OR_ZFH(ctx);
928
929
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
930
TCGv t0 = get_gpr(ctx, a->rs1, EXT_SIGN);
931
932
gen_set_rm(ctx, a->rm);
933
- gen_helper_fcvt_h_lu(cpu_fpr[a->rd], cpu_env, t0);
934
+ gen_helper_fcvt_h_lu(dest, cpu_env, t0);
935
+ gen_set_fpr_hs(ctx, a->rd, dest);
936
937
mark_fs_dirty(ctx);
938
return true;
939
--
135
--
940
2.35.1
136
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
2
3
Add a subsection to machine.c to migrate CTR CSR state
4
5
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
6
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250205-b4-ctr_upstream_v6-v6-6-439d8e06c8ef@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/machine.c | 25 +++++++++++++++++++++++++
11
1 file changed, 25 insertions(+)
12
13
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/machine.c
16
+++ b/target/riscv/machine.c
17
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_envcfg = {
18
}
19
};
20
21
+static bool ctr_needed(void *opaque)
22
+{
23
+ RISCVCPU *cpu = opaque;
24
+
25
+ return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
26
+}
27
+
28
+static const VMStateDescription vmstate_ctr = {
29
+ .name = "cpu/ctr",
30
+ .version_id = 1,
31
+ .minimum_version_id = 1,
32
+ .needed = ctr_needed,
33
+ .fields = (const VMStateField[]) {
34
+ VMSTATE_UINT64(env.mctrctl, RISCVCPU),
35
+ VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
36
+ VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
37
+ VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
38
+ VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
39
+ VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
40
+ VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
41
+ VMSTATE_END_OF_LIST()
42
+ }
43
+};
44
+
45
static bool pmu_needed(void *opaque)
46
{
47
RISCVCPU *cpu = opaque;
48
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
49
&vmstate_jvt,
50
&vmstate_elp,
51
&vmstate_ssp,
52
+ &vmstate_ctr,
53
NULL
54
}
55
};
56
--
57
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: julia <midnight@trainwit.ch>
2
2
3
Co-authored-by: ardxwe <ardxwe@gmail.com>
3
For instance, QEMUs newer than b6ecc63c569bb88c0fcadf79fb92bf4b88aefea8
4
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
4
would silently treat this akin to an unmapped page (as required by the
5
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
5
RISC-V spec, admittedly). However, not all hardware platforms do (e.g.
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
CVA6) which leads to an apparent QEMU bug.
7
Message-Id: <20220211043920.28981-3-liweiwei@iscas.ac.cn>
7
8
Instead, log a guest error so that in future, incorrectly set up page
9
tables can be debugged without bisecting QEMU.
10
11
Signed-off-by: julia <midnight@trainwit.ch>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Message-ID: <20250203061852.2931556-1-midnight@trainwit.ch>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
15
---
10
target/riscv/cpu_helper.c | 6 +++++-
16
target/riscv/cpu_helper.c | 27 ++++++++++++++++++++++++++-
11
target/riscv/csr.c | 25 ++++++++++++++++++++-----
17
1 file changed, 26 insertions(+), 1 deletion(-)
12
target/riscv/translate.c | 4 ++++
13
3 files changed, 29 insertions(+), 6 deletions(-)
14
18
15
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
19
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_helper.c
21
--- a/target/riscv/cpu_helper.c
18
+++ b/target/riscv/cpu_helper.c
22
+++ b/target/riscv/cpu_helper.c
19
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_vector_enabled(CPURISCVState *env)
23
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
20
24
ppn = pte >> PTE_PPN_SHIFT;
21
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
25
} else {
22
{
26
if (pte & PTE_RESERVED) {
23
- uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
27
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
24
+ uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
28
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
25
MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
29
+ __func__, pte_addr, pte);
26
MSTATUS64_UXL | MSTATUS_VS;
30
return TRANSLATE_FAIL;
31
}
32
33
if (!pbmte && (pte & PTE_PBMT)) {
34
+ /* Reserved without Svpbmt. */
35
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
36
+ "and Svpbmt extension is disabled: "
37
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
38
+ __func__, pte_addr, pte);
39
return TRANSLATE_FAIL;
40
}
41
42
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
43
+ /* Reserved without Svnapot extension */
44
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
45
+ "and Svnapot extension is disabled: "
46
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
47
+ __func__, pte_addr, pte);
48
return TRANSLATE_FAIL;
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
52
/* Invalid PTE */
53
return TRANSLATE_FAIL;
54
}
27
+
55
+
28
+ if (riscv_has_ext(env, RVF)) {
56
if (pte & (PTE_R | PTE_W | PTE_X)) {
29
+ mstatus_mask |= MSTATUS_FS;
57
goto leaf;
30
+ }
58
}
31
bool current_virt = riscv_cpu_virt_enabled(env);
59
32
60
- /* Inner PTE, continue walking */
33
g_assert(riscv_has_ext(env, RVH));
61
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
34
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
62
+ /* D, A, and U bits are reserved in non-leaf/inner PTEs */
35
index XXXXXXX..XXXXXXX 100644
63
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
36
--- a/target/riscv/csr.c
64
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
37
+++ b/target/riscv/csr.c
65
+ __func__, pte_addr, pte);
38
@@ -XXX,XX +XXX,XX @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
66
return TRANSLATE_FAIL;
39
static RISCVException fs(CPURISCVState *env, int csrno)
67
}
40
{
68
+ /* Inner PTE, continue walking */
41
#if !defined(CONFIG_USER_ONLY)
69
base = ppn << PGSHIFT;
42
- if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
43
+ if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
44
+ !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
45
return RISCV_EXCP_ILLEGAL_INST;
46
}
70
}
47
#endif
71
48
@@ -XXX,XX +XXX,XX @@ static RISCVException write_fflags(CPURISCVState *env, int csrno,
72
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
49
target_ulong val)
73
leaf:
50
{
74
if (ppn & ((1ULL << ptshift) - 1)) {
51
#if !defined(CONFIG_USER_ONLY)
75
/* Misaligned PPN */
52
- env->mstatus |= MSTATUS_FS;
76
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
53
+ if (riscv_has_ext(env, RVF)) {
77
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
54
+ env->mstatus |= MSTATUS_FS;
78
+ __func__, pte_addr, pte);
55
+ }
79
return TRANSLATE_FAIL;
56
#endif
57
riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
58
return RISCV_EXCP_NONE;
59
@@ -XXX,XX +XXX,XX @@ static RISCVException write_frm(CPURISCVState *env, int csrno,
60
target_ulong val)
61
{
62
#if !defined(CONFIG_USER_ONLY)
63
- env->mstatus |= MSTATUS_FS;
64
+ if (riscv_has_ext(env, RVF)) {
65
+ env->mstatus |= MSTATUS_FS;
66
+ }
67
#endif
68
env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
69
return RISCV_EXCP_NONE;
70
@@ -XXX,XX +XXX,XX @@ static RISCVException write_fcsr(CPURISCVState *env, int csrno,
71
target_ulong val)
72
{
73
#if !defined(CONFIG_USER_ONLY)
74
- env->mstatus |= MSTATUS_FS;
75
+ if (riscv_has_ext(env, RVF)) {
76
+ env->mstatus |= MSTATUS_FS;
77
+ }
78
#endif
79
env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
80
riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
81
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
82
tlb_flush(env_cpu(env));
83
}
80
}
84
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
81
if (!pbmte && (pte & PTE_PBMT)) {
85
- MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
82
/* Reserved without Svpbmt. */
86
+ MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
83
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
87
MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
84
+ "and Svpbmt extension is disabled: "
88
MSTATUS_TW | MSTATUS_VS;
85
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
89
86
+ __func__, pte_addr, pte);
90
+ if (riscv_has_ext(env, RVF)) {
87
return TRANSLATE_FAIL;
91
+ mask |= MSTATUS_FS;
92
+ }
93
+
94
if (xl != MXL_RV32 || env->debugger) {
95
/*
96
* RV32: MPV and GVA are not in mstatus. The current plan is to
97
@@ -XXX,XX +XXX,XX @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
98
return RISCV_EXCP_NONE;
99
}
88
}
100
89
101
+ if (!(val & RVF)) {
102
+ env->mstatus &= ~MSTATUS_FS;
103
+ }
104
+
105
/* flush translation cache */
106
tb_flush(env_cpu(env));
107
env->misa_ext = val;
108
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
109
index XXXXXXX..XXXXXXX 100644
110
--- a/target/riscv/translate.c
111
+++ b/target/riscv/translate.c
112
@@ -XXX,XX +XXX,XX @@ static void mark_fs_dirty(DisasContext *ctx)
113
{
114
TCGv tmp;
115
116
+ if (!has_ext(ctx, RVF)) {
117
+ return;
118
+ }
119
+
120
if (ctx->mstatus_fs != MSTATUS_FS) {
121
/* Remember the state change for the rest of the TB. */
122
ctx->mstatus_fs = MSTATUS_FS;
123
--
90
--
124
2.35.1
91
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rob Bradford <rbradford@rivosinc.com>
1
2
3
Some extra spaces made into into the RISC-V opcode data table.
4
5
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250206153410.236636-2-rbradford@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
disas/riscv.c | 12 ++++++------
11
1 file changed, 6 insertions(+), 6 deletions(-)
12
13
diff --git a/disas/riscv.c b/disas/riscv.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/disas/riscv.c
16
+++ b/disas/riscv.c
17
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data rvi_opcode_data[] = {
18
{ "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
19
{ "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
20
{ "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
21
- { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
22
+ { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
23
{ "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
24
{ "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
25
{ "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
26
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data rvi_opcode_data[] = {
27
{ "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
28
{ "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
29
{ "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
30
- { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
31
- { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
32
- { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
33
- { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
34
- { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
35
+ { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
36
+ { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
37
+ { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
38
+ { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
39
+ { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
40
{ "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
41
{ "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
42
{ "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
43
--
44
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rob Bradford <rbradford@rivosinc.com>
1
2
3
This reflects the latest frozen version of the RISC-V Debug
4
specification (1.0.0-rc4) which includes the Sdtrig extension.
5
6
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-ID: <20250206153410.236636-3-rbradford@rivosinc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
disas/riscv.c | 4 +++-
12
1 file changed, 3 insertions(+), 1 deletion(-)
13
14
diff --git a/disas/riscv.c b/disas/riscv.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/disas/riscv.c
17
+++ b/disas/riscv.c
18
@@ -XXX,XX +XXX,XX @@ static const char *csr_name(int csrno)
19
case 0x07a1: return "tdata1";
20
case 0x07a2: return "tdata2";
21
case 0x07a3: return "tdata3";
22
+ case 0x07a4: return "tinfo";
23
case 0x07b0: return "dcsr";
24
case 0x07b1: return "dpc";
25
- case 0x07b2: return "dscratch";
26
+ case 0x07b2: return "dscratch0";
27
+ case 0x07b3: return "dscratch1";
28
case 0x0b00: return "mcycle";
29
case 0x0b01: return "mtime";
30
case 0x0b02: return "minstret";
31
--
32
2.48.1
diff view generated by jsdifflib
New patch
1
From: Atish Patra <atishp@rivosinc.com>
1
2
3
As per the latest privilege specification v1.13[1], the sscofpmf
4
only reserves first 8 bits of hpmeventX. Update the corresponding
5
masks accordingly.
6
7
[1]https://github.com/riscv/riscv-isa-manual/issues/1578
8
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Signed-off-by: Atish Patra <atishp@rivosinc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250206-pmu_minor_fixes-v2-1-1bb0f4aeb8b4@rivosinc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/cpu_bits.h | 5 ++---
16
1 file changed, 2 insertions(+), 3 deletions(-)
17
18
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu_bits.h
21
+++ b/target/riscv/cpu_bits.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum CTRType {
23
MHPMEVENTH_BIT_VSINH | \
24
MHPMEVENTH_BIT_VUINH)
25
26
-#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
27
-#define MHPMEVENT_IDX_MASK 0xFFFFF
28
-#define MHPMEVENT_SSCOF_RESVD 16
29
+#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56)
30
+#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK)
31
32
/* RISC-V-specific interrupt pending bits. */
33
#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
34
--
35
2.48.1
diff view generated by jsdifflib
New patch
1
From: Atish Patra <atishp@rivosinc.com>
1
2
3
As per the ISA definition, the upper 8 bits in hpmevent are defined
4
by Sscofpmf for privilege mode filtering and overflow bits while the
5
lower 56 bits are desginated for platform specific hpmevent values.
6
For the reset case, mhpmevent value should have zero in lower 56 bits.
7
Software may set the OF bit to indicate disable interrupt.
8
9
Ensure that correct value is checked after masking while clearing the
10
event encodings.
11
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20250206-pmu_minor_fixes-v2-2-1bb0f4aeb8b4@rivosinc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
target/riscv/pmu.c | 2 +-
19
1 file changed, 1 insertion(+), 1 deletion(-)
20
21
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/pmu.c
24
+++ b/target/riscv/pmu.c
25
@@ -XXX,XX +XXX,XX @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
26
* Expected mhpmevent value is zero for reset case. Remove the current
27
* mapping.
28
*/
29
- if (!value) {
30
+ if (!(value & MHPMEVENT_IDX_MASK)) {
31
g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
32
pmu_remove_event_map,
33
GUINT_TO_POINTER(ctr_idx));
34
--
35
2.48.1
diff view generated by jsdifflib
New patch
1
From: Clément Léger <cleger@rivosinc.com>
1
2
3
As raised by Richard Henderson, these warnings are displayed in user
4
only as well. Since they aren't really useful for the end-user, remove
5
them and add a "TODO" note in the leading comments.
6
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Message-ID: <20250213145640.117275-1-cleger@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/tcg/tcg-cpu.c | 8 +++-----
13
1 file changed, 3 insertions(+), 5 deletions(-)
14
15
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/tcg/tcg-cpu.c
18
+++ b/target/riscv/tcg/tcg-cpu.c
19
@@ -XXX,XX +XXX,XX @@ static void riscv_init_max_cpu_extensions(Object *obj)
20
}
21
22
/*
23
- * ext_smrnmi requires OpenSBI changes that our current
24
+ * TODO: ext_smrnmi requires OpenSBI changes that our current
25
* image does not have. Disable it for now.
26
*/
27
if (cpu->cfg.ext_smrnmi) {
28
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
29
- qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
30
}
31
32
/*
33
- * ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup to
34
- * avoid generating a double trap. OpenSBI does not currently support it,
35
+ * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
36
+ * to avoid generating a double trap. OpenSBI does not currently support it,
37
* disable it for now.
38
*/
39
if (cpu->cfg.ext_smdbltrp) {
40
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
41
- qemu_log("Smdbltrp is disabled in the 'max' type CPU\n");
42
}
43
}
44
45
--
46
2.48.1
47
48
diff view generated by jsdifflib
New patch
1
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
2
3
CTR entries are accessed using ctrsource, ctrtarget and ctrdata
4
registers using the smcsrind/sscsrind extension. This commit extends
5
the csrind extension to support CTR registers.
6
7
ctrsource is accessible through xireg CSR, ctrtarget is accessible
8
through xireg1 and ctrdata is accessible through xireg2 CSR.
9
10
CTR supports maximum depth of 256 entries which are accessed using
11
xiselect range 0x200 to 0x2ff.
12
13
This commit also adds properties to enable the CTR extension. CTR can be
14
enabled using smctr=true and ssctr=true now.
15
16
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
17
Acked-by: Alistair Francis <alistair.francis@wdc.com>
18
Message-ID: <20250212-b4-ctr_upstream_v6-v7-1-4e8159ea33bf@rivosinc.com>
19
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
---
21
target/riscv/cpu.c | 26 ++++++-
22
target/riscv/csr.c | 150 ++++++++++++++++++++++++++++++++++++-
23
target/riscv/tcg/tcg-cpu.c | 11 +++
24
3 files changed, 185 insertions(+), 2 deletions(-)
25
26
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/riscv/cpu.c
29
+++ b/target/riscv/cpu.c
30
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
31
ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
32
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
33
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
34
+ ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
35
+ ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
36
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
37
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
38
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
39
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
40
MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
41
MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
42
MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
43
+ MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
44
+ MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
45
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
46
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
47
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
48
@@ -XXX,XX +XXX,XX @@ static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
49
},
50
};
51
52
+static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
53
+ .ext = CPU_CFG_OFFSET(ext_smctr),
54
+ .implied_misa_exts = RVS,
55
+ .implied_multi_exts = {
56
+ CPU_CFG_OFFSET(ext_sscsrind),
57
+
58
+ RISCV_IMPLIED_EXTS_RULE_END
59
+ },
60
+};
61
+
62
+static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
63
+ .ext = CPU_CFG_OFFSET(ext_ssctr),
64
+ .implied_misa_exts = RVS,
65
+ .implied_multi_exts = {
66
+ CPU_CFG_OFFSET(ext_sscsrind),
67
+
68
+ RISCV_IMPLIED_EXTS_RULE_END
69
+ },
70
+};
71
+
72
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
73
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
74
&RVM_IMPLIED, &RVV_IMPLIED, NULL
75
@@ -XXX,XX +XXX,XX @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
76
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
77
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
78
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
79
- &SUPM_IMPLIED, &SSPM_IMPLIED,
80
+ &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
81
NULL
82
};
83
84
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/target/riscv/csr.c
87
+++ b/target/riscv/csr.c
88
@@ -XXX,XX +XXX,XX @@ static bool xiselect_cd_range(target_ulong isel)
89
return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
90
}
91
92
+static bool xiselect_ctr_range(int csrno, target_ulong isel)
93
+{
94
+ /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
95
+ return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
96
+ csrno < CSR_MIREG;
97
+}
98
+
99
static int rmw_iprio(target_ulong xlen,
100
target_ulong iselect, uint8_t *iprio,
101
target_ulong *val, target_ulong new_val,
102
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
103
return 0;
104
}
105
106
+static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
107
+ target_ulong new_val, target_ulong wr_mask)
108
+{
109
+ /*
110
+ * CTR arrays are treated as circular buffers and TOS always points to next
111
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
112
+ * 0 is always the latest one, traversal is a bit different here. See the
113
+ * below example.
114
+ *
115
+ * Depth = 16.
116
+ *
117
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
118
+ * TOS H
119
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
120
+ */
121
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
122
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
123
+ uint64_t idx;
124
+
125
+ /* Entry greater than depth-1 is read-only zero */
126
+ if (entry >= depth) {
127
+ if (val) {
128
+ *val = 0;
129
+ }
130
+ return 0;
131
+ }
132
+
133
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
134
+ idx = (idx - entry - 1) & (depth - 1);
135
+
136
+ if (val) {
137
+ *val = env->ctr_src[idx];
138
+ }
139
+
140
+ env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
141
+
142
+ return 0;
143
+}
144
+
145
+static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
146
+ target_ulong new_val, target_ulong wr_mask)
147
+{
148
+ /*
149
+ * CTR arrays are treated as circular buffers and TOS always points to next
150
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
151
+ * 0 is always the latest one, traversal is a bit different here. See the
152
+ * below example.
153
+ *
154
+ * Depth = 16.
155
+ *
156
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
157
+ * head H
158
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
159
+ */
160
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
161
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
162
+ uint64_t idx;
163
+
164
+ /* Entry greater than depth-1 is read-only zero */
165
+ if (entry >= depth) {
166
+ if (val) {
167
+ *val = 0;
168
+ }
169
+ return 0;
170
+ }
171
+
172
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
173
+ idx = (idx - entry - 1) & (depth - 1);
174
+
175
+ if (val) {
176
+ *val = env->ctr_dst[idx];
177
+ }
178
+
179
+ env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
180
+
181
+ return 0;
182
+}
183
+
184
+static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
185
+ target_ulong new_val, target_ulong wr_mask)
186
+{
187
+ /*
188
+ * CTR arrays are treated as circular buffers and TOS always points to next
189
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
190
+ * 0 is always the latest one, traversal is a bit different here. See the
191
+ * below example.
192
+ *
193
+ * Depth = 16.
194
+ *
195
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
196
+ * head H
197
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
198
+ */
199
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
200
+ const uint64_t mask = wr_mask & CTRDATA_MASK;
201
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
202
+ uint64_t idx;
203
+
204
+ /* Entry greater than depth-1 is read-only zero */
205
+ if (entry >= depth) {
206
+ if (val) {
207
+ *val = 0;
208
+ }
209
+ return 0;
210
+ }
211
+
212
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
213
+ idx = (idx - entry - 1) & (depth - 1);
214
+
215
+ if (val) {
216
+ *val = env->ctr_data[idx];
217
+ }
218
+
219
+ env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
220
+
221
+ return 0;
222
+}
223
+
224
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
225
target_ulong isel, target_ulong *val,
226
target_ulong new_val, target_ulong wr_mask)
227
@@ -XXX,XX +XXX,XX @@ done:
228
return ret;
229
}
230
231
+static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
232
+ target_ulong isel, target_ulong *val,
233
+ target_ulong new_val, target_ulong wr_mask)
234
+{
235
+ if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
236
+ return -EINVAL;
237
+ }
238
+
239
+ if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
240
+ return rmw_ctrsource(env, isel, val, new_val, wr_mask);
241
+ } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
242
+ return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
243
+ } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
244
+ return rmw_ctrdata(env, isel, val, new_val, wr_mask);
245
+ } else if (val) {
246
+ *val = 0;
247
+ }
248
+
249
+ return 0;
250
+}
251
+
252
/*
253
* rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
254
*
255
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
256
target_ulong isel, target_ulong *val,
257
target_ulong new_val, target_ulong wr_mask)
258
{
259
- int ret = -EINVAL;
260
bool virt = csrno == CSR_VSIREG ? true : false;
261
+ int ret = -EINVAL;
262
263
if (xiselect_cd_range(isel)) {
264
ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
265
+ } else if (xiselect_ctr_range(csrno, isel)) {
266
+ ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
267
} else {
268
/*
269
* As per the specification, access to unimplented region is undefined
270
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
271
index XXXXXXX..XXXXXXX 100644
272
--- a/target/riscv/tcg/tcg-cpu.c
273
+++ b/target/riscv/tcg/tcg-cpu.c
274
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
275
return;
276
}
277
278
+ if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
279
+ (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
280
+ if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
281
+ cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
282
+ error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
283
+ return;
284
+ }
285
+ cpu->cfg.ext_smctr = false;
286
+ cpu->cfg.ext_ssctr = false;
287
+ }
288
+
289
/*
290
* Disable isa extensions based on priv spec after we
291
* validated and set everything we need.
292
--
293
2.48.1
diff view generated by jsdifflib
New patch
1
From: Rob Bradford <rbradford@rivosinc.com>
1
2
3
When running in TOR mode (Top of Range) the next PMP entry controls
4
whether the entry is locked. However simply checking if the PMP_LOCK bit
5
is set is not sufficient with the Smepmp extension which now provides a
6
bit (mseccfg.RLB (Rule Lock Bypass)) to disregard the lock bits. In
7
order to respect this bit use the convenience pmp_is_locked() function
8
rather than directly checking PMP_LOCK since this function checks
9
mseccfg.RLB.
10
11
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250210153713.343626-1-rbradford@rivosinc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/pmp.c | 2 +-
18
1 file changed, 1 insertion(+), 1 deletion(-)
19
20
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/pmp.c
23
+++ b/target/riscv/pmp.c
24
@@ -XXX,XX +XXX,XX @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
25
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
26
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
27
28
- if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
29
+ if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
30
qemu_log_mask(LOG_GUEST_ERROR,
31
"ignoring pmpaddr write - pmpcfg + 1 locked\n");
32
return;
33
--
34
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Update headers to retrieve the latest KVM caps for RISC-V.
4
5
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
6
Message-ID: <20250221153758.652078-2-dbarboza@ventanamicro.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
include/standard-headers/linux/ethtool.h | 4 +
10
include/standard-headers/linux/fuse.h | 76 ++++++++++++++++++-
11
.../linux/input-event-codes.h | 1 +
12
include/standard-headers/linux/pci_regs.h | 16 ++--
13
include/standard-headers/linux/virtio_pci.h | 14 ++++
14
linux-headers/asm-arm64/kvm.h | 3 -
15
linux-headers/asm-loongarch/kvm_para.h | 1 +
16
linux-headers/asm-riscv/kvm.h | 7 +-
17
linux-headers/asm-x86/kvm.h | 1 +
18
linux-headers/linux/iommufd.h | 35 ++++++---
19
linux-headers/linux/kvm.h | 8 +-
20
linux-headers/linux/stddef.h | 13 +++-
21
linux-headers/linux/vduse.h | 2 +-
22
13 files changed, 146 insertions(+), 35 deletions(-)
23
24
diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/standard-headers/linux/ethtool.h
27
+++ b/include/standard-headers/linux/ethtool.h
28
@@ -XXX,XX +XXX,XX @@ enum ethtool_link_ext_substate_module {
29
* @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
30
* @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
31
* @ETH_SS_STATS_RMON: names of RMON statistics
32
+ * @ETH_SS_STATS_PHY: names of PHY(dev) statistics
33
+ * @ETH_SS_TS_FLAGS: hardware timestamping flags
34
*
35
* @ETH_SS_COUNT: number of defined string sets
36
*/
37
@@ -XXX,XX +XXX,XX @@ enum ethtool_stringset {
38
    ETH_SS_STATS_ETH_MAC,
39
    ETH_SS_STATS_ETH_CTRL,
40
    ETH_SS_STATS_RMON,
41
+    ETH_SS_STATS_PHY,
42
+    ETH_SS_TS_FLAGS,
43
44
    /* add new constants above here */
45
    ETH_SS_COUNT
46
diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/include/standard-headers/linux/fuse.h
49
+++ b/include/standard-headers/linux/fuse.h
50
@@ -XXX,XX +XXX,XX @@
51
*
52
* 7.41
53
* - add FUSE_ALLOW_IDMAP
54
+ * 7.42
55
+ * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data
56
+ * structures:
57
+ * - struct fuse_uring_ent_in_out
58
+ * - struct fuse_uring_req_header
59
+ * - struct fuse_uring_cmd_req
60
+ * - FUSE_URING_IN_OUT_HEADER_SZ
61
+ * - FUSE_URING_OP_IN_OUT_SZ
62
+ * - enum fuse_uring_cmd
63
*/
64
65
#ifndef _LINUX_FUSE_H
66
@@ -XXX,XX +XXX,XX @@
67
#define FUSE_KERNEL_VERSION 7
68
69
/** Minor version number of this interface */
70
-#define FUSE_KERNEL_MINOR_VERSION 41
71
+#define FUSE_KERNEL_MINOR_VERSION 42
72
73
/** The node ID of the root inode */
74
#define FUSE_ROOT_ID 1
75
@@ -XXX,XX +XXX,XX @@ struct fuse_file_lock {
76
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
77
*         of the request ID indicates resend requests
78
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
79
+ * FUSE_OVER_IO_URING: Indicate that client supports io-uring
80
*/
81
#define FUSE_ASYNC_READ        (1 << 0)
82
#define FUSE_POSIX_LOCKS    (1 << 1)
83
@@ -XXX,XX +XXX,XX @@ struct fuse_file_lock {
84
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
85
#define FUSE_DIRECT_IO_RELAX    FUSE_DIRECT_IO_ALLOW_MMAP
86
#define FUSE_ALLOW_IDMAP    (1ULL << 40)
87
+#define FUSE_OVER_IO_URING    (1ULL << 41)
88
89
/**
90
* CUSE INIT request/reply flags
91
@@ -XXX,XX +XXX,XX @@ struct fuse_supp_groups {
92
    uint32_t    groups[];
93
};
94
95
+/**
96
+ * Size of the ring buffer header
97
+ */
98
+#define FUSE_URING_IN_OUT_HEADER_SZ 128
99
+#define FUSE_URING_OP_IN_OUT_SZ 128
100
+
101
+/* Used as part of the fuse_uring_req_header */
102
+struct fuse_uring_ent_in_out {
103
+    uint64_t flags;
104
+
105
+    /*
106
+     * commit ID to be used in a reply to a ring request (see also
107
+     * struct fuse_uring_cmd_req)
108
+     */
109
+    uint64_t commit_id;
110
+
111
+    /* size of user payload buffer */
112
+    uint32_t payload_sz;
113
+    uint32_t padding;
114
+
115
+    uint64_t reserved;
116
+};
117
+
118
+/**
119
+ * Header for all fuse-io-uring requests
120
+ */
121
+struct fuse_uring_req_header {
122
+    /* struct fuse_in_header / struct fuse_out_header */
123
+    char in_out[FUSE_URING_IN_OUT_HEADER_SZ];
124
+
125
+    /* per op code header */
126
+    char op_in[FUSE_URING_OP_IN_OUT_SZ];
127
+
128
+    struct fuse_uring_ent_in_out ring_ent_in_out;
129
+};
130
+
131
+/**
132
+ * sqe commands to the kernel
133
+ */
134
+enum fuse_uring_cmd {
135
+    FUSE_IO_URING_CMD_INVALID = 0,
136
+
137
+    /* register the request buffer and fetch a fuse request */
138
+    FUSE_IO_URING_CMD_REGISTER = 1,
139
+
140
+    /* commit fuse request result and fetch next request */
141
+    FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2,
142
+};
143
+
144
+/**
145
+ * In the 80B command area of the SQE.
146
+ */
147
+struct fuse_uring_cmd_req {
148
+    uint64_t flags;
149
+
150
+    /* entry identifier for commits */
151
+    uint64_t commit_id;
152
+
153
+    /* queue the command is for (queue index) */
154
+    uint16_t qid;
155
+    uint8_t padding[6];
156
+};
157
+
158
#endif /* _LINUX_FUSE_H */
159
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
160
index XXXXXXX..XXXXXXX 100644
161
--- a/include/standard-headers/linux/input-event-codes.h
162
+++ b/include/standard-headers/linux/input-event-codes.h
163
@@ -XXX,XX +XXX,XX @@
164
#define KEY_NOTIFICATION_CENTER    0x1bc    /* Show/hide the notification center */
165
#define KEY_PICKUP_PHONE    0x1bd    /* Answer incoming call */
166
#define KEY_HANGUP_PHONE    0x1be    /* Decline incoming call */
167
+#define KEY_LINK_PHONE        0x1bf /* AL Phone Syncing */
168
169
#define KEY_DEL_EOL        0x1c0
170
#define KEY_DEL_EOS        0x1c1
171
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
172
index XXXXXXX..XXXXXXX 100644
173
--- a/include/standard-headers/linux/pci_regs.h
174
+++ b/include/standard-headers/linux/pci_regs.h
175
@@ -XXX,XX +XXX,XX @@
176
#define PCI_EXP_DEVSTA_TRPND    0x0020    /* Transactions Pending */
177
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1    12    /* v1 endpoints without link end here */
178
#define PCI_EXP_LNKCAP        0x0c    /* Link Capabilities */
179
-#define PCI_EXP_LNKCAP_SLS    0x0000000f /* Supported Link Speeds */
180
+#define PCI_EXP_LNKCAP_SLS    0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
181
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
182
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
183
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
184
@@ -XXX,XX +XXX,XX @@
185
#define PCI_EXP_DEVCAP2_OBFF_MSG    0x00040000 /* New message signaling */
186
#define PCI_EXP_DEVCAP2_OBFF_WAKE    0x00080000 /* Re-use WAKE# for OBFF */
187
#define PCI_EXP_DEVCAP2_EE_PREFIX    0x00200000 /* End-End TLP Prefix */
188
+#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX    0x00c00000 /* Max End-End TLP Prefixes */
189
#define PCI_EXP_DEVCTL2        0x28    /* Device Control 2 */
190
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT    0x000f    /* Completion Timeout Value */
191
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS    0x0010    /* Completion Timeout Disable */
192
@@ -XXX,XX +XXX,XX @@
193
    /* Same bits as above */
194
#define PCI_ERR_CAP        0x18    /* Advanced Error Capabilities & Ctrl*/
195
#define PCI_ERR_CAP_FEP(x)    ((x) & 0x1f)    /* First Error Pointer */
196
-#define PCI_ERR_CAP_ECRC_GENC    0x00000020    /* ECRC Generation Capable */
197
-#define PCI_ERR_CAP_ECRC_GENE    0x00000040    /* ECRC Generation Enable */
198
-#define PCI_ERR_CAP_ECRC_CHKC    0x00000080    /* ECRC Check Capable */
199
-#define PCI_ERR_CAP_ECRC_CHKE    0x00000100    /* ECRC Check Enable */
200
+#define PCI_ERR_CAP_ECRC_GENC        0x00000020 /* ECRC Generation Capable */
201
+#define PCI_ERR_CAP_ECRC_GENE        0x00000040 /* ECRC Generation Enable */
202
+#define PCI_ERR_CAP_ECRC_CHKC        0x00000080 /* ECRC Check Capable */
203
+#define PCI_ERR_CAP_ECRC_CHKE        0x00000100 /* ECRC Check Enable */
204
+#define PCI_ERR_CAP_PREFIX_LOG_PRESENT    0x00000800 /* TLP Prefix Log Present */
205
#define PCI_ERR_HEADER_LOG    0x1c    /* Header Log Register (16 bytes) */
206
#define PCI_ERR_ROOT_COMMAND    0x2c    /* Root Error Command */
207
#define PCI_ERR_ROOT_CMD_COR_EN    0x00000001 /* Correctable Err Reporting Enable */
208
@@ -XXX,XX +XXX,XX @@
209
#define PCI_ERR_ROOT_FATAL_RCV        0x00000040 /* Fatal Received */
210
#define PCI_ERR_ROOT_AER_IRQ        0xf8000000 /* Advanced Error Interrupt Message Number */
211
#define PCI_ERR_ROOT_ERR_SRC    0x34    /* Error Source Identification */
212
+#define PCI_ERR_PREFIX_LOG    0x38    /* TLP Prefix LOG Register (up to 16 bytes) */
213
214
/* Virtual Channel */
215
#define PCI_VC_PORT_CAP1    0x04
216
@@ -XXX,XX +XXX,XX @@
217
#define PCI_ACS_CTRL        0x06    /* ACS Control Register */
218
#define PCI_ACS_EGRESS_CTL_V    0x08    /* ACS Egress Control Vector */
219
220
-#define PCI_VSEC_HDR        4    /* extended cap - vendor-specific */
221
-#define PCI_VSEC_HDR_LEN_SHIFT    20    /* shift for length field */
222
-
223
/* SATA capability */
224
#define PCI_SATA_REGS        4    /* SATA REGs specifier */
225
#define PCI_SATA_REGS_MASK    0xF    /* location - BAR#/inline */
226
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
227
index XXXXXXX..XXXXXXX 100644
228
--- a/include/standard-headers/linux/virtio_pci.h
229
+++ b/include/standard-headers/linux/virtio_pci.h
230
@@ -XXX,XX +XXX,XX @@
231
#define VIRTIO_PCI_CAP_PCI_CFG        5
232
/* Additional shared memory capability */
233
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
234
+/* PCI vendor data configuration */
235
+#define VIRTIO_PCI_CAP_VENDOR_CFG    9
236
237
/* This is the PCI capability header: */
238
struct virtio_pci_cap {
239
@@ -XXX,XX +XXX,XX @@ struct virtio_pci_cap {
240
    uint32_t length;        /* Length of the structure, in bytes. */
241
};
242
243
+/* This is the PCI vendor data capability header: */
244
+struct virtio_pci_vndr_data {
245
+    uint8_t cap_vndr;        /* Generic PCI field: PCI_CAP_ID_VNDR */
246
+    uint8_t cap_next;        /* Generic PCI field: next ptr. */
247
+    uint8_t cap_len;        /* Generic PCI field: capability length */
248
+    uint8_t cfg_type;        /* Identifies the structure. */
249
+    uint16_t vendor_id;    /* Identifies the vendor-specific format. */
250
+    /* For Vendor Definition */
251
+    /* Pads structure to a multiple of 4 bytes */
252
+    /* Reads must not have side effects */
253
+};
254
+
255
struct virtio_pci_cap64 {
256
    struct virtio_pci_cap cap;
257
    uint32_t offset_hi; /* Most sig 32 bits of offset */
258
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
259
index XXXXXXX..XXXXXXX 100644
260
--- a/linux-headers/asm-arm64/kvm.h
261
+++ b/linux-headers/asm-arm64/kvm.h
262
@@ -XXX,XX +XXX,XX @@
263
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
264
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
265
266
-#define KVM_REG_SIZE(id)                        \
267
-    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
268
-
269
struct kvm_regs {
270
    struct user_pt_regs regs;    /* sp = sp_el0 */
271
272
diff --git a/linux-headers/asm-loongarch/kvm_para.h b/linux-headers/asm-loongarch/kvm_para.h
273
index XXXXXXX..XXXXXXX 100644
274
--- a/linux-headers/asm-loongarch/kvm_para.h
275
+++ b/linux-headers/asm-loongarch/kvm_para.h
276
@@ -XXX,XX +XXX,XX @@
277
#define KVM_FEATURE_STEAL_TIME        2
278
/* BIT 24 - 31 are features configurable by user space vmm */
279
#define KVM_FEATURE_VIRT_EXTIOI    24
280
+#define KVM_FEATURE_USER_HCALL        25
281
282
#endif /* _ASM_KVM_PARA_H */
283
diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h
284
index XXXXXXX..XXXXXXX 100644
285
--- a/linux-headers/asm-riscv/kvm.h
286
+++ b/linux-headers/asm-riscv/kvm.h
287
@@ -XXX,XX +XXX,XX @@ enum KVM_RISCV_ISA_EXT_ID {
288
    KVM_RISCV_ISA_EXT_SSNPM,
289
    KVM_RISCV_ISA_EXT_SVADE,
290
    KVM_RISCV_ISA_EXT_SVADU,
291
+    KVM_RISCV_ISA_EXT_SVVPTC,
292
+    KVM_RISCV_ISA_EXT_ZABHA,
293
+    KVM_RISCV_ISA_EXT_ZICCRSE,
294
    KVM_RISCV_ISA_EXT_MAX,
295
};
296
297
@@ -XXX,XX +XXX,XX @@ enum KVM_RISCV_SBI_EXT_ID {
298
    KVM_RISCV_SBI_EXT_VENDOR,
299
    KVM_RISCV_SBI_EXT_DBCN,
300
    KVM_RISCV_SBI_EXT_STA,
301
+    KVM_RISCV_SBI_EXT_SUSP,
302
    KVM_RISCV_SBI_EXT_MAX,
303
};
304
305
@@ -XXX,XX +XXX,XX @@ struct kvm_riscv_sbi_sta {
306
#define KVM_RISCV_TIMER_STATE_OFF    0
307
#define KVM_RISCV_TIMER_STATE_ON    1
308
309
-#define KVM_REG_SIZE(id)        \
310
-    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
311
-
312
/* If you need to interpret the index values, here is the key: */
313
#define KVM_REG_RISCV_TYPE_MASK        0x00000000FF000000
314
#define KVM_REG_RISCV_TYPE_SHIFT    24
315
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
316
index XXXXXXX..XXXXXXX 100644
317
--- a/linux-headers/asm-x86/kvm.h
318
+++ b/linux-headers/asm-x86/kvm.h
319
@@ -XXX,XX +XXX,XX @@ struct kvm_hyperv_eventfd {
320
#define KVM_X86_SEV_VM        2
321
#define KVM_X86_SEV_ES_VM    3
322
#define KVM_X86_SNP_VM        4
323
+#define KVM_X86_TDX_VM        5
324
325
#endif /* _ASM_X86_KVM_H */
326
diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h
327
index XXXXXXX..XXXXXXX 100644
328
--- a/linux-headers/linux/iommufd.h
329
+++ b/linux-headers/linux/iommufd.h
330
@@ -XXX,XX +XXX,XX @@ struct iommu_ioas_unmap {
331
* ioctl(IOMMU_OPTION_HUGE_PAGES)
332
* @IOMMU_OPTION_RLIMIT_MODE:
333
* Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
334
- * to invoke this. Value 0 (default) is user based accouting, 1 uses process
335
+ * to invoke this. Value 0 (default) is user based accounting, 1 uses process
336
* based accounting. Global option, object_id must be 0
337
* @IOMMU_OPTION_HUGE_PAGES:
338
* Value 1 (default) allows contiguous pages to be combined when generating
339
@@ -XXX,XX +XXX,XX @@ struct iommu_vfio_ioas {
340
* @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
341
* domain can be attached to any PASID on the device.
342
* Any domain attached to the non-PASID part of the
343
- * device must also be flaged, otherwise attaching a
344
+ * device must also be flagged, otherwise attaching a
345
* PASID will blocked.
346
* If IOMMU does not support PASID it will return
347
* error (-EOPNOTSUPP).
348
@@ -XXX,XX +XXX,XX @@ struct iommu_hw_info_vtd {
349
* For the details of @idr, @iidr and @aidr, please refer to the chapters
350
* from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
351
*
352
- * User space should read the underlying ARM SMMUv3 hardware information for
353
- * the list of supported features.
354
+ * This reports the raw HW capability, and not all bits are meaningful to be
355
+ * read by userspace. Only the following fields should be used:
356
*
357
- * Note that these values reflect the raw HW capability, without any insight if
358
- * any required kernel driver support is present. Bits may be set indicating the
359
- * HW has functionality that is lacking kernel software support, such as BTM. If
360
- * a VMM is using this information to construct emulated copies of these
361
- * registers it should only forward bits that it knows it can support.
362
+ * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN , CD2L, ASID16, TTF
363
+ * idr[1]: SIDSIZE, SSIDSIZE
364
+ * idr[3]: BBML, RIL
365
+ * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
366
*
367
- * In future, presence of required kernel support will be indicated in flags.
368
+ * - S1P should be assumed to be true if a NESTED HWPT can be created
369
+ * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
370
+ * true.
371
+ * - ATS is a per-device property. If the VMM describes any devices as ATS
372
+ * capable in ACPI/DT it should set the corresponding idr.
373
+ *
374
+ * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
375
+ * important that VMMs do not read bits outside the list to allow for
376
+ * compatibility with future kernels. Several features in the SMMUv3
377
+ * architecture are not currently supported by the kernel for nesting: HTTU,
378
+ * BTM, MPAM and others.
379
*/
380
struct iommu_hw_info_arm_smmuv3 {
381
    __u32 flags;
382
@@ -XXX,XX +XXX,XX @@ struct iommu_hwpt_vtd_s1_invalidate {
383
};
384
385
/**
386
- * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cahce invalidation
387
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
388
* (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
389
* @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
390
* Must be little-endian.
391
@@ -XXX,XX +XXX,XX @@ enum iommu_hwpt_pgfault_perm {
392
* @pasid: Process Address Space ID
393
* @grpid: Page Request Group Index
394
* @perm: Combination of enum iommu_hwpt_pgfault_perm
395
+ * @__reserved: Must be 0.
396
* @addr: Fault address
397
* @length: a hint of how much data the requestor is expecting to fetch. For
398
* example, if the PRI initiator knows it is going to do a 10MB
399
@@ -XXX,XX +XXX,XX @@ struct iommu_hwpt_pgfault {
400
    __u32 pasid;
401
    __u32 grpid;
402
    __u32 perm;
403
-    __u64 addr;
404
+    __u32 __reserved;
405
+    __aligned_u64 addr;
406
    __u32 length;
407
    __u32 cookie;
408
};
409
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
410
index XXXXXXX..XXXXXXX 100644
411
--- a/linux-headers/linux/kvm.h
412
+++ b/linux-headers/linux/kvm.h
413
@@ -XXX,XX +XXX,XX @@ struct kvm_ioeventfd {
414
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
415
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
416
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
417
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
418
- KVM_X86_DISABLE_EXITS_HLT | \
419
- KVM_X86_DISABLE_EXITS_PAUSE | \
420
- KVM_X86_DISABLE_EXITS_CSTATE)
421
422
/* for KVM_ENABLE_CAP */
423
struct kvm_enable_cap {
424
@@ -XXX,XX +XXX,XX @@ struct kvm_dirty_tlb {
425
426
#define KVM_REG_SIZE_SHIFT    52
427
#define KVM_REG_SIZE_MASK    0x00f0000000000000ULL
428
+
429
+#define KVM_REG_SIZE(id)        \
430
+    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
431
+
432
#define KVM_REG_SIZE_U8        0x0000000000000000ULL
433
#define KVM_REG_SIZE_U16    0x0010000000000000ULL
434
#define KVM_REG_SIZE_U32    0x0020000000000000ULL
435
diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h
436
index XXXXXXX..XXXXXXX 100644
437
--- a/linux-headers/linux/stddef.h
438
+++ b/linux-headers/linux/stddef.h
439
@@ -XXX,XX +XXX,XX @@
440
#define __always_inline __inline__
441
#endif
442
443
+/* Not all C++ standards support type declarations inside an anonymous union */
444
+#ifndef __cplusplus
445
+#define __struct_group_tag(TAG)        TAG
446
+#else
447
+#define __struct_group_tag(TAG)
448
+#endif
449
+
450
/**
451
* __struct_group() - Create a mirrored named and anonyomous struct
452
*
453
@@ -XXX,XX +XXX,XX @@
454
* and size: one anonymous and one named. The former's members can be used
455
* normally without sub-struct naming, and the latter can be used to
456
* reason about the start, end, and size of the group of struct members.
457
- * The named struct can also be explicitly tagged for layer reuse, as well
458
- * as both having struct attributes appended.
459
+ * The named struct can also be explicitly tagged for layer reuse (C only),
460
+ * as well as both having struct attributes appended.
461
*/
462
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
463
    union { \
464
        struct { MEMBERS } ATTRS; \
465
-        struct TAG { MEMBERS } ATTRS NAME; \
466
+        struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
467
    } ATTRS
468
469
#ifdef __cplusplus
470
diff --git a/linux-headers/linux/vduse.h b/linux-headers/linux/vduse.h
471
index XXXXXXX..XXXXXXX 100644
472
--- a/linux-headers/linux/vduse.h
473
+++ b/linux-headers/linux/vduse.h
474
@@ -XXX,XX +XXX,XX @@
475
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
476
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
477
#ifndef _VDUSE_H_
478
#define _VDUSE_H_
479
480
--
481
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
At this moment ziccrse is a TCG always enabled named feature for
4
priv_ver > 1.11 that has no exclusive flag. In the next patch we'll make
5
the KVM driver turn ziccrse off if the extension isn't available in the
6
host, and we'll need an ext_ziccrse flag in the CPU state for that.
7
8
Create an exclusive flag for it like we do with other named features.
9
As with any named features we already have, it won't be exposed to
10
users.
11
12
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
14
Message-ID: <20250221153758.652078-3-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/cpu_cfg.h | 3 +++
18
target/riscv/cpu.c | 3 ++-
19
target/riscv/tcg/tcg-cpu.c | 2 ++
20
3 files changed, 7 insertions(+), 1 deletion(-)
21
22
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu_cfg.h
25
+++ b/target/riscv/cpu_cfg.h
26
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
27
bool has_priv_1_12;
28
bool has_priv_1_11;
29
30
+ /* Always enabled for TCG if has_priv_1_11 */
31
+ bool ext_ziccrse;
32
+
33
/* Vendor-specific custom extensions */
34
bool ext_xtheadba;
35
bool ext_xtheadbb;
36
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/riscv/cpu.c
39
+++ b/target/riscv/cpu.c
40
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
41
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
42
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
43
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
44
- ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
45
+ ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
46
ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
47
ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
48
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
49
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
50
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
51
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
52
MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
53
+ MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
54
55
{ },
56
};
57
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/target/riscv/tcg/tcg-cpu.c
60
+++ b/target/riscv/tcg/tcg-cpu.c
61
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
62
63
cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
64
cpu->cfg.ext_ssstateen;
65
+
66
+ cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
67
}
68
69
static void riscv_cpu_validate_g(RISCVCPU *cpu)
70
--
71
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Expose ziccrse, zabha and svvptc.
4
5
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
6
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
7
Message-ID: <20250221153758.652078-4-dbarboza@ventanamicro.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/kvm/kvm-cpu.c | 3 +++
11
1 file changed, 3 insertions(+)
12
13
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/kvm/kvm-cpu.c
16
+++ b/target/riscv/kvm/kvm-cpu.c
17
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
18
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
19
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
20
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
21
+ KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
22
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
23
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
24
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
25
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
26
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
27
KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
28
KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
29
+ KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
30
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
31
KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
32
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
33
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
34
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
35
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
36
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
37
+ KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
38
};
39
40
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
41
--
42
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
This header is incomplete, i.e. it is using definitions that are being
4
supplied by the .c files that are including it.
5
6
Adding this header into a fresh .c file will result in errors:
7
8
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:30:17: error: field ‘parent_obj’ has incomplete type
9
30 | DeviceState parent_obj;
10
| ^~~~~~~~~~
11
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:50:5: error: unknown type name ‘dma_addr_t’; did you mean ‘in_addr_t’?
12
50 | dma_addr_t cq_addr; /* Command queue base physical address */
13
| ^~~~~~~~~~
14
| in_addr_t
15
(...)
16
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:62:5: error: unknown type name ‘QemuThread’; did you mean ‘GThread’?
17
62 | QemuThread core_proc; /* Background processing thread */
18
| ^~~~~~~~~~
19
| GThread
20
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:63:5: error: unknown type name ‘QemuCond’
21
63 | QemuCond core_cond; /* Background processing wake up signal */
22
| ^~~~~~~~
23
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:71:18: error: field ‘trap_as’ has incomplete type
24
71 | AddressSpace trap_as;
25
| ^~~~~~~
26
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:72:18: error: field ‘trap_mr’ has incomplete type
27
72 | MemoryRegion trap_mr;
28
| ^~~~~~~
29
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:80:18: error: field ‘regs_mr’ has incomplete type
30
80 | MemoryRegion regs_mr;
31
| ^~~~~~~
32
33
Fix it by adding the missing headers for these definitions.
34
35
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
36
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
37
Message-ID: <20250224190826.1858473-2-dbarboza@ventanamicro.com>
38
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
39
---
40
hw/riscv/riscv-iommu.h | 2 ++
41
1 file changed, 2 insertions(+)
42
43
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/hw/riscv/riscv-iommu.h
46
+++ b/hw/riscv/riscv-iommu.h
47
@@ -XXX,XX +XXX,XX @@
48
#define HW_RISCV_IOMMU_STATE_H
49
50
#include "qom/object.h"
51
+#include "hw/qdev-properties.h"
52
+#include "system/dma.h"
53
#include "hw/riscv/iommu.h"
54
#include "hw/riscv/riscv-iommu-bits.h"
55
56
--
57
2.48.1
58
59
diff view generated by jsdifflib
New patch
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
2
3
Add the relevant HPM (High Performance Monitor) bits that we'll be using
4
in the next patches.
5
6
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250224190826.1858473-3-dbarboza@ventanamicro.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
hw/riscv/riscv-iommu-bits.h | 47 +++++++++++++++++++++++++++++++++++++
13
1 file changed, 47 insertions(+)
14
15
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/riscv/riscv-iommu-bits.h
18
+++ b/hw/riscv/riscv-iommu-bits.h
19
@@ -XXX,XX +XXX,XX @@ struct riscv_iommu_pq_record {
20
#define RISCV_IOMMU_CAP_ATS BIT_ULL(25)
21
#define RISCV_IOMMU_CAP_T2GPA BIT_ULL(26)
22
#define RISCV_IOMMU_CAP_IGS GENMASK_ULL(29, 28)
23
+#define RISCV_IOMMU_CAP_HPM BIT_ULL(30)
24
#define RISCV_IOMMU_CAP_DBG BIT_ULL(31)
25
#define RISCV_IOMMU_CAP_PAS GENMASK_ULL(37, 32)
26
#define RISCV_IOMMU_CAP_PD8 BIT_ULL(38)
27
@@ -XXX,XX +XXX,XX @@ enum {
28
RISCV_IOMMU_INTR_COUNT
29
};
30
31
+#define RISCV_IOMMU_IOCOUNT_NUM 31
32
+
33
+/* 5.19 Performance monitoring counter overflow status (32bits) */
34
+#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058
35
+#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0)
36
+
37
+/* 5.20 Performance monitoring counter inhibits (32bits) */
38
+#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C
39
+#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0)
40
+
41
+/* 5.21 Performance monitoring cycles counter (64bits) */
42
+#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060
43
+#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0)
44
+#define RISCV_IOMMU_IOHPMCYCLES_OVF BIT_ULL(63)
45
+
46
+/* 5.22 Performance monitoring event counters (31 * 64bits) */
47
+#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068
48
+#define RISCV_IOMMU_REG_IOHPMCTR(_n) \
49
+ (RISCV_IOMMU_REG_IOHPMCTR_BASE + (_n * 0x8))
50
+
51
+/* 5.23 Performance monitoring event selectors (31 * 64bits) */
52
+#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160
53
+#define RISCV_IOMMU_REG_IOHPMEVT(_n) \
54
+ (RISCV_IOMMU_REG_IOHPMEVT_BASE + (_n * 0x8))
55
+#define RISCV_IOMMU_IOHPMEVT_EVENT_ID GENMASK_ULL(14, 0)
56
+#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15)
57
+#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16)
58
+#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36)
59
+#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60)
60
+#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61)
61
+#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62)
62
+#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63)
63
+
64
+enum RISCV_IOMMU_HPMEVENT_id {
65
+ RISCV_IOMMU_HPMEVENT_INVALID = 0,
66
+ RISCV_IOMMU_HPMEVENT_URQ = 1,
67
+ RISCV_IOMMU_HPMEVENT_TRQ = 2,
68
+ RISCV_IOMMU_HPMEVENT_ATS_RQ = 3,
69
+ RISCV_IOMMU_HPMEVENT_TLB_MISS = 4,
70
+ RISCV_IOMMU_HPMEVENT_DD_WALK = 5,
71
+ RISCV_IOMMU_HPMEVENT_PD_WALK = 6,
72
+ RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7,
73
+ RISCV_IOMMU_HPMEVENT_G_WALKS = 8,
74
+ RISCV_IOMMU_HPMEVENT_MAX = 9
75
+};
76
+
77
/* 5.24 Translation request IOVA (64bits) */
78
#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258
79
80
--
81
2.48.1
diff view generated by jsdifflib
1
From: Anup Patel <anup.patel@wdc.com>
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
2
2
3
The RISC-V AIA (Advanced Interrupt Architecture) defines a new
3
The HPM (Hardware Performance Monitor) support consists of almost 7
4
interrupt controller for MSIs (message signal interrupts) called
4
hundred lines that would be put on top of the base riscv-iommu
5
IMSIC (Incoming Message Signal Interrupt Controller). The IMSIC
5
emulation.
6
is per-HART device and also suppport virtualizaiton of MSIs using
7
dedicated VS-level guest interrupt files.
8
6
9
This patch adds device emulation for RISC-V AIA IMSIC which
7
To avoid clogging riscv-iommu.c, add a separated riscv-iommu-hpm file
10
supports M-level, S-level, and VS-level MSIs.
8
that will contain HPM specific code.
11
9
12
Signed-off-by: Anup Patel <anup.patel@wdc.com>
10
We'll start by adding riscv_iommu_hpmcycle_read(), a helper that will be
13
Signed-off-by: Anup Patel <anup@brainfault.org>
11
called during the riscv_iommu_mmio_read() callback.
14
Reviewed-by: Frank Chang <frank.chang@sifive.com>
12
15
Message-Id: <20220220085526.808674-3-anup@brainfault.org>
13
This change will have no effect on the existing emulation since we're
14
not declaring HPM feature support.
15
16
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
17
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
18
Acked-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20250224190826.1858473-4-dbarboza@ventanamicro.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
21
---
18
include/hw/intc/riscv_imsic.h | 68 ++++++
22
hw/riscv/riscv-iommu-hpm.h | 27 +++++++++++++++++++
19
hw/intc/riscv_imsic.c | 448 ++++++++++++++++++++++++++++++++++
23
hw/riscv/riscv-iommu.h | 4 +++
20
hw/intc/Kconfig | 3 +
24
hw/riscv/riscv-iommu-hpm.c | 54 ++++++++++++++++++++++++++++++++++++++
21
hw/intc/meson.build | 1 +
25
hw/riscv/riscv-iommu.c | 24 ++++++++++++++++-
22
4 files changed, 520 insertions(+)
26
hw/riscv/meson.build | 3 ++-
23
create mode 100644 include/hw/intc/riscv_imsic.h
27
5 files changed, 110 insertions(+), 2 deletions(-)
24
create mode 100644 hw/intc/riscv_imsic.c
28
create mode 100644 hw/riscv/riscv-iommu-hpm.h
29
create mode 100644 hw/riscv/riscv-iommu-hpm.c
25
30
26
diff --git a/include/hw/intc/riscv_imsic.h b/include/hw/intc/riscv_imsic.h
31
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
27
new file mode 100644
32
new file mode 100644
28
index XXXXXXX..XXXXXXX
33
index XXXXXXX..XXXXXXX
29
--- /dev/null
34
--- /dev/null
30
+++ b/include/hw/intc/riscv_imsic.h
35
+++ b/hw/riscv/riscv-iommu-hpm.h
31
@@ -XXX,XX +XXX,XX @@
36
@@ -XXX,XX +XXX,XX @@
32
+/*
37
+/*
33
+ * RISC-V IMSIC (Incoming Message Signal Interrupt Controller) interface
38
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
34
+ *
39
+ *
35
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
40
+ * Copyright (C) 2022-2023 Rivos Inc.
36
+ *
41
+ *
37
+ * This program is free software; you can redistribute it and/or modify it
42
+ * This program is free software; you can redistribute it and/or modify it
38
+ * under the terms and conditions of the GNU General Public License,
43
+ * under the terms and conditions of the GNU General Public License,
39
+ * version 2 or later, as published by the Free Software Foundation.
44
+ * version 2 or later, as published by the Free Software Foundation.
40
+ *
45
+ *
41
+ * This program is distributed in the hope it will be useful, but WITHOUT
46
+ * This program is distributed in the hope that it will be useful,
42
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
47
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
43
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
48
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44
+ * more details.
49
+ * GNU General Public License for more details.
45
+ *
50
+ *
46
+ * You should have received a copy of the GNU General Public License along with
51
+ * You should have received a copy of the GNU General Public License along
47
+ * this program. If not, see <http://www.gnu.org/licenses/>.
52
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
48
+ */
53
+ */
49
+
54
+
50
+#ifndef HW_RISCV_IMSIC_H
55
+#ifndef HW_RISCV_IOMMU_HPM_H
51
+#define HW_RISCV_IMSIC_H
56
+#define HW_RISCV_IOMMU_HPM_H
52
+
57
+
53
+#include "hw/sysbus.h"
54
+#include "qom/object.h"
58
+#include "qom/object.h"
59
+#include "hw/riscv/riscv-iommu.h"
55
+
60
+
56
+#define TYPE_RISCV_IMSIC "riscv.imsic"
61
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
57
+
58
+typedef struct RISCVIMSICState RISCVIMSICState;
59
+DECLARE_INSTANCE_CHECKER(RISCVIMSICState, RISCV_IMSIC, TYPE_RISCV_IMSIC)
60
+
61
+#define IMSIC_MMIO_PAGE_SHIFT 12
62
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
63
+#define IMSIC_MMIO_SIZE(__num_pages) ((__num_pages) * IMSIC_MMIO_PAGE_SZ)
64
+
65
+#define IMSIC_MMIO_HART_GUEST_MAX_BTIS 6
66
+#define IMSIC_MMIO_GROUP_MIN_SHIFT 24
67
+
68
+#define IMSIC_HART_NUM_GUESTS(__guest_bits) \
69
+ (1U << (__guest_bits))
70
+#define IMSIC_HART_SIZE(__guest_bits) \
71
+ (IMSIC_HART_NUM_GUESTS(__guest_bits) * IMSIC_MMIO_PAGE_SZ)
72
+#define IMSIC_GROUP_NUM_HARTS(__hart_bits) \
73
+ (1U << (__hart_bits))
74
+#define IMSIC_GROUP_SIZE(__hart_bits, __guest_bits) \
75
+ (IMSIC_GROUP_NUM_HARTS(__hart_bits) * IMSIC_HART_SIZE(__guest_bits))
76
+
77
+struct RISCVIMSICState {
78
+ /*< private >*/
79
+ SysBusDevice parent_obj;
80
+ qemu_irq *external_irqs;
81
+
82
+ /*< public >*/
83
+ MemoryRegion mmio;
84
+ uint32_t num_eistate;
85
+ uint32_t *eidelivery;
86
+ uint32_t *eithreshold;
87
+ uint32_t *eistate;
88
+
89
+ /* config */
90
+ bool mmode;
91
+ uint32_t hartid;
92
+ uint32_t num_pages;
93
+ uint32_t num_irqs;
94
+};
95
+
96
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
97
+ uint32_t num_pages, uint32_t num_ids);
98
+
62
+
99
+#endif
63
+#endif
100
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
64
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
65
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/riscv/riscv-iommu.h
67
+++ b/hw/riscv/riscv-iommu.h
68
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
69
70
QLIST_ENTRY(RISCVIOMMUState) iommus;
71
QLIST_HEAD(, RISCVIOMMUSpace) spaces;
72
+
73
+ /* HPM cycle counter */
74
+ uint64_t hpmcycle_val; /* Current value of cycle register */
75
+ uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
76
};
77
78
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
79
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
101
new file mode 100644
80
new file mode 100644
102
index XXXXXXX..XXXXXXX
81
index XXXXXXX..XXXXXXX
103
--- /dev/null
82
--- /dev/null
104
+++ b/hw/intc/riscv_imsic.c
83
+++ b/hw/riscv/riscv-iommu-hpm.c
105
@@ -XXX,XX +XXX,XX @@
84
@@ -XXX,XX +XXX,XX @@
106
+/*
85
+/*
107
+ * RISC-V IMSIC (Incoming Message Signaled Interrupt Controller)
86
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
108
+ *
87
+ *
109
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
88
+ * Copyright (C) 2022-2023 Rivos Inc.
110
+ *
89
+ *
111
+ * This program is free software; you can redistribute it and/or modify it
90
+ * This program is free software; you can redistribute it and/or modify it
112
+ * under the terms and conditions of the GNU General Public License,
91
+ * under the terms and conditions of the GNU General Public License,
113
+ * version 2 or later, as published by the Free Software Foundation.
92
+ * version 2 or later, as published by the Free Software Foundation.
114
+ *
93
+ *
115
+ * This program is distributed in the hope it will be useful, but WITHOUT
94
+ * This program is distributed in the hope that it will be useful,
116
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
95
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
117
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
96
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
118
+ * more details.
97
+ * GNU General Public License for more details.
119
+ *
98
+ *
120
+ * You should have received a copy of the GNU General Public License along with
99
+ * You should have received a copy of the GNU General Public License along
121
+ * this program. If not, see <http://www.gnu.org/licenses/>.
100
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
122
+ */
101
+ */
123
+
102
+
124
+#include "qemu/osdep.h"
103
+#include "qemu/osdep.h"
125
+#include "qapi/error.h"
104
+#include "qemu/timer.h"
126
+#include "qemu/log.h"
105
+#include "cpu_bits.h"
127
+#include "qemu/module.h"
106
+#include "riscv-iommu-hpm.h"
128
+#include "qemu/error-report.h"
107
+#include "riscv-iommu.h"
129
+#include "qemu/bswap.h"
108
+#include "riscv-iommu-bits.h"
130
+#include "exec/address-spaces.h"
109
+#include "trace.h"
131
+#include "hw/sysbus.h"
132
+#include "hw/pci/msi.h"
133
+#include "hw/boards.h"
134
+#include "hw/qdev-properties.h"
135
+#include "hw/intc/riscv_imsic.h"
136
+#include "hw/irq.h"
137
+#include "target/riscv/cpu.h"
138
+#include "target/riscv/cpu_bits.h"
139
+#include "sysemu/sysemu.h"
140
+#include "migration/vmstate.h"
141
+
110
+
142
+#define IMSIC_MMIO_PAGE_LE 0x00
111
+/* For now we assume IOMMU HPM frequency to be 1GHz so 1-cycle is of 1-ns. */
143
+#define IMSIC_MMIO_PAGE_BE 0x04
112
+static inline uint64_t get_cycles(void)
113
+{
114
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
115
+}
144
+
116
+
145
+#define IMSIC_MIN_ID ((IMSIC_EIPx_BITS * 2) - 1)
117
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
146
+#define IMSIC_MAX_ID (IMSIC_TOPEI_IID_MASK)
118
+{
119
+ const uint64_t cycle = riscv_iommu_reg_get64(
120
+ s, RISCV_IOMMU_REG_IOHPMCYCLES);
121
+ const uint32_t inhibit = riscv_iommu_reg_get32(
122
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
123
+ const uint64_t ctr_prev = s->hpmcycle_prev;
124
+ const uint64_t ctr_val = s->hpmcycle_val;
147
+
125
+
148
+#define IMSIC_EISTATE_PENDING (1U << 0)
126
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
149
+#define IMSIC_EISTATE_ENABLED (1U << 1)
127
+ /*
150
+#define IMSIC_EISTATE_ENPEND (IMSIC_EISTATE_ENABLED | \
128
+ * Counter should not increment if inhibit bit is set. We can't really
151
+ IMSIC_EISTATE_PENDING)
129
+ * stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated
152
+
130
+ * counter value to indicate that counter was not incremented.
153
+static uint32_t riscv_imsic_topei(RISCVIMSICState *imsic, uint32_t page)
131
+ */
154
+{
132
+ return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
155
+ uint32_t i, max_irq, base;
133
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
156
+
157
+ base = page * imsic->num_irqs;
158
+ max_irq = (imsic->eithreshold[page] &&
159
+ (imsic->eithreshold[page] <= imsic->num_irqs)) ?
160
+ imsic->eithreshold[page] : imsic->num_irqs;
161
+ for (i = 1; i < max_irq; i++) {
162
+ if ((imsic->eistate[base + i] & IMSIC_EISTATE_ENPEND) ==
163
+ IMSIC_EISTATE_ENPEND) {
164
+ return (i << IMSIC_TOPEI_IID_SHIFT) | i;
165
+ }
166
+ }
134
+ }
167
+
135
+
168
+ return 0;
136
+ return (ctr_val + get_cycles() - ctr_prev) |
137
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
169
+}
138
+}
170
+
139
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
171
+static void riscv_imsic_update(RISCVIMSICState *imsic, uint32_t page)
140
index XXXXXXX..XXXXXXX 100644
172
+{
141
--- a/hw/riscv/riscv-iommu.c
173
+ if (imsic->eidelivery[page] && riscv_imsic_topei(imsic, page)) {
142
+++ b/hw/riscv/riscv-iommu.c
174
+ qemu_irq_raise(imsic->external_irqs[page]);
143
@@ -XXX,XX +XXX,XX @@
144
#include "cpu_bits.h"
145
#include "riscv-iommu.h"
146
#include "riscv-iommu-bits.h"
147
+#include "riscv-iommu-hpm.h"
148
#include "trace.h"
149
150
#define LIMIT_CACHE_CTX (1U << 7)
151
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_read(void *opaque, hwaddr addr,
152
return MEMTX_ACCESS_ERROR;
153
}
154
155
- ptr = &s->regs_rw[addr];
156
+ /* Compute cycle register value. */
157
+ if ((addr & ~7) == RISCV_IOMMU_REG_IOHPMCYCLES) {
158
+ val = riscv_iommu_hpmcycle_read(s);
159
+ ptr = (uint8_t *)&val + (addr & 7);
160
+ } else if ((addr & ~3) == RISCV_IOMMU_REG_IOCOUNTOVF) {
161
+ /*
162
+ * Software can read RISCV_IOMMU_REG_IOCOUNTOVF before timer
163
+ * callback completes. In which case CY_OF bit in
164
+ * RISCV_IOMMU_IOHPMCYCLES_OVF would be 0. Here we take the
165
+ * CY_OF bit state from RISCV_IOMMU_REG_IOHPMCYCLES register as
166
+ * it's not dependent over the timer callback and is computed
167
+ * from cycle overflow.
168
+ */
169
+ val = ldq_le_p(&s->regs_rw[addr]);
170
+ val |= (riscv_iommu_hpmcycle_read(s) & RISCV_IOMMU_IOHPMCYCLES_OVF)
171
+ ? RISCV_IOMMU_IOCOUNTOVF_CY
172
+ : 0;
173
+ ptr = (uint8_t *)&val + (addr & 3);
175
+ } else {
174
+ } else {
176
+ qemu_irq_lower(imsic->external_irqs[page]);
175
+ ptr = &s->regs_rw[addr];
177
+ }
178
+}
179
+
180
+static int riscv_imsic_eidelivery_rmw(RISCVIMSICState *imsic, uint32_t page,
181
+ target_ulong *val,
182
+ target_ulong new_val,
183
+ target_ulong wr_mask)
184
+{
185
+ target_ulong old_val = imsic->eidelivery[page];
186
+
187
+ if (val) {
188
+ *val = old_val;
189
+ }
176
+ }
190
+
177
+
191
+ wr_mask &= 0x1;
178
val = ldn_le_p(ptr, size);
192
+ imsic->eidelivery[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
179
193
+
180
*data = val;
194
+ riscv_imsic_update(imsic, page);
181
diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build
195
+ return 0;
196
+}
197
+
198
+static int riscv_imsic_eithreshold_rmw(RISCVIMSICState *imsic, uint32_t page,
199
+ target_ulong *val,
200
+ target_ulong new_val,
201
+ target_ulong wr_mask)
202
+{
203
+ target_ulong old_val = imsic->eithreshold[page];
204
+
205
+ if (val) {
206
+ *val = old_val;
207
+ }
208
+
209
+ wr_mask &= IMSIC_MAX_ID;
210
+ imsic->eithreshold[page] = (old_val & ~wr_mask) | (new_val & wr_mask);
211
+
212
+ riscv_imsic_update(imsic, page);
213
+ return 0;
214
+}
215
+
216
+static int riscv_imsic_topei_rmw(RISCVIMSICState *imsic, uint32_t page,
217
+ target_ulong *val, target_ulong new_val,
218
+ target_ulong wr_mask)
219
+{
220
+ uint32_t base, topei = riscv_imsic_topei(imsic, page);
221
+
222
+ /* Read pending and enabled interrupt with highest priority */
223
+ if (val) {
224
+ *val = topei;
225
+ }
226
+
227
+ /* Writes ignore value and clear top pending interrupt */
228
+ if (topei && wr_mask) {
229
+ topei >>= IMSIC_TOPEI_IID_SHIFT;
230
+ base = page * imsic->num_irqs;
231
+ if (topei) {
232
+ imsic->eistate[base + topei] &= ~IMSIC_EISTATE_PENDING;
233
+ }
234
+
235
+ riscv_imsic_update(imsic, page);
236
+ }
237
+
238
+ return 0;
239
+}
240
+
241
+static int riscv_imsic_eix_rmw(RISCVIMSICState *imsic,
242
+ uint32_t xlen, uint32_t page,
243
+ uint32_t num, bool pend, target_ulong *val,
244
+ target_ulong new_val, target_ulong wr_mask)
245
+{
246
+ uint32_t i, base;
247
+ target_ulong mask;
248
+ uint32_t state = (pend) ? IMSIC_EISTATE_PENDING : IMSIC_EISTATE_ENABLED;
249
+
250
+ if (xlen != 32) {
251
+ if (num & 0x1) {
252
+ return -EINVAL;
253
+ }
254
+ num >>= 1;
255
+ }
256
+ if (num >= (imsic->num_irqs / xlen)) {
257
+ return -EINVAL;
258
+ }
259
+
260
+ base = (page * imsic->num_irqs) + (num * xlen);
261
+
262
+ if (val) {
263
+ *val = 0;
264
+ for (i = 0; i < xlen; i++) {
265
+ mask = (target_ulong)1 << i;
266
+ *val |= (imsic->eistate[base + i] & state) ? mask : 0;
267
+ }
268
+ }
269
+
270
+ for (i = 0; i < xlen; i++) {
271
+ /* Bit0 of eip0 and eie0 are read-only zero */
272
+ if (!num && !i) {
273
+ continue;
274
+ }
275
+
276
+ mask = (target_ulong)1 << i;
277
+ if (wr_mask & mask) {
278
+ if (new_val & mask) {
279
+ imsic->eistate[base + i] |= state;
280
+ } else {
281
+ imsic->eistate[base + i] &= ~state;
282
+ }
283
+ }
284
+ }
285
+
286
+ riscv_imsic_update(imsic, page);
287
+ return 0;
288
+}
289
+
290
+static int riscv_imsic_rmw(void *arg, target_ulong reg, target_ulong *val,
291
+ target_ulong new_val, target_ulong wr_mask)
292
+{
293
+ RISCVIMSICState *imsic = arg;
294
+ uint32_t isel, priv, virt, vgein, xlen, page;
295
+
296
+ priv = AIA_IREG_PRIV(reg);
297
+ virt = AIA_IREG_VIRT(reg);
298
+ isel = AIA_IREG_ISEL(reg);
299
+ vgein = AIA_IREG_VGEIN(reg);
300
+ xlen = AIA_IREG_XLEN(reg);
301
+
302
+ if (imsic->mmode) {
303
+ if (priv == PRV_M && !virt) {
304
+ page = 0;
305
+ } else {
306
+ goto err;
307
+ }
308
+ } else {
309
+ if (priv == PRV_S) {
310
+ if (virt) {
311
+ if (vgein && vgein < imsic->num_pages) {
312
+ page = vgein;
313
+ } else {
314
+ goto err;
315
+ }
316
+ } else {
317
+ page = 0;
318
+ }
319
+ } else {
320
+ goto err;
321
+ }
322
+ }
323
+
324
+ switch (isel) {
325
+ case ISELECT_IMSIC_EIDELIVERY:
326
+ return riscv_imsic_eidelivery_rmw(imsic, page, val,
327
+ new_val, wr_mask);
328
+ case ISELECT_IMSIC_EITHRESHOLD:
329
+ return riscv_imsic_eithreshold_rmw(imsic, page, val,
330
+ new_val, wr_mask);
331
+ case ISELECT_IMSIC_TOPEI:
332
+ return riscv_imsic_topei_rmw(imsic, page, val, new_val, wr_mask);
333
+ case ISELECT_IMSIC_EIP0 ... ISELECT_IMSIC_EIP63:
334
+ return riscv_imsic_eix_rmw(imsic, xlen, page,
335
+ isel - ISELECT_IMSIC_EIP0,
336
+ true, val, new_val, wr_mask);
337
+ case ISELECT_IMSIC_EIE0 ... ISELECT_IMSIC_EIE63:
338
+ return riscv_imsic_eix_rmw(imsic, xlen, page,
339
+ isel - ISELECT_IMSIC_EIE0,
340
+ false, val, new_val, wr_mask);
341
+ default:
342
+ break;
343
+ };
344
+
345
+err:
346
+ qemu_log_mask(LOG_GUEST_ERROR,
347
+ "%s: Invalid register priv=%d virt=%d isel=%d vgein=%d\n",
348
+ __func__, priv, virt, isel, vgein);
349
+ return -EINVAL;
350
+}
351
+
352
+static uint64_t riscv_imsic_read(void *opaque, hwaddr addr, unsigned size)
353
+{
354
+ RISCVIMSICState *imsic = opaque;
355
+
356
+ /* Reads must be 4 byte words */
357
+ if ((addr & 0x3) != 0) {
358
+ goto err;
359
+ }
360
+
361
+ /* Reads cannot be out of range */
362
+ if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
363
+ goto err;
364
+ }
365
+
366
+ return 0;
367
+
368
+err:
369
+ qemu_log_mask(LOG_GUEST_ERROR,
370
+ "%s: Invalid register read 0x%" HWADDR_PRIx "\n",
371
+ __func__, addr);
372
+ return 0;
373
+}
374
+
375
+static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
376
+ unsigned size)
377
+{
378
+ RISCVIMSICState *imsic = opaque;
379
+ uint32_t page;
380
+
381
+ /* Writes must be 4 byte words */
382
+ if ((addr & 0x3) != 0) {
383
+ goto err;
384
+ }
385
+
386
+ /* Writes cannot be out of range */
387
+ if (addr > IMSIC_MMIO_SIZE(imsic->num_pages)) {
388
+ goto err;
389
+ }
390
+
391
+ /* Writes only supported for MSI little-endian registers */
392
+ page = addr >> IMSIC_MMIO_PAGE_SHIFT;
393
+ if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
394
+ if (value && (value < imsic->num_irqs)) {
395
+ imsic->eistate[(page * imsic->num_irqs) + value] |=
396
+ IMSIC_EISTATE_PENDING;
397
+ }
398
+ }
399
+
400
+ /* Update CPU external interrupt status */
401
+ riscv_imsic_update(imsic, page);
402
+
403
+ return;
404
+
405
+err:
406
+ qemu_log_mask(LOG_GUEST_ERROR,
407
+ "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
408
+ __func__, addr);
409
+}
410
+
411
+static const MemoryRegionOps riscv_imsic_ops = {
412
+ .read = riscv_imsic_read,
413
+ .write = riscv_imsic_write,
414
+ .endianness = DEVICE_LITTLE_ENDIAN,
415
+ .valid = {
416
+ .min_access_size = 4,
417
+ .max_access_size = 4
418
+ }
419
+};
420
+
421
+static void riscv_imsic_realize(DeviceState *dev, Error **errp)
422
+{
423
+ RISCVIMSICState *imsic = RISCV_IMSIC(dev);
424
+ RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid));
425
+ CPUState *cpu = qemu_get_cpu(imsic->hartid);
426
+ CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
427
+
428
+ imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
429
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
430
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
431
+ imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
432
+
433
+ memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
434
+ imsic, TYPE_RISCV_IMSIC,
435
+ IMSIC_MMIO_SIZE(imsic->num_pages));
436
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
437
+
438
+ /* Claim the CPU interrupt to be triggered by this IMSIC */
439
+ if (riscv_cpu_claim_interrupts(rcpu,
440
+ (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
441
+ error_setg(errp, "%s already claimed",
442
+ (imsic->mmode) ? "MEIP" : "SEIP");
443
+ return;
444
+ }
445
+
446
+ /* Create output IRQ lines */
447
+ imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
448
+ qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
449
+
450
+ /* Force select AIA feature and setup CSR read-modify-write callback */
451
+ if (env) {
452
+ riscv_set_feature(env, RISCV_FEATURE_AIA);
453
+ if (!imsic->mmode) {
454
+ riscv_cpu_set_geilen(env, imsic->num_pages - 1);
455
+ }
456
+ riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
457
+ riscv_imsic_rmw, imsic);
458
+ }
459
+
460
+ msi_nonbroken = true;
461
+}
462
+
463
+static Property riscv_imsic_properties[] = {
464
+ DEFINE_PROP_BOOL("mmode", RISCVIMSICState, mmode, 0),
465
+ DEFINE_PROP_UINT32("hartid", RISCVIMSICState, hartid, 0),
466
+ DEFINE_PROP_UINT32("num-pages", RISCVIMSICState, num_pages, 0),
467
+ DEFINE_PROP_UINT32("num-irqs", RISCVIMSICState, num_irqs, 0),
468
+ DEFINE_PROP_END_OF_LIST(),
469
+};
470
+
471
+static const VMStateDescription vmstate_riscv_imsic = {
472
+ .name = "riscv_imsic",
473
+ .version_id = 1,
474
+ .minimum_version_id = 1,
475
+ .fields = (VMStateField[]) {
476
+ VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState,
477
+ num_pages, 0,
478
+ vmstate_info_uint32, uint32_t),
479
+ VMSTATE_VARRAY_UINT32(eithreshold, RISCVIMSICState,
480
+ num_pages, 0,
481
+ vmstate_info_uint32, uint32_t),
482
+ VMSTATE_VARRAY_UINT32(eistate, RISCVIMSICState,
483
+ num_eistate, 0,
484
+ vmstate_info_uint32, uint32_t),
485
+ VMSTATE_END_OF_LIST()
486
+ }
487
+};
488
+
489
+static void riscv_imsic_class_init(ObjectClass *klass, void *data)
490
+{
491
+ DeviceClass *dc = DEVICE_CLASS(klass);
492
+
493
+ device_class_set_props(dc, riscv_imsic_properties);
494
+ dc->realize = riscv_imsic_realize;
495
+ dc->vmsd = &vmstate_riscv_imsic;
496
+}
497
+
498
+static const TypeInfo riscv_imsic_info = {
499
+ .name = TYPE_RISCV_IMSIC,
500
+ .parent = TYPE_SYS_BUS_DEVICE,
501
+ .instance_size = sizeof(RISCVIMSICState),
502
+ .class_init = riscv_imsic_class_init,
503
+};
504
+
505
+static void riscv_imsic_register_types(void)
506
+{
507
+ type_register_static(&riscv_imsic_info);
508
+}
509
+
510
+type_init(riscv_imsic_register_types)
511
+
512
+/*
513
+ * Create IMSIC device.
514
+ */
515
+DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
516
+ uint32_t num_pages, uint32_t num_ids)
517
+{
518
+ DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC);
519
+ CPUState *cpu = qemu_get_cpu(hartid);
520
+ uint32_t i;
521
+
522
+ assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1)));
523
+ if (mmode) {
524
+ assert(num_pages == 1);
525
+ } else {
526
+ assert(num_pages >= 1 && num_pages <= (IRQ_LOCAL_GUEST_MAX + 1));
527
+ }
528
+ assert(IMSIC_MIN_ID <= num_ids);
529
+ assert(num_ids <= IMSIC_MAX_ID);
530
+ assert((num_ids & IMSIC_MIN_ID) == IMSIC_MIN_ID);
531
+
532
+ qdev_prop_set_bit(dev, "mmode", mmode);
533
+ qdev_prop_set_uint32(dev, "hartid", hartid);
534
+ qdev_prop_set_uint32(dev, "num-pages", num_pages);
535
+ qdev_prop_set_uint32(dev, "num-irqs", num_ids + 1);
536
+
537
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
538
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
539
+
540
+ for (i = 0; i < num_pages; i++) {
541
+ if (!i) {
542
+ qdev_connect_gpio_out_named(dev, NULL, i,
543
+ qdev_get_gpio_in(DEVICE(cpu),
544
+ (mmode) ? IRQ_M_EXT : IRQ_S_EXT));
545
+ } else {
546
+ qdev_connect_gpio_out_named(dev, NULL, i,
547
+ qdev_get_gpio_in(DEVICE(cpu),
548
+ IRQ_LOCAL_MAX + i - 1));
549
+ }
550
+ }
551
+
552
+ return dev;
553
+}
554
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
555
index XXXXXXX..XXXXXXX 100644
182
index XXXXXXX..XXXXXXX 100644
556
--- a/hw/intc/Kconfig
183
--- a/hw/riscv/meson.build
557
+++ b/hw/intc/Kconfig
184
+++ b/hw/riscv/meson.build
558
@@ -XXX,XX +XXX,XX @@ config RISCV_ACLINT
185
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c'))
559
config RISCV_APLIC
186
riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c'))
560
bool
187
riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c'))
561
188
riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
562
+config RISCV_IMSIC
189
-riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files('riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c'))
563
+ bool
190
+riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files(
564
+
191
+    'riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c', 'riscv-iommu-hpm.c'))
565
config SIFIVE_PLIC
192
riscv_ss.add(when: 'CONFIG_MICROBLAZE_V', if_true: files('microblaze-v-generic.c'))
566
bool
193
567
194
hw_arch += {'riscv': riscv_ss}
568
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
569
index XXXXXXX..XXXXXXX 100644
570
--- a/hw/intc/meson.build
571
+++ b/hw/intc/meson.build
572
@@ -XXX,XX +XXX,XX @@ specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
573
specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
574
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
575
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
576
+specific_ss.add(when: 'CONFIG_RISCV_IMSIC', if_true: files('riscv_imsic.c'))
577
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
578
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c'))
579
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],
580
--
195
--
581
2.35.1
196
2.48.1
diff view generated by jsdifflib
1
From: Anup Patel <anup.patel@wdc.com>
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
2
2
3
We extend virt machine to emulate AIA APLIC devices only when
3
This function will increment a specific counter, generating an interrupt
4
"aia=aplic" parameter is passed along with machine name in QEMU
4
when an overflow occurs.
5
command-line. When "aia=none" or not specified then we fallback
5
6
to original PLIC device emulation.
6
Some extra changes in riscv-iommu.c were required to add this new
7
7
helper in riscv-iommu-hpm.c:
8
Signed-off-by: Anup Patel <anup.patel@wdc.com>
8
9
Signed-off-by: Anup Patel <anup@brainfault.org>
9
- RISCVIOMMUContext was moved to riscv-iommu.h, making it visible in
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
riscv-iommu-hpm.c;
11
Message-Id: <20220220085526.808674-2-anup@brainfault.org>
11
12
- riscv_iommu_notify() is now public.
13
14
No behavior change is made since HPM support is not being advertised
15
yet.
16
17
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
18
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
Acked-by: Alistair Francis <alistair.francis@wdc.com>
20
Message-ID: <20250224190826.1858473-5-dbarboza@ventanamicro.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
22
---
14
include/hw/riscv/virt.h | 26 +++-
23
hw/riscv/riscv-iommu-hpm.h | 2 +
15
hw/riscv/virt.c | 291 ++++++++++++++++++++++++++++++++--------
24
hw/riscv/riscv-iommu.h | 18 ++++++
16
hw/riscv/Kconfig | 1 +
25
hw/riscv/riscv-iommu-hpm.c | 114 +++++++++++++++++++++++++++++++++++++
17
3 files changed, 259 insertions(+), 59 deletions(-)
26
hw/riscv/riscv-iommu.c | 43 +++++++++-----
18
27
4 files changed, 162 insertions(+), 15 deletions(-)
19
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
28
29
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
20
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/riscv/virt.h
31
--- a/hw/riscv/riscv-iommu-hpm.h
22
+++ b/include/hw/riscv/virt.h
32
+++ b/hw/riscv/riscv-iommu-hpm.h
23
@@ -XXX,XX +XXX,XX @@ typedef struct RISCVVirtState RISCVVirtState;
33
@@ -XXX,XX +XXX,XX @@
24
DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
34
#include "hw/riscv/riscv-iommu.h"
25
TYPE_RISCV_VIRT_MACHINE)
35
26
36
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
27
+typedef enum RISCVVirtAIAType {
37
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
28
+ VIRT_AIA_TYPE_NONE = 0,
38
+ unsigned event_id);
29
+ VIRT_AIA_TYPE_APLIC,
39
30
+} RISCVVirtAIAType;
40
#endif
31
+
41
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
32
struct RISCVVirtState {
42
index XXXXXXX..XXXXXXX 100644
33
/*< private >*/
43
--- a/hw/riscv/riscv-iommu.h
34
MachineState parent;
44
+++ b/hw/riscv/riscv-iommu.h
35
45
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
36
/*< public >*/
46
/* HPM cycle counter */
37
RISCVHartArrayState soc[VIRT_SOCKETS_MAX];
47
uint64_t hpmcycle_val; /* Current value of cycle register */
38
- DeviceState *plic[VIRT_SOCKETS_MAX];
48
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
39
+ DeviceState *irqchip[VIRT_SOCKETS_MAX];
49
+
40
PFlashCFI01 *flash[2];
50
+ /* HPM event counters */
41
FWCfgState *fw_cfg;
51
+ GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
42
43
int fdt_size;
44
bool have_aclint;
45
+ RISCVVirtAIAType aia_type;
46
};
52
};
47
53
48
enum {
54
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
49
@@ -XXX,XX +XXX,XX @@ enum {
55
Error **errp);
50
VIRT_CLINT,
56
void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
51
VIRT_ACLINT_SSWI,
57
void riscv_iommu_reset(RISCVIOMMUState *s);
52
VIRT_PLIC,
58
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
53
+ VIRT_APLIC_M,
59
+
54
+ VIRT_APLIC_S,
60
+typedef struct RISCVIOMMUContext RISCVIOMMUContext;
55
VIRT_UART0,
61
+/* Device translation context state. */
56
VIRT_VIRTIO,
62
+struct RISCVIOMMUContext {
57
VIRT_FW_CFG,
63
+ uint64_t devid:24; /* Requester Id, AKA device_id */
58
@@ -XXX,XX +XXX,XX @@ enum {
64
+ uint64_t process_id:20; /* Process ID. PASID for PCIe */
59
VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
65
+ uint64_t tc; /* Translation Control */
66
+ uint64_t ta; /* Translation Attributes */
67
+ uint64_t satp; /* S-Stage address translation and protection */
68
+ uint64_t gatp; /* G-Stage address translation and protection */
69
+ uint64_t msi_addr_mask; /* MSI filtering - address mask */
70
+ uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
71
+ uint64_t msiptp; /* MSI redirection page table pointer */
72
+};
73
74
/* private helpers */
75
76
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/hw/riscv/riscv-iommu-hpm.c
79
+++ b/hw/riscv/riscv-iommu-hpm.c
80
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
81
return (ctr_val + get_cycles() - ctr_prev) |
82
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
83
}
84
+
85
+static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
86
+{
87
+ const uint32_t off = ctr_idx << 3;
88
+ uint64_t cntr_val;
89
+
90
+ cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
91
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
92
+
93
+ /* Handle the overflow scenario. */
94
+ if (cntr_val == UINT64_MAX) {
95
+ /*
96
+ * Generate interrupt only if OF bit is clear. +1 to offset the cycle
97
+ * register OF bit.
98
+ */
99
+ const uint32_t ovf =
100
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
101
+ BIT(ctr_idx + 1), 0);
102
+ if (!get_field(ovf, BIT(ctr_idx + 1))) {
103
+ riscv_iommu_reg_mod64(s,
104
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
105
+ RISCV_IOMMU_IOHPMEVT_OF,
106
+ 0);
107
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
108
+ }
109
+ }
110
+}
111
+
112
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
113
+ unsigned event_id)
114
+{
115
+ const uint32_t inhibit = riscv_iommu_reg_get32(
116
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
117
+ uint32_t did_gscid;
118
+ uint32_t pid_pscid;
119
+ uint32_t ctr_idx;
120
+ gpointer value;
121
+ uint32_t ctrs;
122
+ uint64_t evt;
123
+
124
+ if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
125
+ return;
126
+ }
127
+
128
+ value = g_hash_table_lookup(s->hpm_event_ctr_map,
129
+ GUINT_TO_POINTER(event_id));
130
+ if (value == NULL) {
131
+ return;
132
+ }
133
+
134
+ for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
135
+ ctr_idx = ctz32(ctrs);
136
+ if (get_field(inhibit, BIT(ctr_idx + 1))) {
137
+ continue;
138
+ }
139
+
140
+ evt = riscv_iommu_reg_get64(s,
141
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
142
+
143
+ /*
144
+ * It's quite possible that event ID has been changed in counter
145
+ * but hashtable hasn't been updated yet. We don't want to increment
146
+ * counter for the old event ID.
147
+ */
148
+ if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
149
+ continue;
150
+ }
151
+
152
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
153
+ did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
154
+ pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
155
+ } else {
156
+ did_gscid = ctx->devid;
157
+ pid_pscid = ctx->process_id;
158
+ }
159
+
160
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
161
+ /*
162
+ * If the transaction does not have a valid process_id, counter
163
+ * increments if device_id matches DID_GSCID. If the transaction
164
+ * has a valid process_id, counter increments if device_id
165
+ * matches DID_GSCID and process_id matches PID_PSCID. See
166
+ * IOMMU Specification, Chapter 5.23. Performance-monitoring
167
+ * event selector.
168
+ */
169
+ if (ctx->process_id &&
170
+ get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
171
+ continue;
172
+ }
173
+ }
174
+
175
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
176
+ uint32_t mask = ~0;
177
+
178
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
179
+ /*
180
+ * 1001 1011 mask = GSCID
181
+ * 0000 0111 mask = mask ^ (mask + 1)
182
+ * 1111 1000 mask = ~mask;
183
+ */
184
+ mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
185
+ mask = mask ^ (mask + 1);
186
+ mask = ~mask;
187
+ }
188
+
189
+ if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
190
+ (did_gscid & mask)) {
191
+ continue;
192
+ }
193
+ }
194
+
195
+ hpm_incr_ctr(s, ctr_idx);
196
+ }
197
+}
198
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
199
index XXXXXXX..XXXXXXX 100644
200
--- a/hw/riscv/riscv-iommu.c
201
+++ b/hw/riscv/riscv-iommu.c
202
@@ -XXX,XX +XXX,XX @@
203
#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
204
#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
205
206
-typedef struct RISCVIOMMUContext RISCVIOMMUContext;
207
typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
208
209
/* Device assigned I/O address space */
210
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUSpace {
211
QLIST_ENTRY(RISCVIOMMUSpace) list;
60
};
212
};
61
213
62
-#define VIRT_PLIC_NUM_SOURCES 127
214
-/* Device translation context state. */
63
-#define VIRT_PLIC_NUM_PRIORITIES 7
215
-struct RISCVIOMMUContext {
64
+#define VIRT_IRQCHIP_NUM_SOURCES 127
216
- uint64_t devid:24; /* Requester Id, AKA device_id */
65
+#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
217
- uint64_t process_id:20; /* Process ID. PASID for PCIe */
66
+
218
- uint64_t tc; /* Translation Control */
67
#define VIRT_PLIC_PRIORITY_BASE 0x04
219
- uint64_t ta; /* Translation Attributes */
68
#define VIRT_PLIC_PENDING_BASE 0x1000
220
- uint64_t satp; /* S-Stage address translation and protection */
69
#define VIRT_PLIC_ENABLE_BASE 0x2000
221
- uint64_t gatp; /* G-Stage address translation and protection */
70
@@ -XXX,XX +XXX,XX @@ enum {
222
- uint64_t msi_addr_mask; /* MSI filtering - address mask */
71
223
- uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
72
#define FDT_PCI_ADDR_CELLS 3
224
- uint64_t msiptp; /* MSI redirection page table pointer */
73
#define FDT_PCI_INT_CELLS 1
225
-};
74
-#define FDT_PLIC_ADDR_CELLS 0
226
-
75
#define FDT_PLIC_INT_CELLS 1
227
typedef enum RISCVIOMMUTransTag {
76
-#define FDT_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + 1 + \
228
RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
77
- FDT_PLIC_ADDR_CELLS + FDT_PLIC_INT_CELLS)
229
RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
78
+#define FDT_APLIC_INT_CELLS 2
230
@@ -XXX,XX +XXX,XX @@ static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
79
+#define FDT_MAX_INT_CELLS 2
231
}
80
+#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
81
+ 1 + FDT_MAX_INT_CELLS)
82
+#define FDT_PLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
83
+ 1 + FDT_PLIC_INT_CELLS)
84
+#define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
85
+ 1 + FDT_APLIC_INT_CELLS)
86
87
#endif
88
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/hw/riscv/virt.c
91
+++ b/hw/riscv/virt.c
92
@@ -XXX,XX +XXX,XX @@
93
#include "hw/riscv/boot.h"
94
#include "hw/riscv/numa.h"
95
#include "hw/intc/riscv_aclint.h"
96
+#include "hw/intc/riscv_aplic.h"
97
#include "hw/intc/sifive_plic.h"
98
#include "hw/misc/sifive_test.h"
99
#include "chardev/char.h"
100
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry virt_memmap[] = {
101
[VIRT_ACLINT_SSWI] = { 0x2F00000, 0x4000 },
102
[VIRT_PCIE_PIO] = { 0x3000000, 0x10000 },
103
[VIRT_PLIC] = { 0xc000000, VIRT_PLIC_SIZE(VIRT_CPUS_MAX * 2) },
104
+ [VIRT_APLIC_M] = { 0xc000000, APLIC_SIZE(VIRT_CPUS_MAX) },
105
+ [VIRT_APLIC_S] = { 0xd000000, APLIC_SIZE(VIRT_CPUS_MAX) },
106
[VIRT_UART0] = { 0x10000000, 0x100 },
107
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
108
[VIRT_FW_CFG] = { 0x10100000, 0x18 },
109
@@ -XXX,XX +XXX,XX @@ static void virt_flash_map(RISCVVirtState *s,
110
sysmem);
111
}
232
}
112
233
113
-static void create_pcie_irq_map(void *fdt, char *nodename,
234
-static void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
114
- uint32_t plic_phandle)
235
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
115
+static void create_pcie_irq_map(RISCVVirtState *s, void *fdt, char *nodename,
116
+ uint32_t irqchip_phandle)
117
{
236
{
118
int pin, dev;
237
uint32_t ipsr, icvec, vector;
119
- uint32_t
238
120
- full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS * FDT_INT_MAP_WIDTH] = {};
239
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
121
+ uint32_t irq_map_stride = 0;
122
+ uint32_t full_irq_map[GPEX_NUM_IRQS * GPEX_NUM_IRQS *
123
+ FDT_MAX_INT_MAP_WIDTH] = {};
124
uint32_t *irq_map = full_irq_map;
125
126
/* This code creates a standard swizzle of interrupts such that
127
@@ -XXX,XX +XXX,XX @@ static void create_pcie_irq_map(void *fdt, char *nodename,
128
int irq_nr = PCIE_IRQ + ((pin + PCI_SLOT(devfn)) % GPEX_NUM_IRQS);
129
int i = 0;
130
131
+ /* Fill PCI address cells */
132
irq_map[i] = cpu_to_be32(devfn << 8);
133
-
134
i += FDT_PCI_ADDR_CELLS;
135
- irq_map[i] = cpu_to_be32(pin + 1);
136
137
+ /* Fill PCI Interrupt cells */
138
+ irq_map[i] = cpu_to_be32(pin + 1);
139
i += FDT_PCI_INT_CELLS;
140
- irq_map[i++] = cpu_to_be32(plic_phandle);
141
142
- i += FDT_PLIC_ADDR_CELLS;
143
- irq_map[i] = cpu_to_be32(irq_nr);
144
+ /* Fill interrupt controller phandle and cells */
145
+ irq_map[i++] = cpu_to_be32(irqchip_phandle);
146
+ irq_map[i++] = cpu_to_be32(irq_nr);
147
+ if (s->aia_type != VIRT_AIA_TYPE_NONE) {
148
+ irq_map[i++] = cpu_to_be32(0x4);
149
+ }
150
151
- irq_map += FDT_INT_MAP_WIDTH;
152
+ if (!irq_map_stride) {
153
+ irq_map_stride = i;
154
+ }
155
+ irq_map += irq_map_stride;
156
}
157
}
158
159
- qemu_fdt_setprop(fdt, nodename, "interrupt-map",
160
- full_irq_map, sizeof(full_irq_map));
161
+ qemu_fdt_setprop(fdt, nodename, "interrupt-map", full_irq_map,
162
+ GPEX_NUM_IRQS * GPEX_NUM_IRQS *
163
+ irq_map_stride * sizeof(uint32_t));
164
165
qemu_fdt_setprop_cells(fdt, nodename, "interrupt-map-mask",
166
0x1800, 0, 0, 0x7);
167
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
168
plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket);
169
plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr);
170
qemu_fdt_add_subnode(mc->fdt, plic_name);
171
- qemu_fdt_setprop_cell(mc->fdt, plic_name,
172
- "#address-cells", FDT_PLIC_ADDR_CELLS);
173
qemu_fdt_setprop_cell(mc->fdt, plic_name,
174
"#interrupt-cells", FDT_PLIC_INT_CELLS);
175
qemu_fdt_setprop_string_array(mc->fdt, plic_name, "compatible",
176
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
177
g_free(plic_cells);
178
}
179
180
+static void create_fdt_socket_aia(RISCVVirtState *s,
181
+ const MemMapEntry *memmap, int socket,
182
+ uint32_t *phandle, uint32_t *intc_phandles,
183
+ uint32_t *aplic_phandles)
184
+{
185
+ int cpu;
186
+ char *aplic_name;
187
+ uint32_t *aplic_cells;
188
+ unsigned long aplic_addr;
189
+ MachineState *mc = MACHINE(s);
190
+ uint32_t aplic_m_phandle, aplic_s_phandle;
191
+
192
+ aplic_m_phandle = (*phandle)++;
193
+ aplic_s_phandle = (*phandle)++;
194
+ aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
195
+
196
+ /* M-level APLIC node */
197
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
198
+ aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
199
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
200
+ }
201
+ aplic_addr = memmap[VIRT_APLIC_M].base +
202
+ (memmap[VIRT_APLIC_M].size * socket);
203
+ aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
204
+ qemu_fdt_add_subnode(mc->fdt, aplic_name);
205
+ qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
206
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name,
207
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
208
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
209
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
210
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
211
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
212
+ 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
213
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
214
+ VIRT_IRQCHIP_NUM_SOURCES);
215
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,children",
216
+ aplic_s_phandle);
217
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "riscv,delegate",
218
+ aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
219
+ riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
220
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_m_phandle);
221
+ g_free(aplic_name);
222
+
223
+ /* S-level APLIC node */
224
+ for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
225
+ aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
226
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
227
+ }
228
+ aplic_addr = memmap[VIRT_APLIC_S].base +
229
+ (memmap[VIRT_APLIC_S].size * socket);
230
+ aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
231
+ qemu_fdt_add_subnode(mc->fdt, aplic_name);
232
+ qemu_fdt_setprop_string(mc->fdt, aplic_name, "compatible", "riscv,aplic");
233
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name,
234
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
235
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
236
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
237
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
238
+ qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
239
+ 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
240
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
241
+ VIRT_IRQCHIP_NUM_SOURCES);
242
+ riscv_socket_fdt_write_id(mc, mc->fdt, aplic_name, socket);
243
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "phandle", aplic_s_phandle);
244
+ g_free(aplic_name);
245
+
246
+ g_free(aplic_cells);
247
+ aplic_phandles[socket] = aplic_s_phandle;
248
+}
249
+
250
static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
251
bool is_32_bit, uint32_t *phandle,
252
uint32_t *irq_mmio_phandle,
253
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
254
}
240
}
255
}
241
}
256
242
257
- create_fdt_socket_plic(s, memmap, socket, phandle,
243
+
258
- intc_phandles, xplic_phandles);
244
+ if (pass == S_STAGE) {
259
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
245
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
260
+ create_fdt_socket_plic(s, memmap, socket, phandle,
261
+ intc_phandles, xplic_phandles);
262
+ } else {
246
+ } else {
263
+ create_fdt_socket_aia(s, memmap, socket, phandle,
247
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
264
+ intc_phandles, xplic_phandles);
248
+ }
265
+ }
249
+
266
250
/* Read page table entry */
267
g_free(intc_phandles);
251
if (sc[pass].ptesize == 4) {
268
g_free(clust_name);
252
uint32_t pte32 = 0;
269
@@ -XXX,XX +XXX,XX @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
253
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
270
0x0, memmap[VIRT_VIRTIO].size);
254
271
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
255
/* Device directory tree walk */
272
irq_virtio_phandle);
256
for (; depth-- > 0; ) {
273
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", VIRTIO_IRQ + i);
257
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
274
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
258
/*
275
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts",
259
* Select device id index bits based on device directory tree level
276
+ VIRTIO_IRQ + i);
260
* and device context format.
277
+ } else {
261
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
278
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts",
262
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
279
+ VIRTIO_IRQ + i, 0x4);
263
}
280
+ }
264
281
g_free(name);
265
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
282
}
266
+
267
/* index into device context entry page */
268
addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
269
270
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
271
}
272
273
for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
274
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
275
+
276
/*
277
* Select process id index bits based on process directory tree
278
* level. See IOMMU Specification, 2.2. Process-Directory-Table.
279
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
280
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
281
}
282
283
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
284
+
285
/* Leaf entry in PDT */
286
addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
287
if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
288
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
289
GHashTable *iot_cache;
290
int fault;
291
292
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
293
+
294
iot_cache = g_hash_table_ref(s->iot_cache);
295
/*
296
* TC[32] is reserved for custom extensions, used here to temporarily
297
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
298
299
/* Check for ATS request. */
300
if (iotlb->perm == IOMMU_NONE) {
301
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
302
/* Check if ATS is disabled. */
303
if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
304
enable_pri = false;
305
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
306
goto done;
307
}
308
309
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
310
+
311
/* Translate using device directory / page table information. */
312
fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
313
314
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
315
memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
316
"riscv-iommu-trap", ~0ULL);
317
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
318
+
319
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
320
+ s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
321
+ }
283
}
322
}
284
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
323
285
2, virt_high_pcie_memmap.base,
324
static void riscv_iommu_unrealize(DeviceState *dev)
286
2, virt_high_pcie_memmap.base, 2, virt_high_pcie_memmap.size);
325
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_unrealize(DeviceState *dev)
287
326
288
- create_pcie_irq_map(mc->fdt, name, irq_pcie_phandle);
327
g_hash_table_unref(s->iot_cache);
289
+ create_pcie_irq_map(s, mc->fdt, name, irq_pcie_phandle);
328
g_hash_table_unref(s->ctx_cache);
290
g_free(name);
329
+
330
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
331
+ g_hash_table_unref(s->hpm_event_ctr_map);
332
+ }
291
}
333
}
292
334
293
@@ -XXX,XX +XXX,XX @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
335
void riscv_iommu_reset(RISCVIOMMUState *s)
294
0x0, memmap[VIRT_UART0].size);
295
qemu_fdt_setprop_cell(mc->fdt, name, "clock-frequency", 3686400);
296
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent", irq_mmio_phandle);
297
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
298
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
299
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", UART0_IRQ);
300
+ } else {
301
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", UART0_IRQ, 0x4);
302
+ }
303
304
qemu_fdt_add_subnode(mc->fdt, "/chosen");
305
qemu_fdt_setprop_string(mc->fdt, "/chosen", "stdout-path", name);
306
@@ -XXX,XX +XXX,XX @@ static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
307
0x0, memmap[VIRT_RTC].base, 0x0, memmap[VIRT_RTC].size);
308
qemu_fdt_setprop_cell(mc->fdt, name, "interrupt-parent",
309
irq_mmio_phandle);
310
- qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
311
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
312
+ qemu_fdt_setprop_cell(mc->fdt, name, "interrupts", RTC_IRQ);
313
+ } else {
314
+ qemu_fdt_setprop_cells(mc->fdt, name, "interrupts", RTC_IRQ, 0x4);
315
+ }
316
g_free(name);
317
}
318
319
@@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
320
hwaddr high_mmio_base,
321
hwaddr high_mmio_size,
322
hwaddr pio_base,
323
- DeviceState *plic)
324
+ DeviceState *irqchip)
325
{
326
DeviceState *dev;
327
MemoryRegion *ecam_alias, *ecam_reg;
328
@@ -XXX,XX +XXX,XX @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem,
329
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, pio_base);
330
331
for (i = 0; i < GPEX_NUM_IRQS; i++) {
332
- irq = qdev_get_gpio_in(plic, PCIE_IRQ + i);
333
+ irq = qdev_get_gpio_in(irqchip, PCIE_IRQ + i);
334
335
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, irq);
336
gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i);
337
@@ -XXX,XX +XXX,XX @@ static FWCfgState *create_fw_cfg(const MachineState *mc)
338
return fw_cfg;
339
}
340
341
+static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
342
+ int base_hartid, int hart_count)
343
+{
344
+ DeviceState *ret;
345
+ char *plic_hart_config;
346
+
347
+ /* Per-socket PLIC hart topology configuration string */
348
+ plic_hart_config = riscv_plic_hart_config_string(hart_count);
349
+
350
+ /* Per-socket PLIC */
351
+ ret = sifive_plic_create(
352
+ memmap[VIRT_PLIC].base + socket * memmap[VIRT_PLIC].size,
353
+ plic_hart_config, hart_count, base_hartid,
354
+ VIRT_IRQCHIP_NUM_SOURCES,
355
+ ((1U << VIRT_IRQCHIP_NUM_PRIO_BITS) - 1),
356
+ VIRT_PLIC_PRIORITY_BASE,
357
+ VIRT_PLIC_PENDING_BASE,
358
+ VIRT_PLIC_ENABLE_BASE,
359
+ VIRT_PLIC_ENABLE_STRIDE,
360
+ VIRT_PLIC_CONTEXT_BASE,
361
+ VIRT_PLIC_CONTEXT_STRIDE,
362
+ memmap[VIRT_PLIC].size);
363
+
364
+ g_free(plic_hart_config);
365
+
366
+ return ret;
367
+}
368
+
369
+static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type,
370
+ const MemMapEntry *memmap, int socket,
371
+ int base_hartid, int hart_count)
372
+{
373
+ DeviceState *aplic_m;
374
+
375
+ /* Per-socket M-level APLIC */
376
+ aplic_m = riscv_aplic_create(
377
+ memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
378
+ memmap[VIRT_APLIC_M].size,
379
+ base_hartid, hart_count,
380
+ VIRT_IRQCHIP_NUM_SOURCES,
381
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
382
+ false, true, NULL);
383
+
384
+ if (aplic_m) {
385
+ /* Per-socket S-level APLIC */
386
+ riscv_aplic_create(
387
+ memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
388
+ memmap[VIRT_APLIC_S].size,
389
+ base_hartid, hart_count,
390
+ VIRT_IRQCHIP_NUM_SOURCES,
391
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
392
+ false, false, aplic_m);
393
+ }
394
+
395
+ return aplic_m;
396
+}
397
+
398
static void virt_machine_init(MachineState *machine)
399
{
400
const MemMapEntry *memmap = virt_memmap;
401
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
402
MemoryRegion *system_memory = get_system_memory();
403
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
404
- char *plic_hart_config, *soc_name;
405
+ char *soc_name;
406
target_ulong start_addr = memmap[VIRT_DRAM].base;
407
target_ulong firmware_end_addr, kernel_start_addr;
408
uint32_t fdt_load_addr;
409
uint64_t kernel_entry;
410
- DeviceState *mmio_plic, *virtio_plic, *pcie_plic;
411
+ DeviceState *mmio_irqchip, *virtio_irqchip, *pcie_irqchip;
412
int i, base_hartid, hart_count;
413
414
/* Check socket count limit */
415
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
416
}
417
418
/* Initialize sockets */
419
- mmio_plic = virtio_plic = pcie_plic = NULL;
420
+ mmio_irqchip = virtio_irqchip = pcie_irqchip = NULL;
421
for (i = 0; i < riscv_socket_count(machine); i++) {
422
if (!riscv_socket_check_hartids(machine, i)) {
423
error_report("discontinuous hartids in socket%d", i);
424
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
425
}
426
}
427
428
- /* Per-socket PLIC hart topology configuration string */
429
- plic_hart_config = riscv_plic_hart_config_string(hart_count);
430
-
431
- /* Per-socket PLIC */
432
- s->plic[i] = sifive_plic_create(
433
- memmap[VIRT_PLIC].base + i * memmap[VIRT_PLIC].size,
434
- plic_hart_config, hart_count, base_hartid,
435
- VIRT_PLIC_NUM_SOURCES,
436
- VIRT_PLIC_NUM_PRIORITIES,
437
- VIRT_PLIC_PRIORITY_BASE,
438
- VIRT_PLIC_PENDING_BASE,
439
- VIRT_PLIC_ENABLE_BASE,
440
- VIRT_PLIC_ENABLE_STRIDE,
441
- VIRT_PLIC_CONTEXT_BASE,
442
- VIRT_PLIC_CONTEXT_STRIDE,
443
- memmap[VIRT_PLIC].size);
444
- g_free(plic_hart_config);
445
+ /* Per-socket interrupt controller */
446
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
447
+ s->irqchip[i] = virt_create_plic(memmap, i,
448
+ base_hartid, hart_count);
449
+ } else {
450
+ s->irqchip[i] = virt_create_aia(s->aia_type, memmap, i,
451
+ base_hartid, hart_count);
452
+ }
453
454
- /* Try to use different PLIC instance based device type */
455
+ /* Try to use different IRQCHIP instance based device type */
456
if (i == 0) {
457
- mmio_plic = s->plic[i];
458
- virtio_plic = s->plic[i];
459
- pcie_plic = s->plic[i];
460
+ mmio_irqchip = s->irqchip[i];
461
+ virtio_irqchip = s->irqchip[i];
462
+ pcie_irqchip = s->irqchip[i];
463
}
464
if (i == 1) {
465
- virtio_plic = s->plic[i];
466
- pcie_plic = s->plic[i];
467
+ virtio_irqchip = s->irqchip[i];
468
+ pcie_irqchip = s->irqchip[i];
469
}
470
if (i == 2) {
471
- pcie_plic = s->plic[i];
472
+ pcie_irqchip = s->irqchip[i];
473
}
474
}
475
476
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
477
for (i = 0; i < VIRTIO_COUNT; i++) {
478
sysbus_create_simple("virtio-mmio",
479
memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
480
- qdev_get_gpio_in(DEVICE(virtio_plic), VIRTIO_IRQ + i));
481
+ qdev_get_gpio_in(DEVICE(virtio_irqchip), VIRTIO_IRQ + i));
482
}
483
484
gpex_pcie_init(system_memory,
485
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
486
virt_high_pcie_memmap.base,
487
virt_high_pcie_memmap.size,
488
memmap[VIRT_PCIE_PIO].base,
489
- DEVICE(pcie_plic));
490
+ DEVICE(pcie_irqchip));
491
492
serial_mm_init(system_memory, memmap[VIRT_UART0].base,
493
- 0, qdev_get_gpio_in(DEVICE(mmio_plic), UART0_IRQ), 399193,
494
+ 0, qdev_get_gpio_in(DEVICE(mmio_irqchip), UART0_IRQ), 399193,
495
serial_hd(0), DEVICE_LITTLE_ENDIAN);
496
497
sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
498
- qdev_get_gpio_in(DEVICE(mmio_plic), RTC_IRQ));
499
+ qdev_get_gpio_in(DEVICE(mmio_irqchip), RTC_IRQ));
500
501
virt_flash_create(s);
502
503
@@ -XXX,XX +XXX,XX @@ static void virt_machine_instance_init(Object *obj)
504
{
505
}
506
507
+static char *virt_get_aia(Object *obj, Error **errp)
508
+{
509
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
510
+ const char *val;
511
+
512
+ switch (s->aia_type) {
513
+ case VIRT_AIA_TYPE_APLIC:
514
+ val = "aplic";
515
+ break;
516
+ default:
517
+ val = "none";
518
+ break;
519
+ };
520
+
521
+ return g_strdup(val);
522
+}
523
+
524
+static void virt_set_aia(Object *obj, const char *val, Error **errp)
525
+{
526
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
527
+
528
+ if (!strcmp(val, "none")) {
529
+ s->aia_type = VIRT_AIA_TYPE_NONE;
530
+ } else if (!strcmp(val, "aplic")) {
531
+ s->aia_type = VIRT_AIA_TYPE_APLIC;
532
+ } else {
533
+ error_setg(errp, "Invalid AIA interrupt controller type");
534
+ error_append_hint(errp, "Valid values are none, and aplic.\n");
535
+ }
536
+}
537
+
538
static bool virt_get_aclint(Object *obj, Error **errp)
539
{
540
MachineState *ms = MACHINE(obj);
541
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
542
object_class_property_set_description(oc, "aclint",
543
"Set on/off to enable/disable "
544
"emulating ACLINT devices");
545
+
546
+ object_class_property_add_str(oc, "aia", virt_get_aia,
547
+ virt_set_aia);
548
+ object_class_property_set_description(oc, "aia",
549
+ "Set type of AIA interrupt "
550
+ "conttoller. Valid values are "
551
+ "none, and aplic.");
552
}
553
554
static const TypeInfo virt_machine_typeinfo = {
555
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
556
index XXXXXXX..XXXXXXX 100644
557
--- a/hw/riscv/Kconfig
558
+++ b/hw/riscv/Kconfig
559
@@ -XXX,XX +XXX,XX @@ config RISCV_VIRT
560
select PFLASH_CFI01
561
select SERIAL
562
select RISCV_ACLINT
563
+ select RISCV_APLIC
564
select SIFIVE_PLIC
565
select SIFIVE_TEST
566
select VIRTIO_MMIO
567
--
336
--
568
2.35.1
337
2.48.1
diff view generated by jsdifflib
New patch
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
2
3
The next HPM related changes requires the HPM overflow timer to be
4
initialized by the riscv-iommu base emulation.
5
6
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250224190826.1858473-6-dbarboza@ventanamicro.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
hw/riscv/riscv-iommu-hpm.h | 1 +
13
hw/riscv/riscv-iommu.h | 2 ++
14
hw/riscv/riscv-iommu-hpm.c | 36 ++++++++++++++++++++++++++++++++++++
15
hw/riscv/riscv-iommu.c | 3 +++
16
4 files changed, 42 insertions(+)
17
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu-hpm.h
21
+++ b/hw/riscv/riscv-iommu-hpm.h
22
@@ -XXX,XX +XXX,XX @@
23
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
24
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
25
unsigned event_id);
26
+void riscv_iommu_hpm_timer_cb(void *priv);
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu.h
32
+++ b/hw/riscv/riscv-iommu.h
33
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
34
QLIST_HEAD(, RISCVIOMMUSpace) spaces;
35
36
/* HPM cycle counter */
37
+ QEMUTimer *hpm_timer;
38
uint64_t hpmcycle_val; /* Current value of cycle register */
39
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
40
+ uint64_t irq_overflow_left; /* Value beyond INT64_MAX after overflow */
41
42
/* HPM event counters */
43
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
44
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/hw/riscv/riscv-iommu-hpm.c
47
+++ b/hw/riscv/riscv-iommu-hpm.c
48
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
49
hpm_incr_ctr(s, ctr_idx);
50
}
51
}
52
+
53
+/* Timer callback for cycle counter overflow. */
54
+void riscv_iommu_hpm_timer_cb(void *priv)
55
+{
56
+ RISCVIOMMUState *s = priv;
57
+ const uint32_t inhibit = riscv_iommu_reg_get32(
58
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
59
+ uint32_t ovf;
60
+
61
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
62
+ return;
63
+ }
64
+
65
+ if (s->irq_overflow_left > 0) {
66
+ uint64_t irq_trigger_at =
67
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
68
+ timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
69
+ s->irq_overflow_left = 0;
70
+ return;
71
+ }
72
+
73
+ ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
74
+ if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
75
+ /*
76
+ * We don't need to set hpmcycle_val to zero and update hpmcycle_prev to
77
+ * current clock value. The way we calculate iohpmcycs will overflow
78
+ * and return the correct value. This avoids the need to synchronize
79
+ * timer callback and write callback.
80
+ */
81
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
82
+ RISCV_IOMMU_IOCOUNTOVF_CY, 0);
83
+ riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
84
+ RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
85
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
86
+ }
87
+}
88
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/hw/riscv/riscv-iommu.c
91
+++ b/hw/riscv/riscv-iommu.c
92
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
93
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
94
95
if (s->cap & RISCV_IOMMU_CAP_HPM) {
96
+ s->hpm_timer =
97
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, riscv_iommu_hpm_timer_cb, s);
98
s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
99
}
100
}
101
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_unrealize(DeviceState *dev)
102
103
if (s->cap & RISCV_IOMMU_CAP_HPM) {
104
g_hash_table_unref(s->hpm_event_ctr_map);
105
+ timer_free(s->hpm_timer);
106
}
107
}
108
109
--
110
2.48.1
diff view generated by jsdifflib
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
2
2
3
-- update extension check REQUIRE_ZDINX_OR_D
3
RISCV_IOMMU_REG_IOCOUNTINH is done by riscv_iommu_process_iocntinh_cy(),
4
-- update double float point register read/write
4
which is called during riscv_iommu_mmio_write() callback via a new
5
riscv_iommu_pricess_hpm_writes() helper.
5
6
6
Co-authored-by: ardxwe <ardxwe@gmail.com>
7
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
7
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
9
Acked-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-ID: <20250224190826.1858473-7-dbarboza@ventanamicro.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-Id: <20220211043920.28981-5-liweiwei@iscas.ac.cn>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
12
---
14
target/riscv/translate.c | 52 +++++
13
hw/riscv/riscv-iommu-hpm.h | 1 +
15
target/riscv/insn_trans/trans_rvd.c.inc | 285 +++++++++++++++++-------
14
hw/riscv/riscv-iommu-hpm.c | 60 ++++++++++++++++++++++++++++++++++++++
16
2 files changed, 259 insertions(+), 78 deletions(-)
15
hw/riscv/riscv-iommu.c | 38 ++++++++++++++++++++++++
16
3 files changed, 99 insertions(+)
17
17
18
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
19
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/translate.c
20
--- a/hw/riscv/riscv-iommu-hpm.h
21
+++ b/target/riscv/translate.c
21
+++ b/hw/riscv/riscv-iommu-hpm.h
22
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num)
22
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
23
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
24
unsigned event_id);
25
void riscv_iommu_hpm_timer_cb(void *priv);
26
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu-hpm.c
32
+++ b/hw/riscv/riscv-iommu-hpm.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_timer_cb(void *priv)
34
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
23
}
35
}
24
}
36
}
25
37
+
26
+static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num)
38
+static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
27
+{
39
+{
28
+ if (!ctx->cfg_ptr->ext_zfinx) {
40
+ const uint32_t inhibit = riscv_iommu_reg_get32(
29
+ return cpu_fpr[reg_num];
41
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
42
+ uint64_t overflow_at, overflow_ns;
43
+
44
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
45
+ return;
30
+ }
46
+ }
31
+
47
+
32
+ if (reg_num == 0) {
48
+ /*
33
+ return tcg_constant_i64(0);
49
+ * We are using INT64_MAX here instead to UINT64_MAX because cycle counter
50
+ * has 63-bit precision and INT64_MAX is the maximum it can store.
51
+ */
52
+ if (value) {
53
+ overflow_ns = INT64_MAX - value + 1;
54
+ } else {
55
+ overflow_ns = INT64_MAX;
34
+ }
56
+ }
35
+ switch (get_xl(ctx)) {
57
+
36
+ case MXL_RV32:
58
+ overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
37
+ {
59
+
38
+ TCGv_i64 t = ftemp_new(ctx);
60
+ if (overflow_at > INT64_MAX) {
39
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]);
61
+ s->irq_overflow_left = overflow_at - INT64_MAX;
40
+ return t;
62
+ overflow_at = INT64_MAX;
41
+ }
63
+ }
42
+#ifdef TARGET_RISCV64
64
+
43
+ case MXL_RV64:
65
+ timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
44
+ return cpu_gpr[reg_num];
66
+}
45
+#endif
67
+
46
+ default:
68
+/* Updates the internal cycle counter state when iocntinh:CY is changed. */
47
+ g_assert_not_reached();
69
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
70
+{
71
+ const uint32_t inhibit = riscv_iommu_reg_get32(
72
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
73
+
74
+ /* We only need to process CY bit toggle. */
75
+ if (!(inhibit ^ prev_cy_inh)) {
76
+ return;
77
+ }
78
+
79
+ if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
80
+ /*
81
+ * Cycle counter is enabled. Just start the timer again and update
82
+ * the clock snapshot value to point to the current time to make
83
+ * sure iohpmcycles read is correct.
84
+ */
85
+ s->hpmcycle_prev = get_cycles();
86
+ hpm_setup_timer(s, s->hpmcycle_val);
87
+ } else {
88
+ /*
89
+ * Cycle counter is disabled. Stop the timer and update the cycle
90
+ * counter to record the current value which is last programmed
91
+ * value + the cycles passed so far.
92
+ */
93
+ s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
94
+ timer_del(s->hpm_timer);
95
+ }
96
+}
97
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/hw/riscv/riscv-iommu.c
100
+++ b/hw/riscv/riscv-iommu.c
101
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
102
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr);
103
}
104
105
+static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
106
+ uint32_t regb,
107
+ bool prev_cy_inh)
108
+{
109
+ switch (regb) {
110
+ case RISCV_IOMMU_REG_IOCOUNTINH:
111
+ riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
112
+ break;
113
+
114
+ case RISCV_IOMMU_REG_IOHPMCYCLES:
115
+ case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
116
+ /* not yet implemented */
117
+ break;
118
+
119
+ case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
120
+ RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
121
+ /* not yet implemented */
122
+ break;
48
+ }
123
+ }
49
+}
124
+}
50
+
125
+
51
static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num)
126
/*
52
{
127
* Write the resulting value of 'data' for the reg specified
53
if (!ctx->cfg_ptr->ext_zfinx) {
128
* by 'reg_addr', after considering read-only/read-write/write-clear
54
@@ -XXX,XX +XXX,XX @@ static void gen_set_fpr_hs(DisasContext *ctx, int reg_num, TCGv_i64 t)
129
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
130
uint32_t regb = addr & ~3;
131
uint32_t busy = 0;
132
uint64_t val = 0;
133
+ bool cy_inh = false;
134
135
if ((addr & (size - 1)) != 0) {
136
/* Unsupported MMIO alignment or access size */
137
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
138
busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
139
break;
140
141
+ case RISCV_IOMMU_REG_IOCOUNTINH:
142
+ if (addr != RISCV_IOMMU_REG_IOCOUNTINH) {
143
+ break;
144
+ }
145
+ /* Store previous value of CY bit. */
146
+ cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) &
147
+ RISCV_IOMMU_IOCOUNTINH_CY);
148
+ break;
149
+
150
+
151
default:
152
break;
55
}
153
}
56
}
154
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
57
155
stl_le_p(&s->regs_rw[regb], rw | busy);
58
+static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
156
}
59
+{
157
60
+ if (!ctx->cfg_ptr->ext_zfinx) {
158
+ /* Process HPM writes and update any internal state if needed. */
61
+ tcg_gen_mov_i64(cpu_fpr[reg_num], t);
159
+ if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF &&
62
+ return;
160
+ regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) {
161
+ riscv_iommu_process_hpm_writes(s, regb, cy_inh);
63
+ }
162
+ }
64
+
163
+
65
+ if (reg_num != 0) {
164
if (process_fn) {
66
+ switch (get_xl(ctx)) {
165
process_fn(s);
67
+ case MXL_RV32:
68
+#ifdef TARGET_RISCV32
69
+ tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t);
70
+ break;
71
+#else
72
+ tcg_gen_ext32s_i64(cpu_gpr[reg_num], t);
73
+ tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32);
74
+ break;
75
+ case MXL_RV64:
76
+ tcg_gen_mov_i64(cpu_gpr[reg_num], t);
77
+ break;
78
+#endif
79
+ default:
80
+ g_assert_not_reached();
81
+ }
82
+ }
83
+}
84
+
85
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
86
{
87
target_ulong next_pc;
88
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/riscv/insn_trans/trans_rvd.c.inc
91
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
92
@@ -XXX,XX +XXX,XX @@
93
* this program. If not, see <http://www.gnu.org/licenses/>.
94
*/
95
96
+#define REQUIRE_ZDINX_OR_D(ctx) do { \
97
+ if (!ctx->cfg_ptr->ext_zdinx) { \
98
+ REQUIRE_EXT(ctx, RVD); \
99
+ } \
100
+} while (0)
101
+
102
+#define REQUIRE_EVEN(ctx, reg) do { \
103
+ if (ctx->cfg_ptr->ext_zdinx && (get_xl(ctx) == MXL_RV32) && \
104
+ ((reg) & 0x1)) { \
105
+ return false; \
106
+ } \
107
+} while (0)
108
+
109
static bool trans_fld(DisasContext *ctx, arg_fld *a)
110
{
111
TCGv addr;
112
@@ -XXX,XX +XXX,XX @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
113
static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
114
{
115
REQUIRE_FPU;
116
- REQUIRE_EXT(ctx, RVD);
117
+ REQUIRE_ZDINX_OR_D(ctx);
118
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
119
+
120
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
121
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
122
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
123
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
124
+
125
gen_set_rm(ctx, a->rm);
126
- gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
127
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
128
+ gen_helper_fmadd_d(dest, cpu_env, src1, src2, src3);
129
+ gen_set_fpr_d(ctx, a->rd, dest);
130
mark_fs_dirty(ctx);
131
return true;
132
}
133
@@ -XXX,XX +XXX,XX @@ static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
134
static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
135
{
136
REQUIRE_FPU;
137
- REQUIRE_EXT(ctx, RVD);
138
+ REQUIRE_ZDINX_OR_D(ctx);
139
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
140
+
141
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
142
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
143
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
144
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
145
+
146
gen_set_rm(ctx, a->rm);
147
- gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
148
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
149
+ gen_helper_fmsub_d(dest, cpu_env, src1, src2, src3);
150
+ gen_set_fpr_d(ctx, a->rd, dest);
151
mark_fs_dirty(ctx);
152
return true;
153
}
154
@@ -XXX,XX +XXX,XX @@ static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
155
static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
156
{
157
REQUIRE_FPU;
158
- REQUIRE_EXT(ctx, RVD);
159
+ REQUIRE_ZDINX_OR_D(ctx);
160
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
161
+
162
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
163
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
164
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
165
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
166
+
167
gen_set_rm(ctx, a->rm);
168
- gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
169
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
170
+ gen_helper_fnmsub_d(dest, cpu_env, src1, src2, src3);
171
+ gen_set_fpr_d(ctx, a->rd, dest);
172
mark_fs_dirty(ctx);
173
return true;
174
}
175
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
176
static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
177
{
178
REQUIRE_FPU;
179
- REQUIRE_EXT(ctx, RVD);
180
+ REQUIRE_ZDINX_OR_D(ctx);
181
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2 | a->rs3);
182
+
183
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
184
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
185
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
186
+ TCGv_i64 src3 = get_fpr_d(ctx, a->rs3);
187
+
188
gen_set_rm(ctx, a->rm);
189
- gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
190
- cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
191
+ gen_helper_fnmadd_d(dest, cpu_env, src1, src2, src3);
192
+ gen_set_fpr_d(ctx, a->rd, dest);
193
mark_fs_dirty(ctx);
194
return true;
195
}
196
@@ -XXX,XX +XXX,XX @@ static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
197
static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
198
{
199
REQUIRE_FPU;
200
- REQUIRE_EXT(ctx, RVD);
201
+ REQUIRE_ZDINX_OR_D(ctx);
202
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
203
204
- gen_set_rm(ctx, a->rm);
205
- gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
206
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
207
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
208
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
209
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
210
211
+ gen_set_rm(ctx, a->rm);
212
+ gen_helper_fadd_d(dest, cpu_env, src1, src2);
213
+ gen_set_fpr_d(ctx, a->rd, dest);
214
mark_fs_dirty(ctx);
215
return true;
216
}
217
@@ -XXX,XX +XXX,XX @@ static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
218
static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
219
{
220
REQUIRE_FPU;
221
- REQUIRE_EXT(ctx, RVD);
222
+ REQUIRE_ZDINX_OR_D(ctx);
223
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
224
225
- gen_set_rm(ctx, a->rm);
226
- gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
227
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
228
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
229
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
230
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
231
232
+ gen_set_rm(ctx, a->rm);
233
+ gen_helper_fsub_d(dest, cpu_env, src1, src2);
234
+ gen_set_fpr_d(ctx, a->rd, dest);
235
mark_fs_dirty(ctx);
236
return true;
237
}
238
@@ -XXX,XX +XXX,XX @@ static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
239
static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
240
{
241
REQUIRE_FPU;
242
- REQUIRE_EXT(ctx, RVD);
243
+ REQUIRE_ZDINX_OR_D(ctx);
244
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
245
246
- gen_set_rm(ctx, a->rm);
247
- gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
248
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
249
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
250
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
251
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
252
253
+ gen_set_rm(ctx, a->rm);
254
+ gen_helper_fmul_d(dest, cpu_env, src1, src2);
255
+ gen_set_fpr_d(ctx, a->rd, dest);
256
mark_fs_dirty(ctx);
257
return true;
258
}
259
@@ -XXX,XX +XXX,XX @@ static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
260
static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
261
{
262
REQUIRE_FPU;
263
- REQUIRE_EXT(ctx, RVD);
264
+ REQUIRE_ZDINX_OR_D(ctx);
265
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
266
267
- gen_set_rm(ctx, a->rm);
268
- gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
269
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
270
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
271
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
272
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
273
274
+ gen_set_rm(ctx, a->rm);
275
+ gen_helper_fdiv_d(dest, cpu_env, src1, src2);
276
+ gen_set_fpr_d(ctx, a->rd, dest);
277
mark_fs_dirty(ctx);
278
return true;
279
}
280
@@ -XXX,XX +XXX,XX @@ static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
281
static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
282
{
283
REQUIRE_FPU;
284
- REQUIRE_EXT(ctx, RVD);
285
+ REQUIRE_ZDINX_OR_D(ctx);
286
+ REQUIRE_EVEN(ctx, a->rd | a->rs1);
287
288
- gen_set_rm(ctx, a->rm);
289
- gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
290
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
291
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
292
293
+ gen_set_rm(ctx, a->rm);
294
+ gen_helper_fsqrt_d(dest, cpu_env, src1);
295
+ gen_set_fpr_d(ctx, a->rd, dest);
296
mark_fs_dirty(ctx);
297
return true;
298
}
299
300
static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
301
{
302
+ REQUIRE_FPU;
303
+ REQUIRE_ZDINX_OR_D(ctx);
304
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
305
+
306
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
307
if (a->rs1 == a->rs2) { /* FMOV */
308
- tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
309
+ dest = get_fpr_d(ctx, a->rs1);
310
} else {
311
- tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
312
- cpu_fpr[a->rs1], 0, 63);
313
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
314
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
315
+ tcg_gen_deposit_i64(dest, src2, src1, 0, 63);
316
}
166
}
317
+ gen_set_fpr_d(ctx, a->rd, dest);
318
mark_fs_dirty(ctx);
319
return true;
320
}
321
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
322
static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
323
{
324
REQUIRE_FPU;
325
- REQUIRE_EXT(ctx, RVD);
326
+ REQUIRE_ZDINX_OR_D(ctx);
327
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
328
+
329
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
330
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
331
+
332
if (a->rs1 == a->rs2) { /* FNEG */
333
- tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
334
+ tcg_gen_xori_i64(dest, src1, INT64_MIN);
335
} else {
336
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
337
TCGv_i64 t0 = tcg_temp_new_i64();
338
- tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
339
- tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
340
+ tcg_gen_not_i64(t0, src2);
341
+ tcg_gen_deposit_i64(dest, t0, src1, 0, 63);
342
tcg_temp_free_i64(t0);
343
}
344
+ gen_set_fpr_d(ctx, a->rd, dest);
345
mark_fs_dirty(ctx);
346
return true;
347
}
348
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
349
static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
350
{
351
REQUIRE_FPU;
352
- REQUIRE_EXT(ctx, RVD);
353
+ REQUIRE_ZDINX_OR_D(ctx);
354
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
355
+
356
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
357
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
358
+
359
if (a->rs1 == a->rs2) { /* FABS */
360
- tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
361
+ tcg_gen_andi_i64(dest, src1, ~INT64_MIN);
362
} else {
363
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
364
TCGv_i64 t0 = tcg_temp_new_i64();
365
- tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
366
- tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
367
+ tcg_gen_andi_i64(t0, src2, INT64_MIN);
368
+ tcg_gen_xor_i64(dest, src1, t0);
369
tcg_temp_free_i64(t0);
370
}
371
+ gen_set_fpr_d(ctx, a->rd, dest);
372
mark_fs_dirty(ctx);
373
return true;
374
}
375
@@ -XXX,XX +XXX,XX @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
376
static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
377
{
378
REQUIRE_FPU;
379
- REQUIRE_EXT(ctx, RVD);
380
+ REQUIRE_ZDINX_OR_D(ctx);
381
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
382
383
- gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
384
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
385
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
386
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
387
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
388
389
+ gen_helper_fmin_d(dest, cpu_env, src1, src2);
390
+ gen_set_fpr_d(ctx, a->rd, dest);
391
mark_fs_dirty(ctx);
392
return true;
393
}
394
@@ -XXX,XX +XXX,XX @@ static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
395
static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
396
{
397
REQUIRE_FPU;
398
- REQUIRE_EXT(ctx, RVD);
399
+ REQUIRE_ZDINX_OR_D(ctx);
400
+ REQUIRE_EVEN(ctx, a->rd | a->rs1 | a->rs2);
401
402
- gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
403
- cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
404
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
405
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
406
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
407
408
+ gen_helper_fmax_d(dest, cpu_env, src1, src2);
409
+ gen_set_fpr_d(ctx, a->rd, dest);
410
mark_fs_dirty(ctx);
411
return true;
412
}
413
@@ -XXX,XX +XXX,XX @@ static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
414
static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
415
{
416
REQUIRE_FPU;
417
- REQUIRE_EXT(ctx, RVD);
418
+ REQUIRE_ZDINX_OR_D(ctx);
419
+ REQUIRE_EVEN(ctx, a->rs1);
420
421
- gen_set_rm(ctx, a->rm);
422
- gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
423
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
424
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
425
426
+ gen_set_rm(ctx, a->rm);
427
+ gen_helper_fcvt_s_d(dest, cpu_env, src1);
428
+ gen_set_fpr_hs(ctx, a->rd, dest);
429
mark_fs_dirty(ctx);
430
return true;
431
}
432
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
433
static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
434
{
435
REQUIRE_FPU;
436
- REQUIRE_EXT(ctx, RVD);
437
+ REQUIRE_ZDINX_OR_D(ctx);
438
+ REQUIRE_EVEN(ctx, a->rd);
439
440
- gen_set_rm(ctx, a->rm);
441
- gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);
442
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
443
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
444
445
+ gen_set_rm(ctx, a->rm);
446
+ gen_helper_fcvt_d_s(dest, cpu_env, src1);
447
+ gen_set_fpr_d(ctx, a->rd, dest);
448
mark_fs_dirty(ctx);
449
return true;
450
}
451
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
452
static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
453
{
454
REQUIRE_FPU;
455
- REQUIRE_EXT(ctx, RVD);
456
+ REQUIRE_ZDINX_OR_D(ctx);
457
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
458
459
TCGv dest = dest_gpr(ctx, a->rd);
460
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
461
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
462
463
- gen_helper_feq_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
464
+ gen_helper_feq_d(dest, cpu_env, src1, src2);
465
gen_set_gpr(ctx, a->rd, dest);
466
return true;
467
}
468
@@ -XXX,XX +XXX,XX @@ static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
469
static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
470
{
471
REQUIRE_FPU;
472
- REQUIRE_EXT(ctx, RVD);
473
+ REQUIRE_ZDINX_OR_D(ctx);
474
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
475
476
TCGv dest = dest_gpr(ctx, a->rd);
477
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
478
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
479
480
- gen_helper_flt_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
481
+ gen_helper_flt_d(dest, cpu_env, src1, src2);
482
gen_set_gpr(ctx, a->rd, dest);
483
return true;
484
}
485
@@ -XXX,XX +XXX,XX @@ static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
486
static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
487
{
488
REQUIRE_FPU;
489
- REQUIRE_EXT(ctx, RVD);
490
+ REQUIRE_ZDINX_OR_D(ctx);
491
+ REQUIRE_EVEN(ctx, a->rs1 | a->rs2);
492
493
TCGv dest = dest_gpr(ctx, a->rd);
494
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
495
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
496
497
- gen_helper_fle_d(dest, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
498
+ gen_helper_fle_d(dest, cpu_env, src1, src2);
499
gen_set_gpr(ctx, a->rd, dest);
500
return true;
501
}
502
@@ -XXX,XX +XXX,XX @@ static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
503
static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
504
{
505
REQUIRE_FPU;
506
- REQUIRE_EXT(ctx, RVD);
507
+ REQUIRE_ZDINX_OR_D(ctx);
508
+ REQUIRE_EVEN(ctx, a->rs1);
509
510
TCGv dest = dest_gpr(ctx, a->rd);
511
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
512
513
- gen_helper_fclass_d(dest, cpu_fpr[a->rs1]);
514
+ gen_helper_fclass_d(dest, src1);
515
gen_set_gpr(ctx, a->rd, dest);
516
return true;
517
}
518
@@ -XXX,XX +XXX,XX @@ static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
519
static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
520
{
521
REQUIRE_FPU;
522
- REQUIRE_EXT(ctx, RVD);
523
+ REQUIRE_ZDINX_OR_D(ctx);
524
+ REQUIRE_EVEN(ctx, a->rs1);
525
526
TCGv dest = dest_gpr(ctx, a->rd);
527
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
528
529
gen_set_rm(ctx, a->rm);
530
- gen_helper_fcvt_w_d(dest, cpu_env, cpu_fpr[a->rs1]);
531
+ gen_helper_fcvt_w_d(dest, cpu_env, src1);
532
gen_set_gpr(ctx, a->rd, dest);
533
return true;
534
}
535
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
536
static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
537
{
538
REQUIRE_FPU;
539
- REQUIRE_EXT(ctx, RVD);
540
+ REQUIRE_ZDINX_OR_D(ctx);
541
+ REQUIRE_EVEN(ctx, a->rs1);
542
543
TCGv dest = dest_gpr(ctx, a->rd);
544
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
545
546
gen_set_rm(ctx, a->rm);
547
- gen_helper_fcvt_wu_d(dest, cpu_env, cpu_fpr[a->rs1]);
548
+ gen_helper_fcvt_wu_d(dest, cpu_env, src1);
549
gen_set_gpr(ctx, a->rd, dest);
550
return true;
551
}
552
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
553
static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
554
{
555
REQUIRE_FPU;
556
- REQUIRE_EXT(ctx, RVD);
557
+ REQUIRE_ZDINX_OR_D(ctx);
558
+ REQUIRE_EVEN(ctx, a->rd);
559
560
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
561
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
562
563
gen_set_rm(ctx, a->rm);
564
- gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, src);
565
+ gen_helper_fcvt_d_w(dest, cpu_env, src);
566
+ gen_set_fpr_d(ctx, a->rd, dest);
567
568
mark_fs_dirty(ctx);
569
return true;
570
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
571
static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
572
{
573
REQUIRE_FPU;
574
- REQUIRE_EXT(ctx, RVD);
575
+ REQUIRE_ZDINX_OR_D(ctx);
576
+ REQUIRE_EVEN(ctx, a->rd);
577
578
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
579
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
580
581
gen_set_rm(ctx, a->rm);
582
- gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, src);
583
+ gen_helper_fcvt_d_wu(dest, cpu_env, src);
584
+ gen_set_fpr_d(ctx, a->rd, dest);
585
586
mark_fs_dirty(ctx);
587
return true;
588
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
589
{
590
REQUIRE_64BIT(ctx);
591
REQUIRE_FPU;
592
- REQUIRE_EXT(ctx, RVD);
593
+ REQUIRE_ZDINX_OR_D(ctx);
594
+ REQUIRE_EVEN(ctx, a->rs1);
595
596
TCGv dest = dest_gpr(ctx, a->rd);
597
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
598
599
gen_set_rm(ctx, a->rm);
600
- gen_helper_fcvt_l_d(dest, cpu_env, cpu_fpr[a->rs1]);
601
+ gen_helper_fcvt_l_d(dest, cpu_env, src1);
602
gen_set_gpr(ctx, a->rd, dest);
603
return true;
604
}
605
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
606
{
607
REQUIRE_64BIT(ctx);
608
REQUIRE_FPU;
609
- REQUIRE_EXT(ctx, RVD);
610
+ REQUIRE_ZDINX_OR_D(ctx);
611
+ REQUIRE_EVEN(ctx, a->rs1);
612
613
TCGv dest = dest_gpr(ctx, a->rd);
614
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
615
616
gen_set_rm(ctx, a->rm);
617
- gen_helper_fcvt_lu_d(dest, cpu_env, cpu_fpr[a->rs1]);
618
+ gen_helper_fcvt_lu_d(dest, cpu_env, src1);
619
gen_set_gpr(ctx, a->rd, dest);
620
return true;
621
}
622
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
623
{
624
REQUIRE_64BIT(ctx);
625
REQUIRE_FPU;
626
- REQUIRE_EXT(ctx, RVD);
627
+ REQUIRE_ZDINX_OR_D(ctx);
628
+ REQUIRE_EVEN(ctx, a->rd);
629
630
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
631
TCGv src = get_gpr(ctx, a->rs1, EXT_SIGN);
632
633
gen_set_rm(ctx, a->rm);
634
- gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, src);
635
+ gen_helper_fcvt_d_l(dest, cpu_env, src);
636
+ gen_set_fpr_d(ctx, a->rd, dest);
637
638
mark_fs_dirty(ctx);
639
return true;
640
@@ -XXX,XX +XXX,XX @@ static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
641
{
642
REQUIRE_64BIT(ctx);
643
REQUIRE_FPU;
644
- REQUIRE_EXT(ctx, RVD);
645
+ REQUIRE_ZDINX_OR_D(ctx);
646
+ REQUIRE_EVEN(ctx, a->rd);
647
648
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
649
TCGv src = get_gpr(ctx, a->rs1, EXT_ZERO);
650
651
gen_set_rm(ctx, a->rm);
652
- gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, src);
653
+ gen_helper_fcvt_d_lu(dest, cpu_env, src);
654
+ gen_set_fpr_d(ctx, a->rd, dest);
655
656
mark_fs_dirty(ctx);
657
return true;
658
--
167
--
659
2.35.1
168
2.48.1
diff view generated by jsdifflib
New patch
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
2
3
RISCV_IOMMU_REG_IOHPMCYCLES writes are done by
4
riscv_iommu_process_hpmcycle_write(), called by the mmio write callback
5
via riscv_iommu_process_hpm_writes().
6
7
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
8
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250224190826.1858473-8-dbarboza@ventanamicro.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
hw/riscv/riscv-iommu-hpm.h | 1 +
14
hw/riscv/riscv-iommu-hpm.c | 19 +++++++++++++++++++
15
hw/riscv/riscv-iommu.c | 2 +-
16
3 files changed, 21 insertions(+), 1 deletion(-)
17
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu-hpm.h
21
+++ b/hw/riscv/riscv-iommu-hpm.h
22
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
23
unsigned event_id);
24
void riscv_iommu_hpm_timer_cb(void *priv);
25
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
26
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu-hpm.c
32
+++ b/hw/riscv/riscv-iommu-hpm.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
34
timer_del(s->hpm_timer);
35
}
36
}
37
+
38
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
39
+{
40
+ const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
41
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
42
+
43
+ /*
44
+ * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
45
+ */
46
+ if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
47
+ !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
48
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
49
+ RISCV_IOMMU_IOCOUNTOVF_CY);
50
+ }
51
+
52
+ s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
53
+ s->hpmcycle_prev = get_cycles();
54
+ hpm_setup_timer(s, s->hpmcycle_val);
55
+}
56
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/hw/riscv/riscv-iommu.c
59
+++ b/hw/riscv/riscv-iommu.c
60
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
61
62
case RISCV_IOMMU_REG_IOHPMCYCLES:
63
case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
64
- /* not yet implemented */
65
+ riscv_iommu_process_hpmcycle_write(s);
66
break;
67
68
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
69
--
70
2.48.1
diff view generated by jsdifflib
1
From: Anup Patel <anup.patel@wdc.com>
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
2
2
3
We extend virt machine to emulate both AIA IMSIC and AIA APLIC
3
To support hpm events mmio writes, done via
4
devices only when "aia=aplic-imsic" parameter is passed along
4
riscv_iommu_process_hpmevt_write(), we're also adding the 'hpm-counters'
5
with machine name in the QEMU command-line. The AIA IMSIC is
5
IOMMU property that are used to determine the amount of counters
6
only a per-HART MSI controller so we use AIA APLIC in MSI-mode
6
available in the IOMMU.
7
to forward all wired interrupts as MSIs to the AIA IMSIC.
8
7
9
We also provide "aia-guests=<xyz>" parameter which can be used
8
Note that everything we did so far didn't change any IOMMU behavior
10
to specify number of VS-level AIA IMSIC Guests MMIO pages for
9
because we're still not advertising HPM capability to software. This
11
each HART.
10
will be done in the next patch.
12
11
13
Signed-off-by: Anup Patel <anup.patel@wdc.com>
12
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
14
Signed-off-by: Anup Patel <anup@brainfault.org>
13
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Acked-by: Alistair Francis <alistair.francis@wdc.com>
16
Message-Id: <20220220085526.808674-4-anup@brainfault.org>
15
Message-ID: <20250224190826.1858473-9-dbarboza@ventanamicro.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
17
---
19
include/hw/riscv/virt.h | 17 +-
18
hw/riscv/riscv-iommu-hpm.h | 1 +
20
hw/riscv/virt.c | 439 ++++++++++++++++++++++++++++++++--------
19
hw/riscv/riscv-iommu.h | 1 +
21
hw/riscv/Kconfig | 1 +
20
hw/riscv/riscv-iommu-hpm.c | 88 ++++++++++++++++++++++++++++++++++++++
22
3 files changed, 373 insertions(+), 84 deletions(-)
21
hw/riscv/riscv-iommu.c | 4 +-
22
4 files changed, 93 insertions(+), 1 deletion(-)
23
23
24
diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h
24
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
25
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/hw/riscv/virt.h
26
--- a/hw/riscv/riscv-iommu-hpm.h
27
+++ b/include/hw/riscv/virt.h
27
+++ b/hw/riscv/riscv-iommu-hpm.h
28
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
29
#include "hw/block/flash.h"
29
void riscv_iommu_hpm_timer_cb(void *priv);
30
#include "qom/object.h"
30
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
31
31
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
32
-#define VIRT_CPUS_MAX 32
32
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
33
-#define VIRT_SOCKETS_MAX 8
33
34
+#define VIRT_CPUS_MAX_BITS 3
34
#endif
35
+#define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS)
35
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
36
+#define VIRT_SOCKETS_MAX_BITS 2
36
index XXXXXXX..XXXXXXX 100644
37
+#define VIRT_SOCKETS_MAX (1 << VIRT_SOCKETS_MAX_BITS)
37
--- a/hw/riscv/riscv-iommu.h
38
38
+++ b/hw/riscv/riscv-iommu.h
39
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
39
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
40
typedef struct RISCVVirtState RISCVVirtState;
40
41
@@ -XXX,XX +XXX,XX @@ DECLARE_INSTANCE_CHECKER(RISCVVirtState, RISCV_VIRT_MACHINE,
41
/* HPM event counters */
42
typedef enum RISCVVirtAIAType {
42
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
43
VIRT_AIA_TYPE_NONE = 0,
43
+ uint8_t hpm_cntrs;
44
VIRT_AIA_TYPE_APLIC,
45
+ VIRT_AIA_TYPE_APLIC_IMSIC,
46
} RISCVVirtAIAType;
47
48
struct RISCVVirtState {
49
@@ -XXX,XX +XXX,XX @@ struct RISCVVirtState {
50
int fdt_size;
51
bool have_aclint;
52
RISCVVirtAIAType aia_type;
53
+ int aia_guests;
54
};
44
};
55
45
56
enum {
46
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
57
@@ -XXX,XX +XXX,XX @@ enum {
47
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
58
VIRT_UART0,
59
VIRT_VIRTIO,
60
VIRT_FW_CFG,
61
+ VIRT_IMSIC_M,
62
+ VIRT_IMSIC_S,
63
VIRT_FLASH,
64
VIRT_DRAM,
65
VIRT_PCIE_MMIO,
66
@@ -XXX,XX +XXX,XX @@ enum {
67
VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */
68
};
69
70
-#define VIRT_IRQCHIP_NUM_SOURCES 127
71
+#define VIRT_IRQCHIP_IPI_MSI 1
72
+#define VIRT_IRQCHIP_NUM_MSIS 255
73
+#define VIRT_IRQCHIP_NUM_SOURCES VIRTIO_NDEV
74
#define VIRT_IRQCHIP_NUM_PRIO_BITS 3
75
+#define VIRT_IRQCHIP_MAX_GUESTS_BITS 3
76
+#define VIRT_IRQCHIP_MAX_GUESTS ((1U << VIRT_IRQCHIP_MAX_GUESTS_BITS) - 1U)
77
78
#define VIRT_PLIC_PRIORITY_BASE 0x04
79
#define VIRT_PLIC_PENDING_BASE 0x1000
80
@@ -XXX,XX +XXX,XX @@ enum {
81
#define FDT_PCI_INT_CELLS 1
82
#define FDT_PLIC_INT_CELLS 1
83
#define FDT_APLIC_INT_CELLS 2
84
+#define FDT_IMSIC_INT_CELLS 0
85
#define FDT_MAX_INT_CELLS 2
86
#define FDT_MAX_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \
87
1 + FDT_MAX_INT_CELLS)
88
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
89
index XXXXXXX..XXXXXXX 100644
48
index XXXXXXX..XXXXXXX 100644
90
--- a/hw/riscv/virt.c
49
--- a/hw/riscv/riscv-iommu-hpm.c
91
+++ b/hw/riscv/virt.c
50
+++ b/hw/riscv/riscv-iommu-hpm.c
92
@@ -XXX,XX +XXX,XX @@
51
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
93
#include "hw/riscv/numa.h"
52
s->hpmcycle_prev = get_cycles();
94
#include "hw/intc/riscv_aclint.h"
53
hpm_setup_timer(s, s->hpmcycle_val);
95
#include "hw/intc/riscv_aplic.h"
54
}
96
+#include "hw/intc/riscv_imsic.h"
97
#include "hw/intc/sifive_plic.h"
98
#include "hw/misc/sifive_test.h"
99
#include "chardev/char.h"
100
@@ -XXX,XX +XXX,XX @@
101
#include "hw/pci-host/gpex.h"
102
#include "hw/display/ramfb.h"
103
104
+#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT)
105
+#if VIRT_IMSIC_GROUP_MAX_SIZE < \
106
+ IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS)
107
+#error "Can't accomodate single IMSIC group in address space"
108
+#endif
109
+
55
+
110
+#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \
56
+static inline bool check_valid_event_id(unsigned event_id)
111
+ VIRT_IMSIC_GROUP_MAX_SIZE)
57
+{
112
+#if 0x4000000 < VIRT_IMSIC_MAX_SIZE
58
+ return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
113
+#error "Can't accomodate all IMSIC groups in address space"
59
+ event_id < RISCV_IOMMU_HPMEVENT_MAX;
114
+#endif
60
+}
115
+
61
+
116
static const MemMapEntry virt_memmap[] = {
62
+static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
117
[VIRT_DEBUG] = { 0x0, 0x100 },
118
[VIRT_MROM] = { 0x1000, 0xf000 },
119
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry virt_memmap[] = {
120
[VIRT_VIRTIO] = { 0x10001000, 0x1000 },
121
[VIRT_FW_CFG] = { 0x10100000, 0x18 },
122
[VIRT_FLASH] = { 0x20000000, 0x4000000 },
123
+ [VIRT_IMSIC_M] = { 0x24000000, VIRT_IMSIC_MAX_SIZE },
124
+ [VIRT_IMSIC_S] = { 0x28000000, VIRT_IMSIC_MAX_SIZE },
125
[VIRT_PCIE_ECAM] = { 0x30000000, 0x10000000 },
126
[VIRT_PCIE_MMIO] = { 0x40000000, 0x40000000 },
127
[VIRT_DRAM] = { 0x80000000, 0x0 },
128
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
129
{
130
int cpu;
131
char *name;
132
- unsigned long addr;
133
+ unsigned long addr, size;
134
uint32_t aclint_cells_size;
135
uint32_t *aclint_mswi_cells;
136
uint32_t *aclint_sswi_cells;
137
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
138
}
139
aclint_cells_size = s->soc[socket].num_harts * sizeof(uint32_t) * 2;
140
141
- addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
142
- name = g_strdup_printf("/soc/mswi@%lx", addr);
143
- qemu_fdt_add_subnode(mc->fdt, name);
144
- qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-mswi");
145
- qemu_fdt_setprop_cells(mc->fdt, name, "reg",
146
- 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
147
- qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
148
- aclint_mswi_cells, aclint_cells_size);
149
- qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
150
- qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
151
- riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
152
- g_free(name);
153
+ if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
154
+ addr = memmap[VIRT_CLINT].base + (memmap[VIRT_CLINT].size * socket);
155
+ name = g_strdup_printf("/soc/mswi@%lx", addr);
156
+ qemu_fdt_add_subnode(mc->fdt, name);
157
+ qemu_fdt_setprop_string(mc->fdt, name, "compatible",
158
+ "riscv,aclint-mswi");
159
+ qemu_fdt_setprop_cells(mc->fdt, name, "reg",
160
+ 0x0, addr, 0x0, RISCV_ACLINT_SWI_SIZE);
161
+ qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
162
+ aclint_mswi_cells, aclint_cells_size);
163
+ qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
164
+ qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
165
+ riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
166
+ g_free(name);
167
+ }
168
169
- addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
170
- (memmap[VIRT_CLINT].size * socket);
171
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
172
+ addr = memmap[VIRT_CLINT].base +
173
+ (RISCV_ACLINT_DEFAULT_MTIMER_SIZE * socket);
174
+ size = RISCV_ACLINT_DEFAULT_MTIMER_SIZE;
175
+ } else {
176
+ addr = memmap[VIRT_CLINT].base + RISCV_ACLINT_SWI_SIZE +
177
+ (memmap[VIRT_CLINT].size * socket);
178
+ size = memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE;
179
+ }
180
name = g_strdup_printf("/soc/mtimer@%lx", addr);
181
qemu_fdt_add_subnode(mc->fdt, name);
182
qemu_fdt_setprop_string(mc->fdt, name, "compatible",
183
"riscv,aclint-mtimer");
184
qemu_fdt_setprop_cells(mc->fdt, name, "reg",
185
0x0, addr + RISCV_ACLINT_DEFAULT_MTIME,
186
- 0x0, memmap[VIRT_CLINT].size - RISCV_ACLINT_SWI_SIZE -
187
- RISCV_ACLINT_DEFAULT_MTIME,
188
+ 0x0, size - RISCV_ACLINT_DEFAULT_MTIME,
189
0x0, addr + RISCV_ACLINT_DEFAULT_MTIMECMP,
190
0x0, RISCV_ACLINT_DEFAULT_MTIME);
191
qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
192
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aclint(RISCVVirtState *s,
193
riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
194
g_free(name);
195
196
- addr = memmap[VIRT_ACLINT_SSWI].base +
197
- (memmap[VIRT_ACLINT_SSWI].size * socket);
198
- name = g_strdup_printf("/soc/sswi@%lx", addr);
199
- qemu_fdt_add_subnode(mc->fdt, name);
200
- qemu_fdt_setprop_string(mc->fdt, name, "compatible", "riscv,aclint-sswi");
201
- qemu_fdt_setprop_cells(mc->fdt, name, "reg",
202
- 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
203
- qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
204
- aclint_sswi_cells, aclint_cells_size);
205
- qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
206
- qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
207
- riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
208
- g_free(name);
209
+ if (s->aia_type != VIRT_AIA_TYPE_APLIC_IMSIC) {
210
+ addr = memmap[VIRT_ACLINT_SSWI].base +
211
+ (memmap[VIRT_ACLINT_SSWI].size * socket);
212
+ name = g_strdup_printf("/soc/sswi@%lx", addr);
213
+ qemu_fdt_add_subnode(mc->fdt, name);
214
+ qemu_fdt_setprop_string(mc->fdt, name, "compatible",
215
+ "riscv,aclint-sswi");
216
+ qemu_fdt_setprop_cells(mc->fdt, name, "reg",
217
+ 0x0, addr, 0x0, memmap[VIRT_ACLINT_SSWI].size);
218
+ qemu_fdt_setprop(mc->fdt, name, "interrupts-extended",
219
+ aclint_sswi_cells, aclint_cells_size);
220
+ qemu_fdt_setprop(mc->fdt, name, "interrupt-controller", NULL, 0);
221
+ qemu_fdt_setprop_cell(mc->fdt, name, "#interrupt-cells", 0);
222
+ riscv_socket_fdt_write_id(mc, mc->fdt, name, socket);
223
+ g_free(name);
224
+ }
225
226
g_free(aclint_mswi_cells);
227
g_free(aclint_mtimer_cells);
228
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_plic(RISCVVirtState *s,
229
g_free(plic_cells);
230
}
231
232
-static void create_fdt_socket_aia(RISCVVirtState *s,
233
- const MemMapEntry *memmap, int socket,
234
- uint32_t *phandle, uint32_t *intc_phandles,
235
- uint32_t *aplic_phandles)
236
+static uint32_t imsic_num_bits(uint32_t count)
237
+{
63
+{
238
+ uint32_t ret = 0;
64
+ uint32_t *pair = udata;
239
+
65
+
240
+ while (BIT(ret) < count) {
66
+ if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
241
+ ret++;
67
+ pair[1] = GPOINTER_TO_UINT(key);
68
+ return true;
242
+ }
69
+ }
243
+
70
+
244
+ return ret;
71
+ return false;
245
+}
72
+}
246
+
73
+
247
+static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
74
+/* Caller must check ctr_idx against hpm_ctrs to see if its supported or not. */
248
+ uint32_t *phandle, uint32_t *intc_phandles,
75
+static void update_event_map(RISCVIOMMUState *s, uint64_t value,
249
+ uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
76
+ uint32_t ctr_idx)
250
+{
77
+{
251
+ int cpu, socket;
78
+ unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
252
+ char *imsic_name;
79
+ uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
253
+ MachineState *mc = MACHINE(s);
80
+ uint32_t new_value = 1 << ctr_idx;
254
+ uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
81
+ gpointer data;
255
+ uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
256
+
82
+
257
+ *msi_m_phandle = (*phandle)++;
83
+ /*
258
+ *msi_s_phandle = (*phandle)++;
84
+ * If EventID field is RISCV_IOMMU_HPMEVENT_INVALID
259
+ imsic_cells = g_new0(uint32_t, mc->smp.cpus * 2);
85
+ * remove the current mapping.
260
+ imsic_regs = g_new0(uint32_t, riscv_socket_count(mc) * 4);
86
+ */
87
+ if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
88
+ data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
261
+
89
+
262
+ /* M-level IMSIC node */
90
+ new_value = GPOINTER_TO_UINT(data) & ~(new_value);
263
+ for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
91
+ if (new_value != 0) {
264
+ imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
92
+ g_hash_table_replace(s->hpm_event_ctr_map,
265
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
93
+ GUINT_TO_POINTER(pair[1]),
94
+ GUINT_TO_POINTER(new_value));
95
+ } else {
96
+ g_hash_table_remove(s->hpm_event_ctr_map,
97
+ GUINT_TO_POINTER(pair[1]));
98
+ }
99
+
100
+ return;
266
+ }
101
+ }
267
+ imsic_max_hart_per_socket = 0;
102
+
268
+ for (socket = 0; socket < riscv_socket_count(mc); socket++) {
103
+ /* Update the counter mask if the event is already enabled. */
269
+ imsic_addr = memmap[VIRT_IMSIC_M].base +
104
+ if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
270
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
105
+ GUINT_TO_POINTER(event_id),
271
+ imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
106
+ NULL,
272
+ imsic_regs[socket * 4 + 0] = 0;
107
+ &data)) {
273
+ imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
108
+ new_value |= GPOINTER_TO_UINT(data);
274
+ imsic_regs[socket * 4 + 2] = 0;
275
+ imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
276
+ if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
277
+ imsic_max_hart_per_socket = s->soc[socket].num_harts;
278
+ }
279
+ }
109
+ }
280
+ imsic_name = g_strdup_printf("/soc/imsics@%lx",
281
+ (unsigned long)memmap[VIRT_IMSIC_M].base);
282
+ qemu_fdt_add_subnode(mc->fdt, imsic_name);
283
+ qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
284
+ "riscv,imsics");
285
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
286
+ FDT_IMSIC_INT_CELLS);
287
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
288
+ NULL, 0);
289
+ qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
290
+ NULL, 0);
291
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
292
+ imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
293
+ qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
294
+ riscv_socket_count(mc) * sizeof(uint32_t) * 4);
295
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
296
+ VIRT_IRQCHIP_NUM_MSIS);
297
+ qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
298
+ VIRT_IRQCHIP_IPI_MSI);
299
+ if (riscv_socket_count(mc) > 1) {
300
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
301
+ imsic_num_bits(imsic_max_hart_per_socket));
302
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
303
+ imsic_num_bits(riscv_socket_count(mc)));
304
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
305
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
306
+ }
307
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_m_phandle);
308
+ g_free(imsic_name);
309
+
110
+
310
+ /* S-level IMSIC node */
111
+ g_hash_table_insert(s->hpm_event_ctr_map,
311
+ for (cpu = 0; cpu < mc->smp.cpus; cpu++) {
112
+ GUINT_TO_POINTER(event_id),
312
+ imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
113
+ GUINT_TO_POINTER(new_value));
313
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
314
+ }
315
+ imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
316
+ imsic_max_hart_per_socket = 0;
317
+ for (socket = 0; socket < riscv_socket_count(mc); socket++) {
318
+ imsic_addr = memmap[VIRT_IMSIC_S].base +
319
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
320
+ imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
321
+ s->soc[socket].num_harts;
322
+ imsic_regs[socket * 4 + 0] = 0;
323
+ imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
324
+ imsic_regs[socket * 4 + 2] = 0;
325
+ imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
326
+ if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
327
+ imsic_max_hart_per_socket = s->soc[socket].num_harts;
328
+ }
329
+ }
330
+ imsic_name = g_strdup_printf("/soc/imsics@%lx",
331
+ (unsigned long)memmap[VIRT_IMSIC_S].base);
332
+ qemu_fdt_add_subnode(mc->fdt, imsic_name);
333
+ qemu_fdt_setprop_string(mc->fdt, imsic_name, "compatible",
334
+ "riscv,imsics");
335
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "#interrupt-cells",
336
+ FDT_IMSIC_INT_CELLS);
337
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupt-controller",
338
+ NULL, 0);
339
+ qemu_fdt_setprop(mc->fdt, imsic_name, "msi-controller",
340
+ NULL, 0);
341
+ qemu_fdt_setprop(mc->fdt, imsic_name, "interrupts-extended",
342
+ imsic_cells, mc->smp.cpus * sizeof(uint32_t) * 2);
343
+ qemu_fdt_setprop(mc->fdt, imsic_name, "reg", imsic_regs,
344
+ riscv_socket_count(mc) * sizeof(uint32_t) * 4);
345
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,num-ids",
346
+ VIRT_IRQCHIP_NUM_MSIS);
347
+ qemu_fdt_setprop_cells(mc->fdt, imsic_name, "riscv,ipi-id",
348
+ VIRT_IRQCHIP_IPI_MSI);
349
+ if (imsic_guest_bits) {
350
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,guest-index-bits",
351
+ imsic_guest_bits);
352
+ }
353
+ if (riscv_socket_count(mc) > 1) {
354
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,hart-index-bits",
355
+ imsic_num_bits(imsic_max_hart_per_socket));
356
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-bits",
357
+ imsic_num_bits(riscv_socket_count(mc)));
358
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "riscv,group-index-shift",
359
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
360
+ }
361
+ qemu_fdt_setprop_cell(mc->fdt, imsic_name, "phandle", *msi_s_phandle);
362
+ g_free(imsic_name);
363
+
364
+ g_free(imsic_regs);
365
+ g_free(imsic_cells);
366
+}
114
+}
367
+
115
+
368
+static void create_fdt_socket_aplic(RISCVVirtState *s,
116
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
369
+ const MemMapEntry *memmap, int socket,
117
+{
370
+ uint32_t msi_m_phandle,
118
+ const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
371
+ uint32_t msi_s_phandle,
119
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
372
+ uint32_t *phandle,
120
+ uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
373
+ uint32_t *intc_phandles,
374
+ uint32_t *aplic_phandles)
375
{
376
int cpu;
377
char *aplic_name;
378
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aia(RISCVVirtState *s,
379
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
380
"#interrupt-cells", FDT_APLIC_INT_CELLS);
381
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
382
- qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
383
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
384
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
385
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
386
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
387
+ } else {
388
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
389
+ msi_m_phandle);
390
+ }
391
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
392
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
393
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
394
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aia(RISCVVirtState *s,
395
qemu_fdt_setprop_cell(mc->fdt, aplic_name,
396
"#interrupt-cells", FDT_APLIC_INT_CELLS);
397
qemu_fdt_setprop(mc->fdt, aplic_name, "interrupt-controller", NULL, 0);
398
- qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
399
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
400
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
401
+ qemu_fdt_setprop(mc->fdt, aplic_name, "interrupts-extended",
402
+ aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
403
+ } else {
404
+ qemu_fdt_setprop_cell(mc->fdt, aplic_name, "msi-parent",
405
+ msi_s_phandle);
406
+ }
407
qemu_fdt_setprop_cells(mc->fdt, aplic_name, "reg",
408
0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
409
qemu_fdt_setprop_cell(mc->fdt, aplic_name, "riscv,num-sources",
410
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
411
bool is_32_bit, uint32_t *phandle,
412
uint32_t *irq_mmio_phandle,
413
uint32_t *irq_pcie_phandle,
414
- uint32_t *irq_virtio_phandle)
415
+ uint32_t *irq_virtio_phandle,
416
+ uint32_t *msi_pcie_phandle)
417
{
418
- int socket;
419
char *clust_name;
420
- uint32_t *intc_phandles;
421
+ int socket, phandle_pos;
422
MachineState *mc = MACHINE(s);
423
- uint32_t xplic_phandles[MAX_NODES];
424
+ uint32_t msi_m_phandle = 0, msi_s_phandle = 0;
425
+ uint32_t *intc_phandles, xplic_phandles[MAX_NODES];
426
427
qemu_fdt_add_subnode(mc->fdt, "/cpus");
428
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "timebase-frequency",
429
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
430
qemu_fdt_setprop_cell(mc->fdt, "/cpus", "#address-cells", 0x1);
431
qemu_fdt_add_subnode(mc->fdt, "/cpus/cpu-map");
432
433
+ intc_phandles = g_new0(uint32_t, mc->smp.cpus);
434
+
121
+
435
+ phandle_pos = mc->smp.cpus;
122
+ if (ctr_idx >= s->hpm_cntrs) {
436
for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
123
+ return;
437
+ phandle_pos -= s->soc[socket].num_harts;
438
+
439
clust_name = g_strdup_printf("/cpus/cpu-map/cluster%d", socket);
440
qemu_fdt_add_subnode(mc->fdt, clust_name);
441
442
- intc_phandles = g_new0(uint32_t, s->soc[socket].num_harts);
443
-
444
create_fdt_socket_cpus(s, socket, clust_name, phandle,
445
- is_32_bit, intc_phandles);
446
+ is_32_bit, &intc_phandles[phandle_pos]);
447
448
create_fdt_socket_memory(s, memmap, socket);
449
450
+ g_free(clust_name);
451
+
452
if (!kvm_enabled()) {
453
if (s->have_aclint) {
454
- create_fdt_socket_aclint(s, memmap, socket, intc_phandles);
455
+ create_fdt_socket_aclint(s, memmap, socket,
456
+ &intc_phandles[phandle_pos]);
457
} else {
458
- create_fdt_socket_clint(s, memmap, socket, intc_phandles);
459
+ create_fdt_socket_clint(s, memmap, socket,
460
+ &intc_phandles[phandle_pos]);
461
}
462
}
463
+ }
124
+ }
464
+
125
+
465
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
126
+ /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
466
+ create_fdt_imsic(s, memmap, phandle, intc_phandles,
127
+ if (get_field(ovf, BIT(ctr_idx + 1)) &&
467
+ &msi_m_phandle, &msi_s_phandle);
128
+ !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
468
+ *msi_pcie_phandle = msi_s_phandle;
129
+ /* +1 to offset CYCLE register OF bit. */
130
+ riscv_iommu_reg_mod32(
131
+ s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
469
+ }
132
+ }
470
+
133
+
471
+ phandle_pos = mc->smp.cpus;
134
+ if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
472
+ for (socket = (riscv_socket_count(mc) - 1); socket >= 0; socket--) {
135
+ /* Reset EventID (WARL) field to invalid. */
473
+ phandle_pos -= s->soc[socket].num_harts;
136
+ val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
474
137
+ RISCV_IOMMU_HPMEVENT_INVALID);
475
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
138
+ riscv_iommu_reg_set64(s, evt_reg, val);
476
create_fdt_socket_plic(s, memmap, socket, phandle,
139
+ }
477
- intc_phandles, xplic_phandles);
478
+ &intc_phandles[phandle_pos], xplic_phandles);
479
} else {
480
- create_fdt_socket_aia(s, memmap, socket, phandle,
481
- intc_phandles, xplic_phandles);
482
+ create_fdt_socket_aplic(s, memmap, socket,
483
+ msi_m_phandle, msi_s_phandle, phandle,
484
+ &intc_phandles[phandle_pos], xplic_phandles);
485
}
486
-
487
- g_free(intc_phandles);
488
- g_free(clust_name);
489
}
490
491
+ g_free(intc_phandles);
492
+
140
+
493
for (socket = 0; socket < riscv_socket_count(mc); socket++) {
141
+ update_event_map(s, val, ctr_idx);
494
if (socket == 0) {
495
*irq_mmio_phandle = xplic_phandles[socket];
496
@@ -XXX,XX +XXX,XX @@ static void create_fdt_virtio(RISCVVirtState *s, const MemMapEntry *memmap,
497
}
498
499
static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
500
- uint32_t irq_pcie_phandle)
501
+ uint32_t irq_pcie_phandle,
502
+ uint32_t msi_pcie_phandle)
503
{
504
char *name;
505
MachineState *mc = MACHINE(s);
506
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pcie(RISCVVirtState *s, const MemMapEntry *memmap,
507
qemu_fdt_setprop_cells(mc->fdt, name, "bus-range", 0,
508
memmap[VIRT_PCIE_ECAM].size / PCIE_MMCFG_SIZE_MIN - 1);
509
qemu_fdt_setprop(mc->fdt, name, "dma-coherent", NULL, 0);
510
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
511
+ qemu_fdt_setprop_cell(mc->fdt, name, "msi-parent", msi_pcie_phandle);
512
+ }
513
qemu_fdt_setprop_cells(mc->fdt, name, "reg", 0,
514
memmap[VIRT_PCIE_ECAM].base, 0, memmap[VIRT_PCIE_ECAM].size);
515
qemu_fdt_setprop_sized_cells(mc->fdt, name, "ranges",
516
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
517
uint64_t mem_size, const char *cmdline, bool is_32_bit)
518
{
519
MachineState *mc = MACHINE(s);
520
- uint32_t phandle = 1, irq_mmio_phandle = 1;
521
+ uint32_t phandle = 1, irq_mmio_phandle = 1, msi_pcie_phandle = 1;
522
uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1;
523
524
if (mc->dtb) {
525
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
526
qemu_fdt_setprop_cell(mc->fdt, "/soc", "#address-cells", 0x2);
527
528
create_fdt_sockets(s, memmap, is_32_bit, &phandle,
529
- &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle);
530
+ &irq_mmio_phandle, &irq_pcie_phandle, &irq_virtio_phandle,
531
+ &msi_pcie_phandle);
532
533
create_fdt_virtio(s, memmap, irq_virtio_phandle);
534
535
- create_fdt_pcie(s, memmap, irq_pcie_phandle);
536
+ create_fdt_pcie(s, memmap, irq_pcie_phandle, msi_pcie_phandle);
537
538
create_fdt_reset(s, memmap, &phandle);
539
540
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_plic(const MemMapEntry *memmap, int socket,
541
return ret;
542
}
543
544
-static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type,
545
+static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
546
const MemMapEntry *memmap, int socket,
547
int base_hartid, int hart_count)
548
{
549
+ int i;
550
+ hwaddr addr;
551
+ uint32_t guest_bits;
552
DeviceState *aplic_m;
553
+ bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
554
+
555
+ if (msimode) {
556
+ /* Per-socket M-level IMSICs */
557
+ addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
558
+ for (i = 0; i < hart_count; i++) {
559
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
560
+ base_hartid + i, true, 1,
561
+ VIRT_IRQCHIP_NUM_MSIS);
562
+ }
563
+
564
+ /* Per-socket S-level IMSICs */
565
+ guest_bits = imsic_num_bits(aia_guests + 1);
566
+ addr = memmap[VIRT_IMSIC_S].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
567
+ for (i = 0; i < hart_count; i++) {
568
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(guest_bits),
569
+ base_hartid + i, false, 1 + aia_guests,
570
+ VIRT_IRQCHIP_NUM_MSIS);
571
+ }
572
+ }
573
574
/* Per-socket M-level APLIC */
575
aplic_m = riscv_aplic_create(
576
memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
577
memmap[VIRT_APLIC_M].size,
578
- base_hartid, hart_count,
579
+ (msimode) ? 0 : base_hartid,
580
+ (msimode) ? 0 : hart_count,
581
VIRT_IRQCHIP_NUM_SOURCES,
582
VIRT_IRQCHIP_NUM_PRIO_BITS,
583
- false, true, NULL);
584
+ msimode, true, NULL);
585
586
if (aplic_m) {
587
/* Per-socket S-level APLIC */
588
riscv_aplic_create(
589
memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
590
memmap[VIRT_APLIC_S].size,
591
- base_hartid, hart_count,
592
+ (msimode) ? 0 : base_hartid,
593
+ (msimode) ? 0 : hart_count,
594
VIRT_IRQCHIP_NUM_SOURCES,
595
VIRT_IRQCHIP_NUM_PRIO_BITS,
596
- false, false, aplic_m);
597
+ msimode, false, aplic_m);
598
}
599
600
return aplic_m;
601
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
602
sysbus_realize(SYS_BUS_DEVICE(&s->soc[i]), &error_abort);
603
604
if (!kvm_enabled()) {
605
- /* Per-socket CLINT */
606
- riscv_aclint_swi_create(
607
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
608
- base_hartid, hart_count, false);
609
- riscv_aclint_mtimer_create(
610
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size +
611
- RISCV_ACLINT_SWI_SIZE,
612
- RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
613
- RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
614
- RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
615
-
616
- /* Per-socket ACLINT SSWI */
617
if (s->have_aclint) {
618
+ if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
619
+ /* Per-socket ACLINT MTIMER */
620
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
621
+ i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
622
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
623
+ base_hartid, hart_count,
624
+ RISCV_ACLINT_DEFAULT_MTIMECMP,
625
+ RISCV_ACLINT_DEFAULT_MTIME,
626
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
627
+ } else {
628
+ /* Per-socket ACLINT MSWI, MTIMER, and SSWI */
629
+ riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
630
+ i * memmap[VIRT_CLINT].size,
631
+ base_hartid, hart_count, false);
632
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
633
+ i * memmap[VIRT_CLINT].size +
634
+ RISCV_ACLINT_SWI_SIZE,
635
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
636
+ base_hartid, hart_count,
637
+ RISCV_ACLINT_DEFAULT_MTIMECMP,
638
+ RISCV_ACLINT_DEFAULT_MTIME,
639
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
640
+ riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
641
+ i * memmap[VIRT_ACLINT_SSWI].size,
642
+ base_hartid, hart_count, true);
643
+ }
644
+ } else {
645
+ /* Per-socket SiFive CLINT */
646
riscv_aclint_swi_create(
647
- memmap[VIRT_ACLINT_SSWI].base +
648
- i * memmap[VIRT_ACLINT_SSWI].size,
649
- base_hartid, hart_count, true);
650
+ memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
651
+ base_hartid, hart_count, false);
652
+ riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
653
+ i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
654
+ RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
655
+ RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
656
+ RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
657
}
658
}
659
660
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
661
s->irqchip[i] = virt_create_plic(memmap, i,
662
base_hartid, hart_count);
663
} else {
664
- s->irqchip[i] = virt_create_aia(s->aia_type, memmap, i,
665
- base_hartid, hart_count);
666
+ s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
667
+ memmap, i, base_hartid,
668
+ hart_count);
669
}
670
671
/* Try to use different IRQCHIP instance based device type */
672
@@ -XXX,XX +XXX,XX @@ static void virt_machine_instance_init(Object *obj)
673
{
674
}
675
676
+static char *virt_get_aia_guests(Object *obj, Error **errp)
677
+{
678
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
679
+ char val[32];
680
+
681
+ sprintf(val, "%d", s->aia_guests);
682
+ return g_strdup(val);
683
+}
142
+}
684
+
143
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
685
+static void virt_set_aia_guests(Object *obj, const char *val, Error **errp)
144
index XXXXXXX..XXXXXXX 100644
686
+{
145
--- a/hw/riscv/riscv-iommu.c
687
+ RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
146
+++ b/hw/riscv/riscv-iommu.c
688
+
147
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
689
+ s->aia_guests = atoi(val);
148
690
+ if (s->aia_guests < 0 || s->aia_guests > VIRT_IRQCHIP_MAX_GUESTS) {
149
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
691
+ error_setg(errp, "Invalid number of AIA IMSIC guests");
150
RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
692
+ error_append_hint(errp, "Valid values be between 0 and %d.\n",
151
- /* not yet implemented */
693
+ VIRT_IRQCHIP_MAX_GUESTS);
152
+ riscv_iommu_process_hpmevt_write(s, regb & ~7);
694
+ }
695
+}
696
+
697
static char *virt_get_aia(Object *obj, Error **errp)
698
{
699
RISCVVirtState *s = RISCV_VIRT_MACHINE(obj);
700
@@ -XXX,XX +XXX,XX @@ static char *virt_get_aia(Object *obj, Error **errp)
701
case VIRT_AIA_TYPE_APLIC:
702
val = "aplic";
703
break;
153
break;
704
+ case VIRT_AIA_TYPE_APLIC_IMSIC:
705
+ val = "aplic-imsic";
706
+ break;
707
default:
708
val = "none";
709
break;
710
@@ -XXX,XX +XXX,XX @@ static void virt_set_aia(Object *obj, const char *val, Error **errp)
711
s->aia_type = VIRT_AIA_TYPE_NONE;
712
} else if (!strcmp(val, "aplic")) {
713
s->aia_type = VIRT_AIA_TYPE_APLIC;
714
+ } else if (!strcmp(val, "aplic-imsic")) {
715
+ s->aia_type = VIRT_AIA_TYPE_APLIC_IMSIC;
716
} else {
717
error_setg(errp, "Invalid AIA interrupt controller type");
718
- error_append_hint(errp, "Valid values are none, and aplic.\n");
719
+ error_append_hint(errp, "Valid values are none, aplic, and "
720
+ "aplic-imsic.\n");
721
}
154
}
722
}
155
}
723
156
@@ -XXX,XX +XXX,XX @@ static const Property riscv_iommu_properties[] = {
724
@@ -XXX,XX +XXX,XX @@ static void virt_set_aclint(Object *obj, bool value, Error **errp)
157
DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
725
158
DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
726
static void virt_machine_class_init(ObjectClass *oc, void *data)
159
TYPE_MEMORY_REGION, MemoryRegion *),
727
{
160
+ DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
728
+ char str[128];
161
+ RISCV_IOMMU_IOCOUNT_NUM),
729
MachineClass *mc = MACHINE_CLASS(oc);
162
};
730
163
731
mc->desc = "RISC-V VirtIO board";
164
static void riscv_iommu_class_init(ObjectClass *klass, void* data)
732
@@ -XXX,XX +XXX,XX @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
733
object_class_property_set_description(oc, "aia",
734
"Set type of AIA interrupt "
735
"conttoller. Valid values are "
736
- "none, and aplic.");
737
+ "none, aplic, and aplic-imsic.");
738
+
739
+ object_class_property_add_str(oc, "aia-guests",
740
+ virt_get_aia_guests,
741
+ virt_set_aia_guests);
742
+ sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value "
743
+ "should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS);
744
+ object_class_property_set_description(oc, "aia-guests", str);
745
}
746
747
static const TypeInfo virt_machine_typeinfo = {
748
diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig
749
index XXXXXXX..XXXXXXX 100644
750
--- a/hw/riscv/Kconfig
751
+++ b/hw/riscv/Kconfig
752
@@ -XXX,XX +XXX,XX @@ config RISCV_VIRT
753
select SERIAL
754
select RISCV_ACLINT
755
select RISCV_APLIC
756
+ select RISCV_IMSIC
757
select SIFIVE_PLIC
758
select SIFIVE_TEST
759
select VIRTIO_MMIO
760
--
165
--
761
2.35.1
166
2.48.1
diff view generated by jsdifflib
New patch
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
2
3
Now that we have every piece in place we can advertise CAP_HTM to
4
software, allowing any HPM aware driver to make use of the counters.
5
6
HPM is enabled/disabled via the 'hpm-counters' attribute. Default value
7
is 31, max value is also 31. Setting it to zero will disable HPM
8
support.
9
10
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20250224190826.1858473-10-dbarboza@ventanamicro.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
hw/riscv/riscv-iommu.c | 21 +++++++++++++++++++++
17
1 file changed, 21 insertions(+)
18
19
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/riscv/riscv-iommu.c
22
+++ b/hw/riscv/riscv-iommu.c
23
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
24
RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4;
25
}
26
27
+ if (s->hpm_cntrs > 0) {
28
+ /* Clip number of HPM counters to maximum supported (31). */
29
+ if (s->hpm_cntrs > RISCV_IOMMU_IOCOUNT_NUM) {
30
+ s->hpm_cntrs = RISCV_IOMMU_IOCOUNT_NUM;
31
+ }
32
+ /* Enable hardware performance monitor interface */
33
+ s->cap |= RISCV_IOMMU_CAP_HPM;
34
+ }
35
+
36
/* Out-of-reset translation mode: OFF (DMA disabled) BARE (passthrough) */
37
s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, s->enable_off ?
38
RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE);
39
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
40
RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
41
}
42
43
+ /* If HPM registers are enabled. */
44
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
45
+ /* +1 for cycle counter bit. */
46
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOCOUNTINH],
47
+ ~((2 << s->hpm_cntrs) - 1));
48
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCYCLES], 0);
49
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCTR_BASE],
50
+ 0x00, s->hpm_cntrs * 8);
51
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMEVT_BASE],
52
+ 0x00, s->hpm_cntrs * 8);
53
+ }
54
+
55
/* Memory region for downstream access, if specified. */
56
if (s->target_mr) {
57
s->target_as = g_new0(AddressSpace, 1);
58
--
59
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Add a handful of trace events to allow for an easier time debugging the
4
HPM feature.
5
6
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-ID: <20250224190826.1858473-11-dbarboza@ventanamicro.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
hw/riscv/riscv-iommu-hpm.c | 10 ++++++++++
12
hw/riscv/trace-events | 5 +++++
13
2 files changed, 15 insertions(+)
14
15
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/riscv/riscv-iommu-hpm.c
18
+++ b/hw/riscv/riscv-iommu-hpm.c
19
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
20
const uint64_t ctr_prev = s->hpmcycle_prev;
21
const uint64_t ctr_val = s->hpmcycle_val;
22
23
+ trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);
24
+
25
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
26
/*
27
* Counter should not increment if inhibit bit is set. We can't really
28
@@ -XXX,XX +XXX,XX @@ static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
29
cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
30
stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
31
32
+ trace_riscv_iommu_hpm_incr_ctr(cntr_val);
33
+
34
/* Handle the overflow scenario. */
35
if (cntr_val == UINT64_MAX) {
36
/*
37
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
38
return;
39
}
40
41
+ trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);
42
+
43
if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
44
/*
45
* Cycle counter is enabled. Just start the timer again and update
46
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
47
const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
48
const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
49
50
+ trace_riscv_iommu_hpm_cycle_write(ovf, val);
51
+
52
/*
53
* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
54
*/
55
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
56
return;
57
}
58
59
+ trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);
60
+
61
/* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
62
if (get_field(ovf, BIT(ctr_idx + 1)) &&
63
!get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
64
diff --git a/hw/riscv/trace-events b/hw/riscv/trace-events
65
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/riscv/trace-events
67
+++ b/hw/riscv/trace-events
68
@@ -XXX,XX +XXX,XX @@ riscv_iommu_sys_irq_sent(uint32_t vector) "IRQ sent to vector %u"
69
riscv_iommu_sys_msi_sent(uint32_t vector, uint64_t msi_addr, uint32_t msi_data, uint32_t result) "MSI sent to vector %u msi_addr 0x%"PRIx64" msi_data 0x%x result %u"
70
riscv_iommu_sys_reset_hold(int reset_type) "reset type %d"
71
riscv_iommu_pci_reset_hold(int reset_type) "reset type %d"
72
+riscv_iommu_hpm_read(uint64_t cycle, uint32_t inhibit, uint64_t ctr_prev, uint64_t ctr_val) "cycle 0x%"PRIx64" inhibit 0x%x ctr_prev 0x%"PRIx64" ctr_val 0x%"PRIx64
73
+riscv_iommu_hpm_incr_ctr(uint64_t cntr_val) "cntr_val 0x%"PRIx64
74
+riscv_iommu_hpm_iocntinh_cy(bool prev_cy_inh) "prev_cy_inh %d"
75
+riscv_iommu_hpm_cycle_write(uint32_t ovf, uint64_t val) "ovf 0x%x val 0x%"PRIx64
76
+riscv_iommu_hpm_evt_write(uint32_t ctr_idx, uint32_t ovf, uint64_t val) "ctr_idx 0x%x ovf 0x%x val 0x%"PRIx64
77
--
78
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Message-ID: <20250224190826.1858473-12-dbarboza@ventanamicro.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
8
docs/specs/riscv-iommu.rst | 2 ++
9
1 file changed, 2 insertions(+)
10
11
diff --git a/docs/specs/riscv-iommu.rst b/docs/specs/riscv-iommu.rst
12
index XXXXXXX..XXXXXXX 100644
13
--- a/docs/specs/riscv-iommu.rst
14
+++ b/docs/specs/riscv-iommu.rst
15
@@ -XXX,XX +XXX,XX @@ Several options are available to control the capabilities of the device, namely:
16
- "off" (Out-of-reset translation mode: 'on' for DMA disabled, 'off' for 'BARE' (passthrough))
17
- "s-stage": enable s-stage support
18
- "g-stage": enable g-stage support
19
+- "hpm-counters": number of hardware performance counters available. Maximum value is 31.
20
+ Default value is 31. Use 0 (zero) to disable HPM support
21
22
riscv-iommu-sys device
23
----------------------
24
--
25
2.48.1
diff view generated by jsdifflib
New patch
1
From: Quan Zhou <zhouquan@iscas.ac.cn>
1
2
3
When the Sscofpmf/Svade/Svadu/Smnpm/Ssnpm exts is available
4
expose it to the guest so that guest can use it.
5
6
Signed-off-by: Quan Zhou <zhouquan@iscas.ac.cn>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Message-ID: <303616ccad2b5309768157b50d93b3e89fecc9cb.1740371468.git.zhouquan@iscas.ac.cn>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/kvm/kvm-cpu.c | 5 +++++
12
1 file changed, 5 insertions(+)
13
14
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/kvm/kvm-cpu.c
17
+++ b/target/riscv/kvm/kvm-cpu.c
18
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
19
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
20
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
21
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
22
+ KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
23
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
24
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
25
+ KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
26
+ KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
27
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
28
+ KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
29
+ KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
30
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
31
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
32
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
33
--
34
2.48.1
diff view generated by jsdifflib
New patch
1
From: Andrea Bolognani <abologna@redhat.com>
1
2
3
This should make no difference from the functional point of
4
view and it's just preparation for upcoming changes.
5
6
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
9
Message-ID: <20250127182924.103510-2-abologna@redhat.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
scripts/qemu-binfmt-conf.sh | 17 ++++++++++-------
13
1 file changed, 10 insertions(+), 7 deletions(-)
14
15
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
16
index XXXXXXX..XXXXXXX 100755
17
--- a/scripts/qemu-binfmt-conf.sh
18
+++ b/scripts/qemu-binfmt-conf.sh
19
@@ -XXX,XX +XXX,XX @@ qemu_set_binfmts() {
20
mask=$(eval echo \$${cpu}_mask)
21
family=$(eval echo \$${cpu}_family)
22
23
+ target="$cpu"
24
+ if [ "$cpu" = "i486" ] ; then
25
+ target="i386"
26
+ fi
27
+
28
+ qemu="$QEMU_PATH/qemu-$target$QEMU_SUFFIX"
29
+
30
if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then
31
echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2
32
continue
33
fi
34
35
- qemu="$QEMU_PATH/qemu-$cpu"
36
- if [ "$cpu" = "i486" ] ; then
37
- qemu="$QEMU_PATH/qemu-i386"
38
+ if [ "$host_family" = "$family" ] ; then
39
+ continue
40
fi
41
42
- qemu="$qemu$QEMU_SUFFIX"
43
- if [ "$host_family" != "$family" ] ; then
44
- $BINFMT_SET
45
- fi
46
+ $BINFMT_SET
47
done
48
}
49
50
--
51
2.48.1
diff view generated by jsdifflib
New patch
1
From: Andrea Bolognani <abologna@redhat.com>
1
2
3
Right now information regarding the family each CPU type belongs
4
to is recorded in two places: the large data table at the top of
5
the script, and the qemu_host_family() function.
6
7
We can make things better by mapping host CPU architecture to
8
QEMU target in the few cases where the two don't already match
9
and then using the data table to look up the family, same as
10
we're already doing for the guest CPU architecture.
11
12
Being able to reason in terms of QEMU target regardless of
13
whether we're looking at the host or guest CPU architecture will
14
come in handy to implement upcoming changes.
15
16
A couple of entries are dropped in the process: BePC and Power
17
Macintosh. I'm quite certain neither of those have ever been
18
reported as CPU architectures by Linux. I believe many more of
19
the entries that are carried forward could be dropped as well,
20
but I don't have the same level of confidence there so I
21
decided to play it safe just in case.
22
23
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
26
Message-ID: <20250127182924.103510-3-abologna@redhat.com>
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
28
---
29
scripts/qemu-binfmt-conf.sh | 44 +++++++++++++++++++++----------------
30
1 file changed, 25 insertions(+), 19 deletions(-)
31
32
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
33
index XXXXXXX..XXXXXXX 100755
34
--- a/scripts/qemu-binfmt-conf.sh
35
+++ b/scripts/qemu-binfmt-conf.sh
36
@@ -XXX,XX +XXX,XX @@ loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x
37
loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
38
loongarch64_family=loongarch
39
40
-qemu_get_family() {
41
- cpu=${HOST_ARCH:-$(uname -m)}
42
+# Converts the name of a host CPU architecture to the corresponding QEMU
43
+# target.
44
+#
45
+# FIXME: This can probably be simplified a lot by dropping most entries.
46
+# Remember that the script is only used on Linux, so we only need to
47
+# handle the strings Linux uses to report the host CPU architecture.
48
+qemu_normalize() {
49
+ cpu="$1"
50
case "$cpu" in
51
- amd64|i386|i486|i586|i686|i86pc|BePC|x86_64)
52
+ i[3-6]86)
53
echo "i386"
54
;;
55
- mips*)
56
- echo "mips"
57
+ amd64)
58
+ echo "x86_64"
59
;;
60
- "Power Macintosh"|ppc64|powerpc|ppc)
61
+ powerpc)
62
echo "ppc"
63
;;
64
- ppc64el|ppc64le)
65
- echo "ppcle"
66
+ ppc64el)
67
+ echo "ppc64le"
68
;;
69
- arm|armel|armhf|arm64|armv[4-9]*l|aarch64)
70
+ armel|armhf|armv[4-9]*l)
71
echo "arm"
72
;;
73
- armeb|armv[4-9]*b|aarch64_be)
74
+ armv[4-9]*b)
75
echo "armeb"
76
;;
77
- sparc*)
78
- echo "sparc"
79
- ;;
80
- riscv*)
81
- echo "riscv"
82
- ;;
83
- loongarch*)
84
- echo "loongarch"
85
+ arm64)
86
+ echo "aarch64"
87
;;
88
*)
89
echo "$cpu"
90
@@ -XXX,XX +XXX,XX @@ EOF
91
92
qemu_set_binfmts() {
93
# probe cpu type
94
- host_family=$(qemu_get_family)
95
+ host_cpu=$(qemu_normalize ${HOST_ARCH:-$(uname -m)})
96
+ host_family=$(eval echo \$${host_cpu}_family)
97
+
98
+ if [ "$host_family" = "" ] ; then
99
+ echo "INTERNAL ERROR: unknown host cpu $host_cpu" 1>&2
100
+ exit 1
101
+ fi
102
103
# register the interpreter for each cpu except for the native one
104
105
--
106
2.48.1
diff view generated by jsdifflib
New patch
1
From: Andrea Bolognani <abologna@redhat.com>
1
2
3
Until now, the script has worked under the assumption that a
4
host CPU can run binaries targeting any CPU in the same family.
5
That's a fair enough assumption when it comes to running i386
6
binaries on x86_64, but it doesn't quite apply in the general
7
case.
8
9
For example, while riscv64 CPUs could theoretically run riscv32
10
applications natively, in practice there exist few (if any?)
11
CPUs that implement the necessary silicon; moreover, even if you
12
had one such CPU, your host OS would most likely not have
13
enabled the necessary kernel bits.
14
15
This new option gives distro packagers the ability to opt out of
16
the assumption, likely on a per-architecture basis, and make
17
things work out of the box for a larger fraction of their user
18
base.
19
20
As an interesting side effect, this makes it possible to enable
21
execution of 64-bit binaries on 32-bit CPUs of the same family,
22
which is a perfectly valid use case that apparently hadn't been
23
considered until now.
24
25
Link: https://src.fedoraproject.org/rpms/qemu/pull-request/72
26
Thanks: David Abdurachmanov <davidlt@rivosinc.com>
27
Thanks: Daniel P. Berrangé <berrange@redhat.com>
28
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
29
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
30
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
31
Message-ID: <20250127182924.103510-4-abologna@redhat.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
---
34
scripts/qemu-binfmt-conf.sh | 19 ++++++++++++++++---
35
1 file changed, 16 insertions(+), 3 deletions(-)
36
37
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
38
index XXXXXXX..XXXXXXX 100755
39
--- a/scripts/qemu-binfmt-conf.sh
40
+++ b/scripts/qemu-binfmt-conf.sh
41
@@ -XXX,XX +XXX,XX @@ Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU]
42
--persistent: if yes, the interpreter is loaded when binfmt is
43
configured and remains in memory. All future uses
44
are cloned from the open file.
45
+ --ignore-family: if yes, it is assumed that the host CPU (e.g. riscv64)
46
+ can't natively run programs targeting a CPU that is
47
+ part of the same family (e.g. riscv32).
48
--preserve-argv0 preserve argv[0]
49
50
To import templates with update-binfmts, use :
51
@@ -XXX,XX +XXX,XX @@ qemu_set_binfmts() {
52
fi
53
54
if [ "$host_family" = "$family" ] ; then
55
- continue
56
+ # When --ignore-family is used, we have to generate rules even
57
+ # for targets that are in the same family as the host CPU. The
58
+ # only exception is of course when the CPU types exactly match
59
+ if [ "$target" = "$host_cpu" ] || [ "$IGNORE_FAMILY" = "no" ] ; then
60
+ continue
61
+ fi
62
fi
63
64
$BINFMT_SET
65
@@ -XXX,XX +XXX,XX @@ CREDENTIAL=no
66
PERSISTENT=no
67
PRESERVE_ARG0=no
68
QEMU_SUFFIX=""
69
+IGNORE_FAMILY=no
70
71
_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
72
-persistent:,preserve-argv0:"
73
-options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
74
+persistent:,preserve-argv0:,ignore-family:"
75
+options=$(getopt -o ds:Q:S:e:hc:p:g:F:i: -l ${_longopts} -- "$@")
76
eval set -- "$options"
77
78
while true ; do
79
@@ -XXX,XX +XXX,XX @@ while true ; do
80
shift
81
PRESERVE_ARG0="$1"
82
;;
83
+ -i|--ignore-family)
84
+ shift
85
+ IGNORE_FAMILY="$1"
86
+ ;;
87
*)
88
break
89
;;
90
--
91
2.48.1
92
93
diff view generated by jsdifflib
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
When the IMSIC is emulated in the kernel, the GPIO output lines to CPUs
4
and aia_ireg_rmw_fn setting can be remove. In this case the IMSIC
5
trigger CPU interrupts by KVM APIs, and the RMW of IREG is handled in
6
kernel.
7
8
This patch also move the code that claim the CPU interrupts to the
9
beginning of IMSIC realization. This can avoid the unnecessary resource
10
allocation before checking failed.
11
12
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250224025722.3999-2-yongxuan.wang@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
hw/intc/riscv_imsic.c | 47 ++++++++++++++++++++++++-------------------
18
1 file changed, 26 insertions(+), 21 deletions(-)
19
20
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/intc/riscv_imsic.c
23
+++ b/hw/intc/riscv_imsic.c
24
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
25
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
26
CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
27
28
+ /* Claim the CPU interrupt to be triggered by this IMSIC */
29
+ if (riscv_cpu_claim_interrupts(rcpu,
30
+ (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
31
+ error_setg(errp, "%s already claimed",
32
+ (imsic->mmode) ? "MEIP" : "SEIP");
33
+ return;
34
+ }
35
+
36
if (!kvm_irqchip_in_kernel()) {
37
+ /* Create output IRQ lines */
38
+ imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
39
+ qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
40
+
41
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
42
imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
43
imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
44
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
45
IMSIC_MMIO_SIZE(imsic->num_pages));
46
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
47
48
- /* Claim the CPU interrupt to be triggered by this IMSIC */
49
- if (riscv_cpu_claim_interrupts(rcpu,
50
- (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
51
- error_setg(errp, "%s already claimed",
52
- (imsic->mmode) ? "MEIP" : "SEIP");
53
- return;
54
- }
55
-
56
- /* Create output IRQ lines */
57
- imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
58
- qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
59
-
60
/* Force select AIA feature and setup CSR read-modify-write callback */
61
if (env) {
62
if (!imsic->mmode) {
63
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
64
} else {
65
rcpu->cfg.ext_smaia = true;
66
}
67
- riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
68
- riscv_imsic_rmw, imsic);
69
+
70
+ if (!kvm_irqchip_in_kernel()) {
71
+ riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
72
+ riscv_imsic_rmw, imsic);
73
+ }
74
}
75
76
msi_nonbroken = true;
77
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
78
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
79
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
80
81
- for (i = 0; i < num_pages; i++) {
82
- if (!i) {
83
- qdev_connect_gpio_out_named(dev, NULL, i,
84
- qdev_get_gpio_in(DEVICE(cpu),
85
+ if (!kvm_irqchip_in_kernel()) {
86
+ for (i = 0; i < num_pages; i++) {
87
+ if (!i) {
88
+ qdev_connect_gpio_out_named(dev, NULL, i,
89
+ qdev_get_gpio_in(DEVICE(cpu),
90
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
91
- } else {
92
- qdev_connect_gpio_out_named(dev, NULL, i,
93
- qdev_get_gpio_in(DEVICE(cpu),
94
+ } else {
95
+ qdev_connect_gpio_out_named(dev, NULL, i,
96
+ qdev_get_gpio_in(DEVICE(cpu),
97
IRQ_LOCAL_MAX + i - 1));
98
+ }
99
}
100
}
101
102
--
103
2.48.1
diff view generated by jsdifflib
1
From: Anup Patel <anup.patel@wdc.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
We have two new machine options "aia" and "aia-guests" available
3
When the APLIC is emulated in the kernel, the GPIO output lines to CPUs
4
for the RISC-V virt machine so let's document these options.
4
can be remove. In this case the APLIC trigger CPU interrupts by KVM APIs.
5
5
6
Signed-off-by: Anup Patel <anup.patel@wdc.com>
6
This patch also move the code that claim the CPU interrupts to the
7
Signed-off-by: Anup Patel <anup@brainfault.org>
7
beginning of APLIC realization. This can avoid the unnecessary resource
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
allocation before checking failed.
9
Reviewed-by: Frank Chang <frank.chang@sifive.com>
9
10
Message-Id: <20220220085526.808674-5-anup@brainfault.org>
10
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-ID: <20250224025722.3999-3-yongxuan.wang@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
14
---
13
docs/system/riscv/virt.rst | 16 ++++++++++++++++
15
hw/intc/riscv_aplic.c | 49 +++++++++++++++++++++++--------------------
14
1 file changed, 16 insertions(+)
16
1 file changed, 26 insertions(+), 23 deletions(-)
15
17
16
diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst
18
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/docs/system/riscv/virt.rst
20
--- a/hw/intc/riscv_aplic.c
19
+++ b/docs/system/riscv/virt.rst
21
+++ b/hw/intc/riscv_aplic.c
20
@@ -XXX,XX +XXX,XX @@ The following machine-specific options are supported:
22
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
21
When this option is "on", ACLINT devices will be emulated instead of
23
RISCVAPLICState *aplic = RISCV_APLIC(dev);
22
SiFive CLINT. When not specified, this option is assumed to be "off".
24
23
25
if (riscv_use_emulated_aplic(aplic->msimode)) {
24
+- aia=[none|aplic|aplic-imsic]
26
+ /* Create output IRQ lines for non-MSI mode */
27
+ if (!aplic->msimode) {
28
+ /* Claim the CPU interrupt to be triggered by this APLIC */
29
+ for (i = 0; i < aplic->num_harts; i++) {
30
+ RISCVCPU *cpu;
25
+
31
+
26
+ This option allows selecting interrupt controller defined by the AIA
32
+ cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
27
+ (advanced interrupt architecture) specification. The "aia=aplic" selects
33
+ if (riscv_cpu_claim_interrupts(cpu,
28
+ APLIC (advanced platform level interrupt controller) to handle wired
34
+ (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
29
+ interrupts whereas the "aia=aplic-imsic" selects APLIC and IMSIC (incoming
35
+ error_report("%s already claimed",
30
+ message signaled interrupt controller) to handle both wired interrupts and
36
+ (aplic->mmode) ? "MEIP" : "SEIP");
31
+ MSIs. When not specified, this option is assumed to be "none" which selects
37
+ exit(1);
32
+ SiFive PLIC to handle wired interrupts.
38
+ }
39
+ }
33
+
40
+
34
+- aia-guests=nnn
41
+ aplic->external_irqs = g_malloc(sizeof(qemu_irq) *
42
+ aplic->num_harts);
43
+ qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
44
+ }
35
+
45
+
36
+ The number of per-HART VS-level AIA IMSIC pages to be emulated for a guest
46
aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
37
+ having AIA IMSIC (i.e. "aia=aplic-imsic" selected). When not specified,
47
aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
38
+ the default number of per-HART VS-level AIA IMSIC pages is 0.
48
aplic->state = g_new0(uint32_t, aplic->num_irqs);
39
+
49
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
40
Running Linux kernel
50
}
41
--------------------
51
}
52
53
- /* Create output IRQ lines for non-MSI mode */
54
- if (!aplic->msimode) {
55
- aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts);
56
- qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
57
-
58
- /* Claim the CPU interrupt to be triggered by this APLIC */
59
- for (i = 0; i < aplic->num_harts; i++) {
60
- RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
61
- if (riscv_cpu_claim_interrupts(cpu,
62
- (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
63
- error_report("%s already claimed",
64
- (aplic->mmode) ? "MEIP" : "SEIP");
65
- exit(1);
66
- }
67
- }
68
- }
69
-
70
msi_nonbroken = true;
71
}
72
73
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
74
75
if (riscv_use_emulated_aplic(msimode)) {
76
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
77
- }
78
79
- if (!msimode) {
80
- for (i = 0; i < num_harts; i++) {
81
- CPUState *cpu = cpu_by_arch_id(hartid_base + i);
82
+ if (!msimode) {
83
+ for (i = 0; i < num_harts; i++) {
84
+ CPUState *cpu = cpu_by_arch_id(hartid_base + i);
85
86
- qdev_connect_gpio_out_named(dev, NULL, i,
87
- qdev_get_gpio_in(DEVICE(cpu),
88
+ qdev_connect_gpio_out_named(dev, NULL, i,
89
+ qdev_get_gpio_in(DEVICE(cpu),
90
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
91
+ }
92
}
93
}
42
94
43
--
95
--
44
2.35.1
96
2.48.1
diff view generated by jsdifflib
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
Let kvm_msicfgaddr use the same format with mmsicfgaddr and smsicfgaddr.
4
5
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
6
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Message-ID: <20250224025722.3999-4-yongxuan.wang@sifive.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
hw/intc/riscv_aplic.c | 24 +++++++++++++-----------
11
1 file changed, 13 insertions(+), 11 deletions(-)
12
13
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/riscv_aplic.c
16
+++ b/hw/intc/riscv_aplic.c
17
@@ -XXX,XX +XXX,XX @@ void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr)
18
{
19
#ifdef CONFIG_KVM
20
if (riscv_use_emulated_aplic(aplic->msimode)) {
21
+ addr >>= APLIC_xMSICFGADDR_PPN_SHIFT;
22
aplic->kvm_msicfgaddr = extract64(addr, 0, 32);
23
- aplic->kvm_msicfgaddrH = extract64(addr, 32, 32);
24
+ aplic->kvm_msicfgaddrH = extract64(addr, 32, 32) &
25
+ APLIC_xMSICFGADDRH_VALID_MASK;
26
}
27
#endif
28
}
29
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
30
}
31
}
32
33
- if (aplic->mmode) {
34
- msicfgaddr = aplic_m->mmsicfgaddr;
35
- msicfgaddrH = aplic_m->mmsicfgaddrH;
36
+ if (aplic->kvm_splitmode) {
37
+ msicfgaddr = aplic->kvm_msicfgaddr;
38
+ msicfgaddrH = ((uint64_t)aplic->kvm_msicfgaddrH << 32);
39
} else {
40
- msicfgaddr = aplic_m->smsicfgaddr;
41
- msicfgaddrH = aplic_m->smsicfgaddrH;
42
+ if (aplic->mmode) {
43
+ msicfgaddr = aplic_m->mmsicfgaddr;
44
+ msicfgaddrH = aplic_m->mmsicfgaddrH;
45
+ } else {
46
+ msicfgaddr = aplic_m->smsicfgaddr;
47
+ msicfgaddrH = aplic_m->smsicfgaddrH;
48
+ }
49
}
50
51
lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) &
52
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
53
addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs));
54
addr <<= APLIC_xMSICFGADDR_PPN_SHIFT;
55
56
- if (aplic->kvm_splitmode) {
57
- addr |= aplic->kvm_msicfgaddr;
58
- addr |= ((uint64_t)aplic->kvm_msicfgaddrH << 32);
59
- }
60
-
61
address_space_stl_le(&address_space_memory, addr,
62
eiid, MEMTXATTRS_UNSPECIFIED, &result);
63
if (result != MEMTX_OK) {
64
--
65
2.48.1
diff view generated by jsdifflib
New patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
2
3
Remove the !kvm_enabled() check in kvm_riscv_reset_vcpu() since the
4
function is already being gated by kvm_enabled() in
5
riscv_cpu_reset_hold().
6
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250224123120.1644186-2-dbarboza@ventanamicro.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/kvm/kvm-cpu.c | 3 ---
14
1 file changed, 3 deletions(-)
15
16
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/kvm/kvm-cpu.c
19
+++ b/target/riscv/kvm/kvm-cpu.c
20
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
21
CPURISCVState *env = &cpu->env;
22
int i;
23
24
- if (!kvm_enabled()) {
25
- return;
26
- }
27
for (i = 0; i < 32; i++) {
28
env->gpr[i] = 0;
29
}
30
--
31
2.48.1
diff view generated by jsdifflib
1
From: Wilfred Mallawa <wilfred.mallawa@wdc.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
This patch updates the SPI_DEVICE, SPI_HOST0, SPI_HOST1
3
We're setting reset vals for KVM csrs during kvm_riscv_reset_vcpu(), but
4
base addresses. Also adds these as unimplemented devices.
4
in no particular order and missing some of them (like env->mstatus).
5
5
6
The address references can be found [1].
6
Create a helper to do that, unclogging reset_vcpu(), and initialize
7
env->mstatus as well. Keep the regs in the same order they appear in
8
struct kvm_riscv_csr from the KVM UAPI, similar to what
9
kvm_riscv_(get|put)_regs_csr are doing. This will make a bit easier to
10
add new KVM CSRs and to verify which values we're writing back to KVM
11
during vcpu reset.
7
12
8
[1] https://github.com/lowRISC/opentitan/blob/6c317992fbd646818b34f2a2dbf44bc850e461e4/hw/top_earlgrey/sw/autogen/top_earlgrey_memory.h#L107
13
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
14
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
15
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
16
Message-ID: <20250224123120.1644186-3-dbarboza@ventanamicro.com>
13
Message-Id: <20220218063839.405082-1-alistair.francis@opensource.wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
18
---
16
include/hw/riscv/opentitan.h | 4 +++-
19
target/riscv/kvm/kvm-cpu.c | 23 +++++++++++++++--------
17
hw/riscv/opentitan.c | 12 +++++++++---
20
1 file changed, 15 insertions(+), 8 deletions(-)
18
2 files changed, 12 insertions(+), 4 deletions(-)
19
21
20
diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h
22
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
21
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/riscv/opentitan.h
24
--- a/target/riscv/kvm/kvm-cpu.c
23
+++ b/include/hw/riscv/opentitan.h
25
+++ b/target/riscv/kvm/kvm-cpu.c
24
@@ -XXX,XX +XXX,XX @@ enum {
26
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_put_regs_core(CPUState *cs)
25
IBEX_DEV_FLASH,
27
return ret;
26
IBEX_DEV_FLASH_VIRTUAL,
28
}
27
IBEX_DEV_UART,
29
28
+ IBEX_DEV_SPI_DEVICE,
30
+static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
29
+ IBEX_DEV_SPI_HOST0,
31
+{
30
+ IBEX_DEV_SPI_HOST1,
32
+ env->mstatus = 0;
31
IBEX_DEV_GPIO,
33
+ env->mie = 0;
32
- IBEX_DEV_SPI,
34
+ env->stvec = 0;
33
IBEX_DEV_I2C,
35
+ env->sscratch = 0;
34
IBEX_DEV_PATTGEN,
36
+ env->sepc = 0;
35
IBEX_DEV_TIMER,
37
+ env->scause = 0;
36
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
38
+ env->stval = 0;
37
index XXXXXXX..XXXXXXX 100644
39
+ env->mip = 0;
38
--- a/hw/riscv/opentitan.c
40
+ env->satp = 0;
39
+++ b/hw/riscv/opentitan.c
41
+}
40
@@ -XXX,XX +XXX,XX @@ static const MemMapEntry ibex_memmap[] = {
42
+
41
[IBEX_DEV_FLASH] = { 0x20000000, 0x80000 },
43
static int kvm_riscv_get_regs_csr(CPUState *cs)
42
[IBEX_DEV_UART] = { 0x40000000, 0x1000 },
44
{
43
[IBEX_DEV_GPIO] = { 0x40040000, 0x1000 },
45
CPURISCVState *env = &RISCV_CPU(cs)->env;
44
- [IBEX_DEV_SPI] = { 0x40050000, 0x1000 },
46
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
45
+ [IBEX_DEV_SPI_DEVICE] = { 0x40050000, 0x1000 },
47
env->pc = cpu->env.kernel_addr;
46
[IBEX_DEV_I2C] = { 0x40080000, 0x1000 },
48
env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
47
[IBEX_DEV_PATTGEN] = { 0x400e0000, 0x1000 },
49
env->gpr[11] = cpu->env.fdt_addr; /* a1 */
48
[IBEX_DEV_TIMER] = { 0x40100000, 0x1000 },
50
- env->satp = 0;
49
[IBEX_DEV_SENSOR_CTRL] = { 0x40110000, 0x1000 },
51
- env->mie = 0;
50
[IBEX_DEV_OTP_CTRL] = { 0x40130000, 0x4000 },
52
- env->stvec = 0;
51
[IBEX_DEV_USBDEV] = { 0x40150000, 0x1000 },
53
- env->sscratch = 0;
52
+ [IBEX_DEV_SPI_HOST0] = { 0x40300000, 0x1000 },
54
- env->sepc = 0;
53
+ [IBEX_DEV_SPI_HOST1] = { 0x40310000, 0x1000 },
55
- env->scause = 0;
54
[IBEX_DEV_PWRMGR] = { 0x40400000, 0x1000 },
56
- env->stval = 0;
55
[IBEX_DEV_RSTMGR] = { 0x40410000, 0x1000 },
57
- env->mip = 0;
56
[IBEX_DEV_CLKMGR] = { 0x40420000, 0x1000 },
58
+
57
@@ -XXX,XX +XXX,XX @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
59
+ kvm_riscv_reset_regs_csr(env);
58
60
}
59
create_unimplemented_device("riscv.lowrisc.ibex.gpio",
61
60
memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size);
62
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
61
- create_unimplemented_device("riscv.lowrisc.ibex.spi",
62
- memmap[IBEX_DEV_SPI].base, memmap[IBEX_DEV_SPI].size);
63
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_device",
64
+ memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size);
65
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_host0",
66
+ memmap[IBEX_DEV_SPI_HOST0].base, memmap[IBEX_DEV_SPI_HOST0].size);
67
+ create_unimplemented_device("riscv.lowrisc.ibex.spi_host1",
68
+ memmap[IBEX_DEV_SPI_HOST1].base, memmap[IBEX_DEV_SPI_HOST1].size);
69
create_unimplemented_device("riscv.lowrisc.ibex.i2c",
70
memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size);
71
create_unimplemented_device("riscv.lowrisc.ibex.pattgen",
72
--
63
--
73
2.35.1
64
2.48.1
diff view generated by jsdifflib
1
From: Philipp Tomsich <philipp.tomsich@vrull.eu>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
While changing to the use of cfg_ptr, the conditions for REQUIRE_ZB[ABCS]
3
We're missing scounteren and senvcfg CSRs, both already present in the
4
inadvertently became inverted and slipped through the initial testing (which
4
KVM UAPI.
5
used RV64GC_XVentanaCondOps as a target).
6
This fixes the regression.
7
5
8
Tested against SPEC2017 w/ GCC 12 (prerelease) for RV64GC_zba_zbb_zbc_zbs.
6
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
7
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Fixes: f2a32bec8f0da99 ("target/riscv: access cfg structure through DisasContext")
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Philipp Tomsich <philipp.tomsich@vrull.eu>
9
Message-ID: <20250224123120.1644186-4-dbarboza@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-Id: <20220203153946.2676353-1-philipp.tomsich@vrull.eu>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
11
---
16
target/riscv/insn_trans/trans_rvb.c.inc | 8 ++++----
12
target/riscv/kvm/kvm-cpu.c | 6 ++++++
17
1 file changed, 4 insertions(+), 4 deletions(-)
13
1 file changed, 6 insertions(+)
18
14
19
diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc
15
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
20
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/insn_trans/trans_rvb.c.inc
17
--- a/target/riscv/kvm/kvm-cpu.c
22
+++ b/target/riscv/insn_trans/trans_rvb.c.inc
18
+++ b/target/riscv/kvm/kvm-cpu.c
23
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
24
*/
20
env->stval = 0;
25
21
env->mip = 0;
26
#define REQUIRE_ZBA(ctx) do { \
22
env->satp = 0;
27
- if (ctx->cfg_ptr->ext_zba) { \
23
+ env->scounteren = 0;
28
+ if (!ctx->cfg_ptr->ext_zba) { \
24
+ env->senvcfg = 0;
29
return false; \
25
}
30
} \
26
31
} while (0)
27
static int kvm_riscv_get_regs_csr(CPUState *cs)
32
28
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
33
#define REQUIRE_ZBB(ctx) do { \
29
KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
34
- if (ctx->cfg_ptr->ext_zbb) { \
30
KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
35
+ if (!ctx->cfg_ptr->ext_zbb) { \
31
KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
36
return false; \
32
+ KVM_RISCV_GET_CSR(cs, env, scounteren, env->scounteren);
37
} \
33
+ KVM_RISCV_GET_CSR(cs, env, senvcfg, env->senvcfg);
38
} while (0)
34
39
35
return 0;
40
#define REQUIRE_ZBC(ctx) do { \
36
}
41
- if (ctx->cfg_ptr->ext_zbc) { \
37
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_put_regs_csr(CPUState *cs)
42
+ if (!ctx->cfg_ptr->ext_zbc) { \
38
KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
43
return false; \
39
KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
44
} \
40
KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
45
} while (0)
41
+ KVM_RISCV_SET_CSR(cs, env, scounteren, env->scounteren);
46
42
+ KVM_RISCV_SET_CSR(cs, env, senvcfg, env->senvcfg);
47
#define REQUIRE_ZBS(ctx) do { \
43
48
- if (ctx->cfg_ptr->ext_zbs) { \
44
return 0;
49
+ if (!ctx->cfg_ptr->ext_zbs) { \
45
}
50
return false; \
51
} \
52
} while (0)
53
--
46
--
54
2.35.1
47
2.48.1
diff view generated by jsdifflib