The following changes since commit 64ada298b98a51eb2512607f6e6180cb330c47b1:

  Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220302' into staging (2022-03-02 12:38:46 +0000)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20220302

for you to fetch changes up to 268c11984e67867c22f53beb3c7f8b98900d66b2:

  ui/cocoa.m: Remove unnecessary NSAutoreleasePools (2022-03-02 19:27:37 +0000)

----------------------------------------------------------------
target-arm queue:
 * mps3-an547: Add missing user ahb interfaces
 * hw/arm/mps2-tz.c: Update AN547 documentation URL
 * hw/input/tsc210x: Don't abort on bad SPI word widths
 * hw/i2c: flatten pca954x mux device
 * target/arm: Support PSCI 1.1 and SMCCC 1.0
 * target/arm: Fix early free of TCG temp in handle_simd_shift_fpint_conv()
 * tests/qtest: add qtests for npcm7xx sdhci
 * Implement FEAT_LVA
 * Implement FEAT_LPA
 * Implement FEAT_LPA2 (but do not enable it yet)
 * Report KVM's actual PSCI version to guest in dtb
 * ui/cocoa.m: Fix updateUIInfo threading issues
 * ui/cocoa.m: Remove unnecessary NSAutoreleasePools

----------------------------------------------------------------
Akihiko Odaki (1):
      target/arm: Support PSCI 1.1 and SMCCC 1.0

Jimmy Brisson (1):
      mps3-an547: Add missing user ahb interfaces

Patrick Venture (1):
      hw/i2c: flatten pca954x mux device

Peter Maydell (5):
      hw/arm/mps2-tz.c: Update AN547 documentation URL
      hw/input/tsc210x: Don't abort on bad SPI word widths
      target/arm: Report KVM's actual PSCI version to guest in dtb
      ui/cocoa.m: Fix updateUIInfo threading issues
      ui/cocoa.m: Remove unnecessary NSAutoreleasePools

Richard Henderson (16):
      hw/registerfields: Add FIELD_SEX<N> and FIELD_SDP<N>
      target/arm: Set TCR_EL1.TSZ for user-only
      target/arm: Fault on invalid TCR_ELx.TxSZ
      target/arm: Move arm_pamax out of line
      target/arm: Pass outputsize down to check_s2_mmu_setup
      target/arm: Use MAKE_64BIT_MASK to compute indexmask
      target/arm: Honor TCR_ELx.{I}PS
      target/arm: Prepare DBGBVR and DBGWVR for FEAT_LVA
      target/arm: Implement FEAT_LVA
      target/arm: Implement FEAT_LPA
      target/arm: Extend arm_fi_to_lfsc to level -1
      target/arm: Introduce tlbi_aa64_get_range
      target/arm: Fix TLBIRange.base for 16k and 64k pages
      target/arm: Validate tlbi TG matches translation granule in use
      target/arm: Advertise all page sizes for -cpu max
      target/arm: Implement FEAT_LPA2

Shengtan Mao (1):
      tests/qtest: add qtests for npcm7xx sdhci

Wentao_Liang (1):
      target/arm: Fix early free of TCG temp in handle_simd_shift_fpint_conv()

 docs/system/arm/emulation.rst    |   3 +
 include/hw/registerfields.h      |  48 +++++-
 target/arm/cpu-param.h           |   4 +-
 target/arm/cpu.h                 |  27 ++++
 target/arm/internals.h           |  58 ++++---
 target/arm/kvm-consts.h          |  14 +-
 hw/arm/boot.c                    |  11 +-
 hw/arm/mps2-tz.c                 |   6 +-
 hw/i2c/i2c_mux_pca954x.c         |  77 ++-------
 hw/input/tsc210x.c               |   8 +-
 target/arm/cpu.c                 |   8 +-
 target/arm/cpu64.c               |   7 +-
 target/arm/helper.c              | 332 ++++++++++++++++++++++++++++++---------
 target/arm/hvf/hvf.c             |  27 +++-
 target/arm/kvm64.c               |  14 +-
 target/arm/psci.c                |  35 ++++-
 target/arm/translate-a64.c       |   2 +-
 tests/qtest/npcm7xx_sdhci-test.c | 215 +++++++++++++++++++++++++
 tests/qtest/meson.build          |   1 +
 ui/cocoa.m                       |  31 ++--
 20 files changed, 736 insertions(+), 192 deletions(-)
 create mode 100644 tests/qtest/npcm7xx_sdhci-test.c

From: Jimmy Brisson <jimmy.brisson@linaro.org>

With these interfaces missing, TFM would delegate peripherals 0, 1,
2, 3 and 8, and qemu would ignore the delegation of interface 8, as
it thought interface 4 was eth & USB.

This patch corrects this behavior and allows TFM to delegate the
eth & USB peripheral to NS mode.

(The old QEMU behaviour was based on revision B of the AN547
appnote; revision C corrects this error in the documentation,
and this commit brings QEMU in to line with how the FPGA
image really behaves.)

Signed-off-by: Jimmy Brisson <jimmy.brisson@linaro.org>
Message-id: 20220210210227.3203883-1-jimmy.brisson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: added commit message note clarifying that the old behaviour
 was a docs issue, not because there were two different versions
 of the FPGA image]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/mps2-tz.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@ static void mps2tz_common_init(MachineState *machine)
                 { "gpio1", make_unimp_dev, &mms->gpio[1], 0x41101000, 0x1000 },
                 { "gpio2", make_unimp_dev, &mms->gpio[2], 0x41102000, 0x1000 },
                 { "gpio3", make_unimp_dev, &mms->gpio[3], 0x41103000, 0x1000 },
+                { /* port 4 USER AHB interface 0 */ },
+                { /* port 5 USER AHB interface 1 */ },
+                { /* port 6 USER AHB interface 2 */ },
+                { /* port 7 USER AHB interface 3 */ },
                 { "eth-usb", make_eth_usb, NULL, 0x41400000, 0x200000, { 49 } },
             },
         },
--
2.25.1

The AN547 application note URL has changed: update our comment
accordingly. (Rev B is still downloadable from the old URL,
but there is a new Rev C of the document now.)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20220221094144.426191-1-peter.maydell@linaro.org
---
 hw/arm/mps2-tz.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/mps2-tz.c
+++ b/hw/arm/mps2-tz.c
@@ -XXX,XX +XXX,XX @@
  * Application Note AN524:
  * https://developer.arm.com/documentation/dai0524/latest/
  * Application Note AN547:
- * https://developer.arm.com/-/media/Arm%20Developer%20Community/PDF/DAI0547B_SSE300_PLUS_U55_FPGA_for_mps3.pdf
+ * https://developer.arm.com/documentation/dai0547/latest/
  *
  * The AN505 defers to the Cortex-M33 processor ARMv8M IoT Kit FVP User Guide
  * (ARM ECM0601256) for the details of some of the device layout:
--
2.25.1

The tsc210x doesn't support anything other than 16-bit reads on the
SPI bus, but the guest can program the SPI controller to attempt
them anyway. If this happens, don't abort QEMU, just log this as
a guest error.

This fixes our machine_arm_n8x0.py:N8x0Machine.test_n800
acceptance test, which hits this assertion.

The reason we hit the assertion is because the guest kernel thinks
there is a TSC2005 on this SPI bus address, not a TSC210x. (The n810
*does* have a TSC2005 at this address.) The TSC2005 supports the
24-bit accesses which the guest driver makes, and the TSC210x does
not (that is, our TSC210x emulation is not missing support for a word
width the hardware can handle). It's not clear whether the problem
here is that the guest kernel incorrectly thinks the n800 has the
same device at this SPI bus address as the n810, or that QEMU's n810
board model doesn't get the SPI devices right. At this late date
there no longer appears to be any reliable information on the web
about the hardware behaviour, but I am inclined to think this is a
guest kernel bug. In any case, we prefer not to abort QEMU for
guest-triggerable conditions, so logging the error is the right thing
to do.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/736
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20220221140750.514557-1-peter.maydell@linaro.org
---
 hw/input/tsc210x.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/hw.h"
 #include "audio/audio.h"
 #include "qemu/timer.h"
+#include "qemu/log.h"
 #include "sysemu/reset.h"
 #include "ui/console.h"
 #include "hw/arm/omap.h" /* For I2SCodec */
@@ -XXX,XX +XXX,XX @@ uint32_t tsc210x_txrx(void *opaque, uint32_t value, int len)
     TSC210xState *s = opaque;
     uint32_t ret = 0;
 
-    if (len != 16)
-        hw_error("%s: FIXME: bad SPI word width %i\n", __func__, len);
+    if (len != 16) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: bad SPI word width %i\n", __func__, len);
+        return 0;
+    }
 
     /* TODO: sequential reads etc - how do we make sure the host doesn't
      * unintentionally read out a conversion result from a register while
--
2.25.1
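
As a usage note (this is QEMU's generic logging facility, not something added
by this patch): LOG_GUEST_ERROR messages like the one above are suppressed by
default, and can be made visible by enabling the guest_errors debug log on an
otherwise ordinary n800 invocation, for example:

    qemu-system-arm -M n800 -d guest_errors ...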

From: Patrick Venture <venture@google.com>

Previously this device created N subdevices which each owned an i2c bus.
Now this device simply owns the N i2c busses directly.

Tested: Verified devices behind mux are still accessible via qmp and i2c
from within an arm32 SoC.

Reviewed-by: Hao Wu <wuhaotsh@google.com>
Signed-off-by: Patrick Venture <venture@google.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20220202164533.1283668-1-venture@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/i2c/i2c_mux_pca954x.c | 77 +++++++---------------------------------
 1 file changed, 13 insertions(+), 64 deletions(-)

diff --git a/hw/i2c/i2c_mux_pca954x.c b/hw/i2c/i2c_mux_pca954x.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/i2c/i2c_mux_pca954x.c
+++ b/hw/i2c/i2c_mux_pca954x.c
@@ -XXX,XX +XXX,XX @@
 #define PCA9548_CHANNEL_COUNT 8
 #define PCA9546_CHANNEL_COUNT 4
 
-/*
- * struct Pca954xChannel - The i2c mux device will have N of these states
- * that own the i2c channel bus.
- * @bus: The owned channel bus.
- * @enabled: Is this channel active?
- */
-typedef struct Pca954xChannel {
-    SysBusDevice parent;
-
-    I2CBus       *bus;
-
-    bool         enabled;
-} Pca954xChannel;
-
-#define TYPE_PCA954X_CHANNEL "pca954x-channel"
-#define PCA954X_CHANNEL(obj) \
-    OBJECT_CHECK(Pca954xChannel, (obj), TYPE_PCA954X_CHANNEL)
-
 /*
  * struct Pca954xState - The pca954x state object.
  * @control: The value written to the mux control.
@@ -XXX,XX +XXX,XX @@ typedef struct Pca954xState {
 
     uint8_t control;
 
-    /* The channel i2c buses. */
-    Pca954xChannel channel[PCA9548_CHANNEL_COUNT];
+    bool enabled[PCA9548_CHANNEL_COUNT];
+    I2CBus *bus[PCA9548_CHANNEL_COUNT];
 } Pca954xState;
 
 /*
@@ -XXX,XX +XXX,XX @@ static bool pca954x_match(I2CSlave *candidate, uint8_t address,
     }
 
     for (i = 0; i < mc->nchans; i++) {
-        if (!mux->channel[i].enabled) {
+        if (!mux->enabled[i]) {
             continue;
         }
 
-        if (i2c_scan_bus(mux->channel[i].bus, address, broadcast,
+        if (i2c_scan_bus(mux->bus[i], address, broadcast,
                          current_devs)) {
             if (!broadcast) {
                 return true;
@@ -XXX,XX +XXX,XX @@ static void pca954x_enable_channel(Pca954xState *s, uint8_t enable_mask)
      */
     for (i = 0; i < mc->nchans; i++) {
         if (enable_mask & (1 << i)) {
-            s->channel[i].enabled = true;
+            s->enabled[i] = true;
         } else {
-            s->channel[i].enabled = false;
+            s->enabled[i] = false;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ I2CBus *pca954x_i2c_get_bus(I2CSlave *mux, uint8_t channel)
     Pca954xState *pca954x = PCA954X(mux);
 
     g_assert(channel < pc->nchans);
-    return I2C_BUS(qdev_get_child_bus(DEVICE(&pca954x->channel[channel]),
-                                      "i2c-bus"));
-}
-
-static void pca954x_channel_init(Object *obj)
-{
-    Pca954xChannel *s = PCA954X_CHANNEL(obj);
-    s->bus = i2c_init_bus(DEVICE(s), "i2c-bus");
-
-    /* Start all channels as disabled. */
-    s->enabled = false;
-}
-
-static void pca954x_channel_class_init(ObjectClass *klass, void *data)
-{
-    DeviceClass *dc = DEVICE_CLASS(klass);
-    dc->desc = "Pca954x Channel";
+    return pca954x->bus[channel];
 }
 
 static void pca9546_class_init(ObjectClass *klass, void *data)
@@ -XXX,XX +XXX,XX @@ static void pca9548_class_init(ObjectClass *klass, void *data)
     s->nchans = PCA9548_CHANNEL_COUNT;
 }
 
-static void pca954x_realize(DeviceState *dev, Error **errp)
-{
-    Pca954xState *s = PCA954X(dev);
-    Pca954xClass *c = PCA954X_GET_CLASS(s);
-    int i;
-
-    /* SMBus modules. Cannot fail. */
-    for (i = 0; i < c->nchans; i++) {
-        sysbus_realize(SYS_BUS_DEVICE(&s->channel[i]), &error_abort);
-    }
-}
-
 static void pca954x_init(Object *obj)
 {
     Pca954xState *s = PCA954X(obj);
     Pca954xClass *c = PCA954X_GET_CLASS(obj);
     int i;
 
-    /* Only initialize the children we expect. */
+    /* SMBus modules. Cannot fail. */
     for (i = 0; i < c->nchans; i++) {
-        object_initialize_child(obj, "channel[*]", &s->channel[i],
-                                TYPE_PCA954X_CHANNEL);
+        g_autofree gchar *bus_name = g_strdup_printf("i2c.%d", i);
+
+        /* start all channels as disabled. */
+        s->enabled[i] = false;
+        s->bus[i] = i2c_init_bus(DEVICE(s), bus_name);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void pca954x_class_init(ObjectClass *klass, void *data)
     rc->phases.enter = pca954x_enter_reset;
 
     dc->desc = "Pca954x i2c-mux";
-    dc->realize = pca954x_realize;
 
     k->write_data = pca954x_write_data;
     k->receive_byte = pca954x_read_byte;
@@ -XXX,XX +XXX,XX @@ static const TypeInfo pca954x_info[] = {
         .parent = TYPE_PCA954X,
         .class_init = pca9548_class_init,
     },
-    {
-        .name = TYPE_PCA954X_CHANNEL,
-        .parent = TYPE_SYS_BUS_DEVICE,
-        .class_init = pca954x_channel_class_init,
-        .instance_size = sizeof(Pca954xChannel),
-        .instance_init = pca954x_channel_init,
-    }
 };
 
 DEFINE_TYPES(pca954x_info)
--
2.25.1
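
For board code the flattening above is invisible: pca954x_i2c_get_bus() still
hands back the per-channel bus, now named "i2c.0" through "i2c.7". A minimal,
hypothetical wiring sketch (the pca9548 address 0x70 and the tmp105 sensor at
0x48 are illustrative only, not taken from this patch):

    /* Put a PCA9548 mux on an existing I2C bus, then hang a
     * temperature sensor off channel 4 of the mux. */
    I2CSlave *mux = i2c_slave_create_simple(i2c_bus, "pca9548", 0x70);
    I2CBus *chan4 = pca954x_i2c_get_bus(mux, 4);
    i2c_slave_create_simple(chan4, "tmp105", 0x48);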

From: Akihiko Odaki <akihiko.odaki@gmail.com>

Support the latest PSCI on TCG and HVF. A 64-bit function called from
AArch32 now returns NOT_SUPPORTED, which is necessary to adhere to SMC
Calling Convention 1.0. It is still not compliant with SMCCC 1.3 since
they do not implement mandatory functions.

Signed-off-by: Akihiko Odaki <akihiko.odaki@gmail.com>
Message-id: 20220213035753.34577-1-akihiko.odaki@gmail.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
[PMM: update MISMATCH_CHECK checks on PSCI_VERSION macros to match]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm-consts.h | 13 +++++++++----
 hw/arm/boot.c           | 12 +++++++++---
 target/arm/cpu.c        |  5 +++--
 target/arm/hvf/hvf.c    | 27 ++++++++++++++++++++++++++-
 target/arm/kvm64.c      |  2 +-
 target/arm/psci.c       | 35 ++++++++++++++++++++++++++++++---
 6 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm-consts.h
+++ b/target/arm/kvm-consts.h
@@ -XXX,XX +XXX,XX @@ MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE);
 #define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4)
 #define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5)
 
+#define QEMU_PSCI_1_0_FN_PSCI_FEATURES QEMU_PSCI_0_2_FN(10)
+
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND);
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF);
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON);
@@ -XXX,XX +XXX,XX @@ MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE);
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND);
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON);
 MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE);
+MISMATCH_CHECK(QEMU_PSCI_1_0_FN_PSCI_FEATURES, PSCI_1_0_FN_PSCI_FEATURES);
 
 /* PSCI v0.2 return values used by TCG emulation of PSCI */
 
 /* No Trusted OS migration to worry about when offlining CPUs */
 #define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2
 
-/* We implement version 0.2 only */
-#define QEMU_PSCI_0_2_RET_VERSION_0_2 2
+#define QEMU_PSCI_VERSION_0_1 0x00001
+#define QEMU_PSCI_VERSION_0_2 0x00002
+#define QEMU_PSCI_VERSION_1_1 0x10001
 
 MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP);
-MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2,
-               (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2)));
+/* We don't bother to check every possible version value */
+MISMATCH_CHECK(QEMU_PSCI_VERSION_0_2, PSCI_VERSION(0, 2));
+MISMATCH_CHECK(QEMU_PSCI_VERSION_1_1, PSCI_VERSION(1, 1));
 
 /* PSCI return values (inclusive of all PSCI versions) */
 #define QEMU_PSCI_RET_SUCCESS 0
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -XXX,XX +XXX,XX @@ static void fdt_add_psci_node(void *fdt)
     }
 
     qemu_fdt_add_subnode(fdt, "/psci");
-    if (armcpu->psci_version == 2) {
-        const char comp[] = "arm,psci-0.2\0arm,psci";
-        qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));
+    if (armcpu->psci_version == QEMU_PSCI_VERSION_0_2 ||
+        armcpu->psci_version == QEMU_PSCI_VERSION_1_1) {
+        if (armcpu->psci_version == QEMU_PSCI_VERSION_0_2) {
+            const char comp[] = "arm,psci-0.2\0arm,psci";
+            qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));
+        } else {
+            const char comp[] = "arm,psci-1.0\0arm,psci-0.2\0arm,psci";
+            qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));
+        }
 
         cpu_off_fn = QEMU_PSCI_0_2_FN_CPU_OFF;
         if (arm_feature(&armcpu->env, ARM_FEATURE_AARCH64)) {
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj)
      * picky DTB consumer will also provide a helpful error message.
      */
     cpu->dtb_compatible = "qemu,unknown";
-    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
+    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
     cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
 
     if (tcg_enabled() || hvf_enabled()) {
-        cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */
+        /* TCG and HVF implement PSCI 1.1 */
+        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
     }
 }
 
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@ static bool hvf_handle_psci_call(CPUState *cpu)
 
     switch (param[0]) {
     case QEMU_PSCI_0_2_FN_PSCI_VERSION:
-        ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+        ret = QEMU_PSCI_VERSION_1_1;
         break;
     case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
         ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
@@ -XXX,XX +XXX,XX @@ static bool hvf_handle_psci_call(CPUState *cpu)
     case QEMU_PSCI_0_2_FN_MIGRATE:
         ret = QEMU_PSCI_RET_NOT_SUPPORTED;
         break;
+    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
+        switch (param[1]) {
+        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+        case QEMU_PSCI_0_1_FN_CPU_ON:
+        case QEMU_PSCI_0_2_FN_CPU_ON:
+        case QEMU_PSCI_0_2_FN64_CPU_ON:
+        case QEMU_PSCI_0_1_FN_CPU_OFF:
+        case QEMU_PSCI_0_2_FN_CPU_OFF:
+        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
+            ret = 0;
+            break;
+        case QEMU_PSCI_0_1_FN_MIGRATE:
+        case QEMU_PSCI_0_2_FN_MIGRATE:
+        default:
+            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+        }
+        break;
     default:
         return false;
     }
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
     }
     if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
-        cpu->psci_version = 2;
+        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
         cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
     }
     if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
diff --git a/target/arm/psci.c b/target/arm/psci.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/psci.c
+++ b/target/arm/psci.c
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
 {
     /*
      * This function partially implements the logic for dispatching Power State
-     * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b),
+     * Coordination Interface (PSCI) calls (as described in ARM DEN 0022D.b),
      * to the extent required for bringing up and taking down secondary cores,
      * and for handling reset and poweroff requests.
      * Additional information about the calling convention used is available in
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
     }
 
     if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) {
-        ret = QEMU_PSCI_RET_INVALID_PARAMS;
+        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
         goto err;
     }
 
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
     ARMCPU *target_cpu;
 
     case QEMU_PSCI_0_2_FN_PSCI_VERSION:
-        ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+        ret = QEMU_PSCI_VERSION_1_1;
         break;
     case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
         ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
@@ -XXX,XX +XXX,XX @@ void arm_handle_psci_call(ARMCPU *cpu)
         }
         helper_wfi(env, 4);
         break;
+    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
+        switch (param[1]) {
+        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+        case QEMU_PSCI_0_1_FN_CPU_ON:
+        case QEMU_PSCI_0_2_FN_CPU_ON:
+        case QEMU_PSCI_0_2_FN64_CPU_ON:
+        case QEMU_PSCI_0_1_FN_CPU_OFF:
+        case QEMU_PSCI_0_2_FN_CPU_OFF:
+        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
+            if (!(param[1] & QEMU_PSCI_0_2_64BIT) || is_a64(env)) {
+                ret = 0;
+                break;
+            }
+            /* fallthrough */
+        case QEMU_PSCI_0_1_FN_MIGRATE:
+        case QEMU_PSCI_0_2_FN_MIGRATE:
+        default:
+            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+            break;
+        }
+        break;
     case QEMU_PSCI_0_1_FN_MIGRATE:
     case QEMU_PSCI_0_2_FN_MIGRATE:
     default:
--
2.25.1
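
The QEMU_PSCI_VERSION_* values above follow the encoding of the PSCI_VERSION
return value in the PSCI specification: major version in bits [31:16], minor
version in bits [15:0]. A small illustrative macro (not part of the patch)
shows why 0x10001 means "PSCI 1.1":

    /* PSCI_VERSION packs the version as (major << 16) | minor. */
    #define EXAMPLE_PSCI_VERSION(major, minor)  (((major) << 16) | (minor))

    /* EXAMPLE_PSCI_VERSION(1, 1) == 0x10001 == QEMU_PSCI_VERSION_1_1 */
    /* EXAMPLE_PSCI_VERSION(0, 2) == 0x00002 == QEMU_PSCI_VERSION_0_2 */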

From: Wentao_Liang <Wentao_Liang_g@163.com>

handle_simd_shift_fpint_conv() was accidentally freeing the TCG
temporary tcg_fpstatus too early, before the last use of it. Move
the free down to where it belongs.

Signed-off-by: Wentao_Liang <Wentao_Liang_g@163.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
[PMM: cleaned up commit message]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/translate-a64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
     }
 
-    tcg_temp_free_ptr(tcg_fpstatus);
     tcg_temp_free_i32(tcg_shift);
     gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
+    tcg_temp_free_ptr(tcg_fpstatus);
     tcg_temp_free_i32(tcg_rmode);
 }
 
--
2.25.1

From: Shengtan Mao <stmao@google.com>

Reviewed-by: Hao Wu <wuhaotsh@google.com>
Reviewed-by: Chris Rauer <crauer@google.com>
Signed-off-by: Shengtan Mao <stmao@google.com>
Signed-off-by: Patrick Venture <venture@google.com>
Message-id: 20220225174451.192304-1-wuhaotsh@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 tests/qtest/npcm7xx_sdhci-test.c | 215 +++++++++++++++++++++++++++++++
 tests/qtest/meson.build          |   1 +
 2 files changed, 216 insertions(+)
 create mode 100644 tests/qtest/npcm7xx_sdhci-test.c

diff --git a/tests/qtest/npcm7xx_sdhci-test.c b/tests/qtest/npcm7xx_sdhci-test.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/qtest/npcm7xx_sdhci-test.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QTests for NPCM7xx SD-3.0 / MMC-4.51 Host Controller
+ *
+ * Copyright (c) 2022 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sd/npcm7xx_sdhci.h"
+
+#include "libqos/libqtest.h"
+#include "libqtest-single.h"
+#include "libqos/sdhci-cmd.h"
+
+#define NPCM7XX_REG_SIZE 0x100
+#define NPCM7XX_MMC_BA 0xF0842000
+#define NPCM7XX_BLK_SIZE 512
+#define NPCM7XX_TEST_IMAGE_SIZE (1 << 30)
+
+char *sd_path;
+
+static QTestState *setup_sd_card(void)
+{
+    QTestState *qts = qtest_initf(
+        "-machine kudo-bmc "
+        "-device sd-card,drive=drive0 "
+        "-drive id=drive0,if=none,file=%s,format=raw,auto-read-only=off",
+        sd_path);
+
+    qtest_writew(qts, NPCM7XX_MMC_BA + SDHC_SWRST, SDHC_RESET_ALL);
+    qtest_writew(qts, NPCM7XX_MMC_BA + SDHC_CLKCON,
+                 SDHC_CLOCK_SDCLK_EN | SDHC_CLOCK_INT_STABLE |
+                 SDHC_CLOCK_INT_EN);
+    sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_APP_CMD);
+    sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0x41200000, 0, (41 << 8));
+    sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_ALL_SEND_CID);
+    sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0, 0, SDHC_SEND_RELATIVE_ADDR);
+    sdhci_cmd_regs(qts, NPCM7XX_MMC_BA, 0, 0, 0x45670000, 0,
+                   SDHC_SELECT_DESELECT_CARD);
+
+    return qts;
+}
+
+static void write_sdread(QTestState *qts, const char *msg)
+{
+    int fd, ret;
+    size_t len = strlen(msg);
+    char *rmsg = g_malloc(len);
+
+    /* write message to sd */
+    fd = open(sd_path, O_WRONLY);
+    g_assert(fd >= 0);
+    ret = write(fd, msg, len);
+    close(fd);
+    g_assert(ret == len);
+
+    /* read message using sdhci */
+    ret = sdhci_read_cmd(qts, NPCM7XX_MMC_BA, rmsg, len);
+    g_assert(ret == len);
+    g_assert(!memcmp(rmsg, msg, len));
+
+    g_free(rmsg);
+}
+
+/* Check MMC can read values from sd */
+static void test_read_sd(void)
+{
+    QTestState *qts = setup_sd_card();
+
+    write_sdread(qts, "hello world");
+    write_sdread(qts, "goodbye");
+
+    qtest_quit(qts);
+}
+
+static void sdwrite_read(QTestState *qts, const char *msg)
+{
+    int fd, ret;
+    size_t len = strlen(msg);
+    char *rmsg = g_malloc(len);
+
+    /* write message using sdhci */
+    sdhci_write_cmd(qts, NPCM7XX_MMC_BA, msg, len, NPCM7XX_BLK_SIZE);
+
+    /* read message from sd */
+    fd = open(sd_path, O_RDONLY);
+    g_assert(fd >= 0);
+    ret = read(fd, rmsg, len);
+    close(fd);
+    g_assert(ret == len);
+
+    g_assert(!memcmp(rmsg, msg, len));
+
+    g_free(rmsg);
+}
+
+/* Check MMC can write values to sd */
+static void test_write_sd(void)
+{
+    QTestState *qts = setup_sd_card();
+
+    sdwrite_read(qts, "hello world");
+    sdwrite_read(qts, "goodbye");
+
+    qtest_quit(qts);
+}
+
+/* Check SDHCI has correct default values. */
+static void test_reset(void)
+{
+    QTestState *qts = qtest_init("-machine kudo-bmc");
+    uint64_t addr = NPCM7XX_MMC_BA;
+    uint64_t end_addr = addr + NPCM7XX_REG_SIZE;
+    uint16_t prstvals_resets[] = {NPCM7XX_PRSTVALS_0_RESET,
+                                  NPCM7XX_PRSTVALS_1_RESET,
+                                  0,
+                                  NPCM7XX_PRSTVALS_3_RESET,
+                                  0,
+                                  0};
+    int i;
+    uint32_t mask;
+
+    while (addr < end_addr) {
+        switch (addr - NPCM7XX_MMC_BA) {
+        case SDHC_PRNSTS:
+            /*
+             * ignores bits 20 to 24: they are changed when reading registers
+             */
+            mask = 0x1f00000;
+            g_assert_cmphex(qtest_readl(qts, addr) | mask, ==,
+                            NPCM7XX_PRSNTS_RESET | mask);
+            addr += 4;
+            break;
+        case SDHC_BLKGAP:
+            g_assert_cmphex(qtest_readb(qts, addr), ==, NPCM7XX_BLKGAP_RESET);
+            addr += 1;
+            break;
+        case SDHC_CAPAB:
+            g_assert_cmphex(qtest_readq(qts, addr), ==, NPCM7XX_CAPAB_RESET);
+            addr += 8;
+            break;
+        case SDHC_MAXCURR:
+            g_assert_cmphex(qtest_readq(qts, addr), ==, NPCM7XX_MAXCURR_RESET);
+            addr += 8;
+            break;
+        case SDHC_HCVER:
+            g_assert_cmphex(qtest_readw(qts, addr), ==, NPCM7XX_HCVER_RESET);
+            addr += 2;
+            break;
+        case NPCM7XX_PRSTVALS:
+            for (i = 0; i < NPCM7XX_PRSTVALS_SIZE; ++i) {
+                g_assert_cmphex(qtest_readw(qts, addr + 2 * i), ==,
+                                prstvals_resets[i]);
+            }
+            addr += NPCM7XX_PRSTVALS_SIZE * 2;
+            break;
+        default:
+            g_assert_cmphex(qtest_readb(qts, addr), ==, 0);
+            addr += 1;
+        }
+    }
+
+    qtest_quit(qts);
+}
+
+static void drive_destroy(void)
+{
+    unlink(sd_path);
+    g_free(sd_path);
+}
+
+static void drive_create(void)
+{
+    int fd, ret;
+    GError *error = NULL;
+
+    /* Create a temporary raw image */
+    fd = g_file_open_tmp("sdhci_XXXXXX", &sd_path, &error);
+    if (fd == -1) {
+        fprintf(stderr, "unable to create sdhci file: %s\n", error->message);
+        g_error_free(error);
+    }
+    g_assert(sd_path != NULL);
+
+    ret = ftruncate(fd, NPCM7XX_TEST_IMAGE_SIZE);
+    g_assert_cmpint(ret, ==, 0);
+    g_message("%s", sd_path);
+    close(fd);
+}
+
+int main(int argc, char **argv)
+{
+    int ret;
+
+    drive_create();
+
+    g_test_init(&argc, &argv, NULL);
+
+    qtest_add_func("npcm7xx_sdhci/reset", test_reset);
+    qtest_add_func("npcm7xx_sdhci/write_sd", test_write_sd);
+    qtest_add_func("npcm7xx_sdhci/read_sd", test_read_sd);
+
+    ret = g_test_run();
+    drive_destroy();
+    return ret;
+}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -XXX,XX +XXX,XX @@ qtests_npcm7xx = \
   'npcm7xx_gpio-test',
   'npcm7xx_pwm-test',
   'npcm7xx_rng-test',
+  'npcm7xx_sdhci-test',
   'npcm7xx_smbus-test',
   'npcm7xx_timer-test',
   'npcm7xx_watchdog_timer-test'] + \
--
2.25.1
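
As a usage note (the exact paths depend on your build tree), the new test
binary can be run on its own like any other qtest, outside of 'make check':

    QTEST_QEMU_BINARY=./qemu-system-arm ./tests/qtest/npcm7xx_sdhci-test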
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to flatview_extend_translation().
3
Its callers either have an attrs value to hand, or don't care
4
and can use MEMTXATTRS_UNSPECIFIED.
5
2
3
Add new macros to manipulate signed fields within the register.
4
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Message-id: 20220301215958.157011-2-richard.henderson@linaro.org
8
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20180521140402.23318-7-peter.maydell@linaro.org
10
---
11
---
11
exec.c | 15 ++++++++++-----
12
include/hw/registerfields.h | 48 ++++++++++++++++++++++++++++++++++++-
12
1 file changed, 10 insertions(+), 5 deletions(-)
13
1 file changed, 47 insertions(+), 1 deletion(-)
13
14
14
diff --git a/exec.c b/exec.c
15
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/exec.c
17
--- a/include/hw/registerfields.h
17
+++ b/exec.c
18
+++ b/include/hw/registerfields.h
18
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
19
@@ -XXX,XX +XXX,XX @@
19
20
extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
20
static hwaddr
21
R_ ## reg ## _ ## field ## _LENGTH)
21
flatview_extend_translation(FlatView *fv, hwaddr addr,
22
22
- hwaddr target_len,
23
+#define FIELD_SEX8(storage, reg, field) \
23
- MemoryRegion *mr, hwaddr base, hwaddr len,
24
+ sextract8((storage), R_ ## reg ## _ ## field ## _SHIFT, \
24
- bool is_write)
25
+ R_ ## reg ## _ ## field ## _LENGTH)
25
+ hwaddr target_len,
26
+#define FIELD_SEX16(storage, reg, field) \
26
+ MemoryRegion *mr, hwaddr base, hwaddr len,
27
+ sextract16((storage), R_ ## reg ## _ ## field ## _SHIFT, \
27
+ bool is_write, MemTxAttrs attrs)
28
+ R_ ## reg ## _ ## field ## _LENGTH)
28
{
29
+#define FIELD_SEX32(storage, reg, field) \
29
hwaddr done = 0;
30
+ sextract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
30
hwaddr xlat;
31
+ R_ ## reg ## _ ## field ## _LENGTH)
31
@@ -XXX,XX +XXX,XX @@ void *address_space_map(AddressSpace *as,
32
+#define FIELD_SEX64(storage, reg, field) \
32
33
+ sextract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
33
memory_region_ref(mr);
34
+ R_ ## reg ## _ ## field ## _LENGTH)
34
*plen = flatview_extend_translation(fv, addr, len, mr, xlat,
35
+
35
- l, is_write);
36
/* Extract a field from an array of registers */
36
+ l, is_write, attrs);
37
#define ARRAY_FIELD_EX32(regs, reg, field) \
37
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
38
FIELD_EX32((regs)[R_ ## reg], reg, field)
38
rcu_read_unlock();
39
@@ -XXX,XX +XXX,XX @@
39
40
_d; })
40
@@ -XXX,XX +XXX,XX @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
41
#define FIELD_DP64(storage, reg, field, val) ({ \
41
mr = cache->mrs.mr;
42
struct { \
42
memory_region_ref(mr);
43
- uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
43
if (memory_access_is_direct(mr, is_write)) {
44
+ uint64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
44
+ /* We don't care about the memory attributes here as we're only
45
+ } _v = { .v = val }; \
45
+ * doing this if we found actual RAM, which behaves the same
46
+ uint64_t _d; \
46
+ * regardless of attributes; so UNSPECIFIED is fine.
47
+ _d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
47
+ */
48
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
48
l = flatview_extend_translation(cache->fv, addr, len, mr,
49
+ _d; })
49
- cache->xlat, l, is_write);
50
+
50
+ cache->xlat, l, is_write,
51
+#define FIELD_SDP8(storage, reg, field, val) ({ \
51
+ MEMTXATTRS_UNSPECIFIED);
52
+ struct { \
52
cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
53
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
53
} else {
54
+ } _v = { .v = val }; \
54
cache->ptr = NULL;
55
+ uint8_t _d; \
56
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
57
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
58
+ _d; })
59
+#define FIELD_SDP16(storage, reg, field, val) ({ \
60
+ struct { \
61
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
62
+ } _v = { .v = val }; \
63
+ uint16_t _d; \
64
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
65
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
66
+ _d; })
67
+#define FIELD_SDP32(storage, reg, field, val) ({ \
68
+ struct { \
69
+ signed int v:R_ ## reg ## _ ## field ## _LENGTH; \
70
+ } _v = { .v = val }; \
71
+ uint32_t _d; \
72
+ _d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
73
+ R_ ## reg ## _ ## field ## _LENGTH, _v.v); \
74
+ _d; })
75
+#define FIELD_SDP64(storage, reg, field, val) ({ \
76
+ struct { \
77
+ int64_t v:R_ ## reg ## _ ## field ## _LENGTH; \
78
} _v = { .v = val }; \
79
uint64_t _d; \
80
_d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
55
--
81
--
56
2.17.1
82
2.25.1
57
83
58
84
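(Illustration, not part of the patch.) The new signed accessors pair with the existing FIELD() declarations; the STATUS register and DELTA field below are invented purely for the example, only the FIELD_SEX32/FIELD_SDP32 macros come from this patch:

    #include "hw/registerfields.h"

    /* Hypothetical register layout, declared with the existing helpers. */
    REG32(STATUS, 0x04)
        FIELD(STATUS, DELTA, 8, 8)     /* 8-bit signed field at bits [15:8] */

    static int32_t status_get_delta(uint32_t status)
    {
        /* sign-extends bits [15:8]: a field value of 0xff reads back as -1 */
        return FIELD_SEX32(status, STATUS, DELTA);
    }

    static uint32_t status_set_delta(uint32_t status, int32_t delta)
    {
        /* deposits the low 8 bits of 'delta' into bits [15:8] */
        return FIELD_SDP32(status, STATUS, DELTA, delta);
    }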
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to address_space_translate()
3
and address_space_translate_cached(). Callers either have an
4
attrs value to hand, or don't care and can use MEMTXATTRS_UNSPECIFIED.
5
2
3
Set this as the kernel would, to 48 bits, to keep the computation
4
of the address space correct for PAuth.
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20220301215958.157011-3-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20180521140402.23318-4-peter.maydell@linaro.org
10
---
10
---
11
include/exec/memory.h | 4 +++-
11
target/arm/cpu.c | 3 ++-
12
accel/tcg/translate-all.c | 2 +-
12
1 file changed, 2 insertions(+), 1 deletion(-)
13
exec.c | 14 +++++++++-----
14
hw/vfio/common.c | 3 ++-
15
memory_ldst.inc.c | 18 +++++++++---------
16
target/riscv/helper.c | 2 +-
17
6 files changed, 25 insertions(+), 18 deletions(-)
18
13
19
diff --git a/include/exec/memory.h b/include/exec/memory.h
14
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
20
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
21
--- a/include/exec/memory.h
16
--- a/target/arm/cpu.c
22
+++ b/include/exec/memory.h
17
+++ b/target/arm/cpu.c
23
@@ -XXX,XX +XXX,XX @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
18
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
24
* #MemoryRegion.
19
aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
25
* @len: pointer to length
20
}
26
* @is_write: indicates the transfer direction
21
/*
27
+ * @attrs: memory attributes
22
+ * Enable 48-bit address space (TODO: take reserved_va into account).
28
*/
23
* Enable TBI0 but not TBI1.
29
MemoryRegion *flatview_translate(FlatView *fv,
24
* Note that this must match useronly_clean_ptr.
30
hwaddr addr, hwaddr *xlat,
25
*/
31
@@ -XXX,XX +XXX,XX @@ MemoryRegion *flatview_translate(FlatView *fv,
26
- env->cp15.tcr_el[1].raw_tcr = (1ULL << 37);
32
27
+ env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);
33
static inline MemoryRegion *address_space_translate(AddressSpace *as,
28
34
hwaddr addr, hwaddr *xlat,
29
/* Enable MTE */
35
- hwaddr *len, bool is_write)
30
if (cpu_isar_feature(aa64_mte, cpu)) {
36
+ hwaddr *len, bool is_write,
37
+ MemTxAttrs attrs)
38
{
39
return flatview_translate(address_space_to_flatview(as),
40
addr, xlat, len, is_write);
41
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/accel/tcg/translate-all.c
44
+++ b/accel/tcg/translate-all.c
45
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
46
hwaddr l = 1;
47
48
rcu_read_lock();
49
- mr = address_space_translate(as, addr, &addr, &l, false);
50
+ mr = address_space_translate(as, addr, &addr, &l, false, attrs);
51
if (!(memory_region_is_ram(mr)
52
|| memory_region_is_romd(mr))) {
53
rcu_read_unlock();
54
diff --git a/exec.c b/exec.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/exec.c
57
+++ b/exec.c
58
@@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
59
rcu_read_lock();
60
while (len > 0) {
61
l = len;
62
- mr = address_space_translate(as, addr, &addr1, &l, true);
63
+ mr = address_space_translate(as, addr, &addr1, &l, true,
64
+ MEMTXATTRS_UNSPECIFIED);
65
66
if (!(memory_region_is_ram(mr) ||
67
memory_region_is_romd(mr))) {
68
@@ -XXX,XX +XXX,XX @@ void address_space_cache_destroy(MemoryRegionCache *cache)
69
*/
70
static inline MemoryRegion *address_space_translate_cached(
71
MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
72
- hwaddr *plen, bool is_write)
73
+ hwaddr *plen, bool is_write, MemTxAttrs attrs)
74
{
75
MemoryRegionSection section;
76
MemoryRegion *mr;
77
@@ -XXX,XX +XXX,XX @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
78
MemoryRegion *mr;
79
80
l = len;
81
- mr = address_space_translate_cached(cache, addr, &addr1, &l, false);
82
+ mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
83
+ MEMTXATTRS_UNSPECIFIED);
84
flatview_read_continue(cache->fv,
85
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
86
addr1, l, mr);
87
@@ -XXX,XX +XXX,XX @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
88
MemoryRegion *mr;
89
90
l = len;
91
- mr = address_space_translate_cached(cache, addr, &addr1, &l, true);
92
+ mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
93
+ MEMTXATTRS_UNSPECIFIED);
94
flatview_write_continue(cache->fv,
95
addr, MEMTXATTRS_UNSPECIFIED, buf, len,
96
addr1, l, mr);
97
@@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_is_io(hwaddr phys_addr)
98
99
rcu_read_lock();
100
mr = address_space_translate(&address_space_memory,
101
- phys_addr, &phys_addr, &l, false);
102
+ phys_addr, &phys_addr, &l, false,
103
+ MEMTXATTRS_UNSPECIFIED);
104
105
res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
106
rcu_read_unlock();
107
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/hw/vfio/common.c
110
+++ b/hw/vfio/common.c
111
@@ -XXX,XX +XXX,XX @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
112
*/
113
mr = address_space_translate(&address_space_memory,
114
iotlb->translated_addr,
115
- &xlat, &len, writable);
116
+ &xlat, &len, writable,
117
+ MEMTXATTRS_UNSPECIFIED);
118
if (!memory_region_is_ram(mr)) {
119
error_report("iommu map to non memory area %"HWADDR_PRIx"",
120
xlat);
121
diff --git a/memory_ldst.inc.c b/memory_ldst.inc.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/memory_ldst.inc.c
124
+++ b/memory_ldst.inc.c
125
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
126
bool release_lock = false;
127
128
RCU_READ_LOCK();
129
- mr = TRANSLATE(addr, &addr1, &l, false);
130
+ mr = TRANSLATE(addr, &addr1, &l, false, attrs);
131
if (l < 4 || !IS_DIRECT(mr, false)) {
132
release_lock |= prepare_mmio_access(mr);
133
134
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
135
bool release_lock = false;
136
137
RCU_READ_LOCK();
138
- mr = TRANSLATE(addr, &addr1, &l, false);
139
+ mr = TRANSLATE(addr, &addr1, &l, false, attrs);
140
if (l < 8 || !IS_DIRECT(mr, false)) {
141
release_lock |= prepare_mmio_access(mr);
142
143
@@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
144
bool release_lock = false;
145
146
RCU_READ_LOCK();
147
- mr = TRANSLATE(addr, &addr1, &l, false);
148
+ mr = TRANSLATE(addr, &addr1, &l, false, attrs);
149
if (!IS_DIRECT(mr, false)) {
150
release_lock |= prepare_mmio_access(mr);
151
152
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
153
bool release_lock = false;
154
155
RCU_READ_LOCK();
156
- mr = TRANSLATE(addr, &addr1, &l, false);
157
+ mr = TRANSLATE(addr, &addr1, &l, false, attrs);
158
if (l < 2 || !IS_DIRECT(mr, false)) {
159
release_lock |= prepare_mmio_access(mr);
160
161
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
162
bool release_lock = false;
163
164
RCU_READ_LOCK();
165
- mr = TRANSLATE(addr, &addr1, &l, true);
166
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
167
if (l < 4 || !IS_DIRECT(mr, true)) {
168
release_lock |= prepare_mmio_access(mr);
169
170
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
171
bool release_lock = false;
172
173
RCU_READ_LOCK();
174
- mr = TRANSLATE(addr, &addr1, &l, true);
175
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
176
if (l < 4 || !IS_DIRECT(mr, true)) {
177
release_lock |= prepare_mmio_access(mr);
178
179
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,
180
bool release_lock = false;
181
182
RCU_READ_LOCK();
183
- mr = TRANSLATE(addr, &addr1, &l, true);
184
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
185
if (!IS_DIRECT(mr, true)) {
186
release_lock |= prepare_mmio_access(mr);
187
r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
188
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
189
bool release_lock = false;
190
191
RCU_READ_LOCK();
192
- mr = TRANSLATE(addr, &addr1, &l, true);
193
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
194
if (l < 2 || !IS_DIRECT(mr, true)) {
195
release_lock |= prepare_mmio_access(mr);
196
197
@@ -XXX,XX +XXX,XX @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
198
bool release_lock = false;
199
200
RCU_READ_LOCK();
201
- mr = TRANSLATE(addr, &addr1, &l, true);
202
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
203
if (l < 8 || !IS_DIRECT(mr, true)) {
204
release_lock |= prepare_mmio_access(mr);
205
206
diff --git a/target/riscv/helper.c b/target/riscv/helper.c
207
index XXXXXXX..XXXXXXX 100644
208
--- a/target/riscv/helper.c
209
+++ b/target/riscv/helper.c
210
@@ -XXX,XX +XXX,XX @@ restart:
211
MemoryRegion *mr;
212
hwaddr l = sizeof(target_ulong), addr1;
213
mr = address_space_translate(cs->as, pte_addr,
214
- &addr1, &l, false);
215
+ &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
216
if (memory_access_is_direct(mr, true)) {
217
target_ulong *pte_pa =
218
qemu_map_ram_ptr(mr->ram_block, addr1);
219
--
31
--
220
2.17.1
32
2.25.1
221
222
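(Illustration, not part of the patch.) Bit 37 set here is TCR_EL1.TBI0, so bits [63:56] of a user-space data pointer are ignored for translation. A rough sketch of the pointer cleaning this implies (not the actual useronly_clean_ptr definition, just the idea):

    #include "qemu/bitops.h"

    /* With TBI0 enabled, a tagged pointer and its untagged form resolve to
     * the same virtual address, so the tag can simply be masked off. */
    static uint64_t strip_top_byte(uint64_t ptr)
    {
        return ptr & MAKE_64BIT_MASK(0, 56);    /* drop bits [63:56] */
    }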
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
cpregs_keys is a uint32_t* so the allocation should use uint32_t.
3
Without FEAT_LVA, the behaviour of programming an invalid value
4
g_new is even better because it is type-safe.
4
is IMPLEMENTATION DEFINED. With FEAT_LVA, programming an invalid
5
minimum value requires a Translation fault.
5
6
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
It is most self-consistent to choose to generate the fault always.
8
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 20220301215958.157011-4-richard.henderson@linaro.org
9
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
---
13
---
11
target/arm/gdbstub.c | 3 +--
14
target/arm/internals.h | 1 +
12
1 file changed, 1 insertion(+), 2 deletions(-)
15
target/arm/helper.c | 32 ++++++++++++++++++++++++++++----
16
2 files changed, 29 insertions(+), 4 deletions(-)
13
17
14
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
18
diff --git a/target/arm/internals.h b/target/arm/internals.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/gdbstub.c
20
--- a/target/arm/internals.h
17
+++ b/target/arm/gdbstub.c
21
+++ b/target/arm/internals.h
18
@@ -XXX,XX +XXX,XX @@ int arm_gen_dynamic_xml(CPUState *cs)
22
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
19
RegisterSysregXmlParam param = {cs, s};
23
bool hpd : 1;
20
24
bool using16k : 1;
21
cpu->dyn_xml.num_cpregs = 0;
25
bool using64k : 1;
22
- cpu->dyn_xml.cpregs_keys = g_malloc(sizeof(uint32_t *) *
26
+ bool tsz_oob : 1; /* tsz has been clamped to legal range */
23
- g_hash_table_size(cpu->cp_regs));
27
} ARMVAParameters;
24
+ cpu->dyn_xml.cpregs_keys = g_new(uint32_t, g_hash_table_size(cpu->cp_regs));
28
25
g_string_printf(s, "<?xml version=\"1.0\"?>");
29
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
26
g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
30
diff --git a/target/arm/helper.c b/target/arm/helper.c
27
g_string_append_printf(s, "<feature name=\"org.qemu.gdb.arm.sys.regs\">");
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/arm/helper.c
33
+++ b/target/arm/helper.c
34
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
35
ARMMMUIdx mmu_idx, bool data)
36
{
37
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
38
- bool epd, hpd, using16k, using64k;
39
- int select, tsz, tbi, max_tsz;
40
+ bool epd, hpd, using16k, using64k, tsz_oob;
41
+ int select, tsz, tbi, max_tsz, min_tsz;
42
43
if (!regime_has_2_ranges(mmu_idx)) {
44
select = 0;
45
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
46
} else {
47
max_tsz = 39;
48
}
49
+ min_tsz = 16; /* TODO: ARMv8.2-LVA */
50
51
- tsz = MIN(tsz, max_tsz);
52
- tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
53
+ if (tsz > max_tsz) {
54
+ tsz = max_tsz;
55
+ tsz_oob = true;
56
+ } else if (tsz < min_tsz) {
57
+ tsz = min_tsz;
58
+ tsz_oob = true;
59
+ } else {
60
+ tsz_oob = false;
61
+ }
62
63
/* Present TBI as a composite with TBID. */
64
tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
65
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
66
.hpd = hpd,
67
.using16k = using16k,
68
.using64k = using64k,
69
+ .tsz_oob = tsz_oob,
70
};
71
}
72
73
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
74
param = aa64_va_parameters(env, address, mmu_idx,
75
access_type != MMU_INST_FETCH);
76
level = 0;
77
+
78
+ /*
79
+ * If TxSZ is programmed to a value larger than the maximum,
80
+ * or smaller than the effective minimum, it is IMPLEMENTATION
81
+ * DEFINED whether we behave as if the field were programmed
82
+ * within bounds, or if a level 0 Translation fault is generated.
83
+ *
84
+ * With FEAT_LVA, fault on less than minimum becomes required,
85
+ * so our choice is to always raise the fault.
86
+ */
87
+ if (param.tsz_oob) {
88
+ fault_type = ARMFault_Translation;
89
+ goto do_fault;
90
+ }
91
+
92
addrsize = 64 - 8 * param.tbi;
93
inputsize = 64 - param.tsz;
94
} else {
28
--
95
--
29
2.17.1
96
2.25.1
30
31
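(Illustration, not part of the patch.) The input address range is 64 - TxSZ bits, so TxSZ = 16 gives a 48-bit range; values outside [min_tsz, max_tsz] used to be clamped silently and now set tsz_oob so the walk can fault. A stand-alone sketch of the bound check, with a simplified upper bound:

    /* Out-of-range TxSZ now produces a level 0 Translation fault rather
     * than being clamped.  min_tsz is lowered to 12 by FEAT_LVA later in
     * this series; 39 is just the common max_tsz, the real bound depends
     * on the configuration. */
    static bool txsz_in_range(int tsz)
    {
        return tsz >= 16 && tsz <= 39;
    }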
diff view generated by jsdifflib
1
From: Shannon Zhao <zhaoshenglong@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
It forgot to increase clroffset during the loop. So it only clears the
3
We will shortly share parts of this function with other portions
4
first 4 bytes.
4
of address translation.
5
5
6
Fixes: 367b9f527becdd20ddf116e17a3c0c2bbc486920
7
Cc: qemu-stable@nongnu.org
8
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
9
Reviewed-by: Eric Auger <eric.auger@redhat.com>
10
Message-id: 1527047633-12368-1-git-send-email-zhaoshenglong@huawei.com
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220301215958.157011-5-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
12
---
14
hw/intc/arm_gicv3_kvm.c | 1 +
13
target/arm/internals.h | 19 +------------------
15
1 file changed, 1 insertion(+)
14
target/arm/helper.c | 22 ++++++++++++++++++++++
15
2 files changed, 23 insertions(+), 18 deletions(-)
16
16
17
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
17
diff --git a/target/arm/internals.h b/target/arm/internals.h
18
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/intc/arm_gicv3_kvm.c
19
--- a/target/arm/internals.h
20
+++ b/hw/intc/arm_gicv3_kvm.c
20
+++ b/target/arm/internals.h
21
@@ -XXX,XX +XXX,XX @@ static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
21
@@ -XXX,XX +XXX,XX @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
22
if (clroffset != 0) {
22
* Returns the implementation defined bit-width of physical addresses.
23
reg = 0;
23
* The ARMv8 reference manuals refer to this as PAMax().
24
kvm_gicd_access(s, clroffset, &reg, true);
24
*/
25
+ clroffset += 4;
25
-static inline unsigned int arm_pamax(ARMCPU *cpu)
26
}
26
-{
27
reg = *gic_bmp_ptr32(bmp, irq);
27
- static const unsigned int pamax_map[] = {
28
kvm_gicd_access(s, offset, &reg, true);
28
- [0] = 32,
29
- [1] = 36,
30
- [2] = 40,
31
- [3] = 42,
32
- [4] = 44,
33
- [5] = 48,
34
- };
35
- unsigned int parange =
36
- FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
37
-
38
- /* id_aa64mmfr0 is a read-only register so values outside of the
39
- * supported mappings can be considered an implementation error. */
40
- assert(parange < ARRAY_SIZE(pamax_map));
41
- return pamax_map[parange];
42
-}
43
+unsigned int arm_pamax(ARMCPU *cpu);
44
45
/* Return true if extended addresses are enabled.
46
* This is always the case if our translation regime is 64 bit,
47
diff --git a/target/arm/helper.c b/target/arm/helper.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/arm/helper.c
50
+++ b/target/arm/helper.c
51
@@ -XXX,XX +XXX,XX @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
52
}
53
#endif /* !CONFIG_USER_ONLY */
54
55
+/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
56
+unsigned int arm_pamax(ARMCPU *cpu)
57
+{
58
+ static const unsigned int pamax_map[] = {
59
+ [0] = 32,
60
+ [1] = 36,
61
+ [2] = 40,
62
+ [3] = 42,
63
+ [4] = 44,
64
+ [5] = 48,
65
+ };
66
+ unsigned int parange =
67
+ FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
68
+
69
+ /*
70
+ * id_aa64mmfr0 is a read-only register so values outside of the
71
+ * supported mappings can be considered an implementation error.
72
+ */
73
+ assert(parange < ARRAY_SIZE(pamax_map));
74
+ return pamax_map[parange];
75
+}
76
+
77
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
78
{
79
if (regime_has_2_ranges(mmu_idx)) {
29
--
80
--
30
2.17.1
81
2.25.1
31
82
32
83
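(Illustration, not part of the patch.) With arm_pamax() exported it can be used outside helper.c, e.g. by hw/arm/virt as the new comment says. A hypothetical caller (the bounds check is invented for the example):

    #include "cpu.h"
    #include "internals.h"
    #include "qemu/bitops.h"

    /* True if 'gpa' is representable in the CPU's physical address range;
     * arm_pamax() maps ID_AA64MMFR0.PARANGE values 0..5 to 32..48 bits. */
    static bool gpa_in_range(ARMCPU *cpu, uint64_t gpa)
    {
        return gpa < BIT_ULL(arm_pamax(cpu));
    }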
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to flatview_translate(); all its
3
callers now have attrs available.
4
2
3
Pass down the width of the output address from translation.
4
For now this is still just PAMax, but a subsequent patch will
5
compute the correct value from TCR_ELx.{I}PS.
6
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20220301215958.157011-6-richard.henderson@linaro.org
5
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: 20180521140402.23318-11-peter.maydell@linaro.org
9
---
11
---
10
include/exec/memory.h | 7 ++++---
12
target/arm/helper.c | 21 ++++++++++-----------
11
exec.c | 17 +++++++++--------
13
1 file changed, 10 insertions(+), 11 deletions(-)
12
2 files changed, 13 insertions(+), 11 deletions(-)
13
14
14
diff --git a/include/exec/memory.h b/include/exec/memory.h
15
diff --git a/target/arm/helper.c b/target/arm/helper.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/memory.h
17
--- a/target/arm/helper.c
17
+++ b/include/exec/memory.h
18
+++ b/target/arm/helper.c
18
@@ -XXX,XX +XXX,XX @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
19
@@ -XXX,XX +XXX,XX @@ do_fault:
20
* false otherwise.
19
*/
21
*/
20
MemoryRegion *flatview_translate(FlatView *fv,
22
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
21
hwaddr addr, hwaddr *xlat,
23
- int inputsize, int stride)
22
- hwaddr *len, bool is_write);
24
+ int inputsize, int stride, int outputsize)
23
+ hwaddr *len, bool is_write,
24
+ MemTxAttrs attrs);
25
26
static inline MemoryRegion *address_space_translate(AddressSpace *as,
27
hwaddr addr, hwaddr *xlat,
28
@@ -XXX,XX +XXX,XX @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
29
MemTxAttrs attrs)
30
{
25
{
31
return flatview_translate(address_space_to_flatview(as),
26
const int grainsize = stride + 3;
32
- addr, xlat, len, is_write);
27
int startsizecheck;
33
+ addr, xlat, len, is_write, attrs);
28
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
34
}
29
}
35
30
36
/* address_space_access_valid: check for validity of accessing an address
31
if (is_aa64) {
37
@@ -XXX,XX +XXX,XX @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
32
- CPUARMState *env = &cpu->env;
38
rcu_read_lock();
33
- unsigned int pamax = arm_pamax(cpu);
39
fv = address_space_to_flatview(as);
34
-
40
l = len;
35
switch (stride) {
41
- mr = flatview_translate(fv, addr, &addr1, &l, false);
36
case 13: /* 64KB Pages. */
42
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
37
- if (level == 0 || (level == 1 && pamax <= 42)) {
43
if (len == l && memory_access_is_direct(mr, false)) {
38
+ if (level == 0 || (level == 1 && outputsize <= 42)) {
44
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
39
return false;
45
memcpy(buf, ptr, len);
40
}
46
diff --git a/exec.c b/exec.c
41
break;
47
index XXXXXXX..XXXXXXX 100644
42
case 11: /* 16KB Pages. */
48
--- a/exec.c
43
- if (level == 0 || (level == 1 && pamax <= 40)) {
49
+++ b/exec.c
44
+ if (level == 0 || (level == 1 && outputsize <= 40)) {
50
@@ -XXX,XX +XXX,XX @@ iotlb_fail:
45
return false;
51
46
}
52
/* Called from RCU critical section */
47
break;
53
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
48
case 9: /* 4KB Pages. */
54
- hwaddr *plen, bool is_write)
49
- if (level == 0 && pamax <= 42) {
55
+ hwaddr *plen, bool is_write,
50
+ if (level == 0 && outputsize <= 42) {
56
+ MemTxAttrs attrs)
51
return false;
57
{
52
}
58
MemoryRegion *mr;
53
break;
59
MemoryRegionSection section;
54
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
60
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
61
}
55
}
62
56
63
l = len;
57
/* Inputsize checks. */
64
- mr = flatview_translate(fv, addr, &addr1, &l, true);
58
- if (inputsize > pamax &&
65
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
59
- (arm_el_is_aa64(env, 1) || inputsize > 40)) {
60
+ if (inputsize > outputsize &&
61
+ (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
62
/* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
63
return false;
64
}
65
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
66
target_ulong page_size;
67
uint32_t attrs;
68
int32_t stride;
69
- int addrsize, inputsize;
70
+ int addrsize, inputsize, outputsize;
71
TCR *tcr = regime_tcr(env, mmu_idx);
72
int ap, ns, xn, pxn;
73
uint32_t el = regime_el(env, mmu_idx);
74
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
75
76
addrsize = 64 - 8 * param.tbi;
77
inputsize = 64 - param.tsz;
78
+ outputsize = arm_pamax(cpu);
79
} else {
80
param = aa32_va_parameters(env, address, mmu_idx);
81
level = 1;
82
addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
83
inputsize = addrsize - param.tsz;
84
+ outputsize = 40;
66
}
85
}
67
86
68
return result;
87
/*
69
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
88
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
70
MemTxResult result = MEMTX_OK;
89
71
90
/* Check that the starting level is valid. */
72
l = len;
91
ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
73
- mr = flatview_translate(fv, addr, &addr1, &l, true);
92
- inputsize, stride);
74
+ mr = flatview_translate(fv, addr, &addr1, &l, true, attrs);
93
+ inputsize, stride, outputsize);
75
result = flatview_write_continue(fv, addr, attrs, buf, len,
94
if (!ok) {
76
addr1, l, mr);
95
fault_type = ARMFault_Translation;
77
96
goto do_fault;
78
@@ -XXX,XX +XXX,XX @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
79
}
80
81
l = len;
82
- mr = flatview_translate(fv, addr, &addr1, &l, false);
83
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
84
}
85
86
return result;
87
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
88
MemoryRegion *mr;
89
90
l = len;
91
- mr = flatview_translate(fv, addr, &addr1, &l, false);
92
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
93
return flatview_read_continue(fv, addr, attrs, buf, len,
94
addr1, l, mr);
95
}
96
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
97
98
while (len > 0) {
99
l = len;
100
- mr = flatview_translate(fv, addr, &xlat, &l, is_write);
101
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
102
if (!memory_access_is_direct(mr, is_write)) {
103
l = memory_access_size(mr, l, addr);
104
if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
105
@@ -XXX,XX +XXX,XX @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
106
107
len = target_len;
108
this_mr = flatview_translate(fv, addr, &xlat,
109
- &len, is_write);
110
+ &len, is_write, attrs);
111
if (this_mr != mr || xlat != base + done) {
112
return done;
113
}
114
@@ -XXX,XX +XXX,XX @@ void *address_space_map(AddressSpace *as,
115
l = len;
116
rcu_read_lock();
117
fv = address_space_to_flatview(as);
118
- mr = flatview_translate(fv, addr, &xlat, &l, is_write);
119
+ mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
120
121
if (!memory_access_is_direct(mr, is_write)) {
122
if (atomic_xchg(&bounce.in_use, true)) {
123
--
97
--
124
2.17.1
98
2.25.1
125
126
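(Illustration, not part of the patch.) MAKE_64BIT_MASK(0, n) builds the same value as the open-coded (1ULL << n) - 1 for n < 64, just more readably. A quick equivalence check for the three granule strides used here:

    #include <assert.h>
    #include "qemu/bitops.h"

    static void check_indexmask_equivalence(void)
    {
        static const int strides[] = { 9, 11, 13 };   /* 4K, 16K, 64K pages */
        for (int i = 0; i < 3; i++) {
            int n = strides[i] + 3;
            assert(MAKE_64BIT_MASK(0, n) == (1ULL << n) - 1);
        }
    }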
diff view generated by jsdifflib
1
From: Shannon Zhao <zhaoshenglong@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
acpi_data_push uses g_array_set_size to resize the memory size. If there
3
The macro is a bit more readable than the inlined computation.
4
is not enough contiguous memory, the address will change. So the previous
5
pointer can no longer be used. The code must update the pointer and use
6
the new one.
7
4
8
Also, the previous code wrongly used the le32 conversion of iort->node_offset
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
for subsequent computations, which results in an incorrect value if the host is
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
not little endian. So use the non-converted one instead.
7
Message-id: 20220301215958.157011-7-richard.henderson@linaro.org
11
12
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
13
Reviewed-by: Eric Auger <eric.auger@redhat.com>
14
Message-id: 1527663951-14552-1-git-send-email-zhaoshenglong@huawei.com
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
9
---
17
hw/arm/virt-acpi-build.c | 20 +++++++++++++++-----
10
target/arm/helper.c | 4 ++--
18
1 file changed, 15 insertions(+), 5 deletions(-)
11
1 file changed, 2 insertions(+), 2 deletions(-)
19
12
20
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
13
diff --git a/target/arm/helper.c b/target/arm/helper.c
21
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/arm/virt-acpi-build.c
15
--- a/target/arm/helper.c
23
+++ b/hw/arm/virt-acpi-build.c
16
+++ b/target/arm/helper.c
24
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
17
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
25
AcpiIortItsGroup *its;
18
level = startlevel;
26
AcpiIortTable *iort;
27
AcpiIortSmmu3 *smmu;
28
- size_t node_size, iort_length, smmu_offset = 0;
29
+ size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
30
AcpiIortRC *rc;
31
32
iort = acpi_data_push(table_data, sizeof(*iort));
33
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
34
35
iort_length = sizeof(*iort);
36
iort->node_count = cpu_to_le32(nb_nodes);
37
- iort->node_offset = cpu_to_le32(sizeof(*iort));
38
+ /*
39
+ * Use a copy in case table_data->data moves during acpi_data_push
40
+ * operations.
41
+ */
42
+ iort_node_offset = sizeof(*iort);
43
+ iort->node_offset = cpu_to_le32(iort_node_offset);
44
45
/* ITS group node */
46
node_size = sizeof(*its) + sizeof(uint32_t);
47
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
48
int irq = vms->irqmap[VIRT_SMMU];
49
50
/* SMMUv3 node */
51
- smmu_offset = iort->node_offset + node_size;
52
+ smmu_offset = iort_node_offset + node_size;
53
node_size = sizeof(*smmu) + sizeof(*idmap);
54
iort_length += node_size;
55
smmu = acpi_data_push(table_data, node_size);
56
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
57
idmap->id_count = cpu_to_le32(0xFFFF);
58
idmap->output_base = 0;
59
/* output IORT node is the ITS group node (the first node) */
60
- idmap->output_reference = cpu_to_le32(iort->node_offset);
61
+ idmap->output_reference = cpu_to_le32(iort_node_offset);
62
}
19
}
63
20
64
/* Root Complex Node */
21
- indexmask_grainsize = (1ULL << (stride + 3)) - 1;
65
@@ -XXX,XX +XXX,XX @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
22
- indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
66
idmap->output_reference = cpu_to_le32(smmu_offset);
23
+ indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
67
} else {
24
+ indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
68
/* output IORT node is the ITS group node (the first node) */
25
69
- idmap->output_reference = cpu_to_le32(iort->node_offset);
26
/* Now we can extract the actual base address from the TTBR */
70
+ idmap->output_reference = cpu_to_le32(iort_node_offset);
27
descaddr = extract64(ttbr, 0, 48);
71
}
72
73
+ /*
74
+ * Update the pointer address in case table_data->data moves during above
75
+ * acpi_data_push operations.
76
+ */
77
+ iort = (AcpiIortTable *)(table_data->data + iort_start);
78
iort->length = cpu_to_le32(iort_length);
79
80
build_header(linker, table_data, (void *)(table_data->data + iort_start),
81
--
28
--
82
2.17.1
29
2.25.1
83
30
84
31
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to address_space_map().
3
Its callers either have an attrs value to hand, or don't care
4
and can use MEMTXATTRS_UNSPECIFIED.
5
2
3
This field controls the output (intermediate) physical address size
4
of the translation process. V8 requires to raise an AddressSize
5
fault if the page tables are programmed incorrectly, such that any
6
intermediate descriptor address, or the final translated address,
7
is out of range.
8
9
Add a PS field to ARMVAParameters, and properly compute outputsize
10
in get_phys_addr_lpae. Test the descaddr as extracted from TTBR
11
and from page table entries.
12
13
Restrict descaddrmask so that we won't raise the fault for v7.
14
15
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
16
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
17
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
18
Message-id: 20220301215958.157011-8-richard.henderson@linaro.org
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20180521140402.23318-5-peter.maydell@linaro.org
10
---
20
---
11
include/exec/memory.h | 3 ++-
21
target/arm/internals.h | 1 +
12
include/sysemu/dma.h | 3 ++-
22
target/arm/helper.c | 72 ++++++++++++++++++++++++++++++++----------
13
exec.c | 6 ++++--
23
2 files changed, 57 insertions(+), 16 deletions(-)
14
target/ppc/mmu-hash64.c | 3 ++-
15
4 files changed, 10 insertions(+), 5 deletions(-)
16
24
17
diff --git a/include/exec/memory.h b/include/exec/memory.h
25
diff --git a/target/arm/internals.h b/target/arm/internals.h
18
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/memory.h
27
--- a/target/arm/internals.h
20
+++ b/include/exec/memory.h
28
+++ b/target/arm/internals.h
21
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
29
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
22
* @addr: address within that address space
23
* @plen: pointer to length of buffer; updated on return
24
* @is_write: indicates the transfer direction
25
+ * @attrs: memory attributes
26
*/
30
*/
27
void *address_space_map(AddressSpace *as, hwaddr addr,
31
typedef struct ARMVAParameters {
28
- hwaddr *plen, bool is_write);
32
unsigned tsz : 8;
29
+ hwaddr *plen, bool is_write, MemTxAttrs attrs);
33
+ unsigned ps : 3;
30
34
unsigned select : 1;
31
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
35
bool tbi : 1;
32
*
36
bool epd : 1;
33
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
37
diff --git a/target/arm/helper.c b/target/arm/helper.c
34
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
35
--- a/include/sysemu/dma.h
39
--- a/target/arm/helper.c
36
+++ b/include/sysemu/dma.h
40
+++ b/target/arm/helper.c
37
@@ -XXX,XX +XXX,XX @@ static inline void *dma_memory_map(AddressSpace *as,
41
@@ -XXX,XX +XXX,XX @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
38
hwaddr xlen = *len;
39
void *p;
40
41
- p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
42
+ p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
43
+ MEMTXATTRS_UNSPECIFIED);
44
*len = xlen;
45
return p;
46
}
42
}
47
diff --git a/exec.c b/exec.c
43
#endif /* !CONFIG_USER_ONLY */
48
index XXXXXXX..XXXXXXX 100644
44
49
--- a/exec.c
45
+/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
50
+++ b/exec.c
46
+static const uint8_t pamax_map[] = {
51
@@ -XXX,XX +XXX,XX @@ flatview_extend_translation(FlatView *fv, hwaddr addr,
47
+ [0] = 32,
52
void *address_space_map(AddressSpace *as,
48
+ [1] = 36,
53
hwaddr addr,
49
+ [2] = 40,
54
hwaddr *plen,
50
+ [3] = 42,
55
- bool is_write)
51
+ [4] = 44,
56
+ bool is_write,
52
+ [5] = 48,
57
+ MemTxAttrs attrs)
53
+};
54
+
55
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
56
unsigned int arm_pamax(ARMCPU *cpu)
58
{
57
{
59
hwaddr len = *plen;
58
- static const unsigned int pamax_map[] = {
60
hwaddr l, xlat;
59
- [0] = 32,
61
@@ -XXX,XX +XXX,XX @@ void *cpu_physical_memory_map(hwaddr addr,
60
- [1] = 36,
62
hwaddr *plen,
61
- [2] = 40,
63
int is_write)
62
- [3] = 42,
63
- [4] = 44,
64
- [5] = 48,
65
- };
66
unsigned int parange =
67
FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
68
69
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
64
{
70
{
65
- return address_space_map(&address_space_memory, addr, plen, is_write);
71
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
66
+ return address_space_map(&address_space_memory, addr, plen, is_write,
72
bool epd, hpd, using16k, using64k, tsz_oob;
67
+ MEMTXATTRS_UNSPECIFIED);
73
- int select, tsz, tbi, max_tsz, min_tsz;
68
}
74
+ int select, tsz, tbi, max_tsz, min_tsz, ps;
69
75
70
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
76
if (!regime_has_2_ranges(mmu_idx)) {
71
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
77
select = 0;
72
index XXXXXXX..XXXXXXX 100644
78
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
73
--- a/target/ppc/mmu-hash64.c
79
hpd = extract32(tcr, 24, 1);
74
+++ b/target/ppc/mmu-hash64.c
80
}
75
@@ -XXX,XX +XXX,XX @@ const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
81
epd = false;
76
return NULL;
82
+ ps = extract32(tcr, 16, 3);
83
} else {
84
/*
85
* Bit 55 is always between the two regions, and is canonical for
86
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
87
epd = extract32(tcr, 23, 1);
88
hpd = extract64(tcr, 42, 1);
89
}
90
+ ps = extract64(tcr, 32, 3);
77
}
91
}
78
92
79
- hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false);
93
if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
80
+ hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
94
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
81
+ MEMTXATTRS_UNSPECIFIED);
95
82
if (plen < (n * HASH_PTE_SIZE_64)) {
96
return (ARMVAParameters) {
83
hw_error("%s: Unable to map all requested HPTEs\n", __func__);
97
.tsz = tsz,
84
}
98
+ .ps = ps,
99
.select = select,
100
.tbi = tbi,
101
.epd = epd,
102
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
103
104
/* TODO: This code does not support shareability levels. */
105
if (aarch64) {
106
+ int ps;
107
+
108
param = aa64_va_parameters(env, address, mmu_idx,
109
access_type != MMU_INST_FETCH);
110
level = 0;
111
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
112
113
addrsize = 64 - 8 * param.tbi;
114
inputsize = 64 - param.tsz;
115
- outputsize = arm_pamax(cpu);
116
+
117
+ /*
118
+ * Bound PS by PARANGE to find the effective output address size.
119
+ * ID_AA64MMFR0 is a read-only register so values outside of the
120
+ * supported mappings can be considered an implementation error.
121
+ */
122
+ ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
123
+ ps = MIN(ps, param.ps);
124
+ assert(ps < ARRAY_SIZE(pamax_map));
125
+ outputsize = pamax_map[ps];
126
} else {
127
param = aa32_va_parameters(env, address, mmu_idx);
128
level = 1;
129
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
130
131
/* Now we can extract the actual base address from the TTBR */
132
descaddr = extract64(ttbr, 0, 48);
133
+
134
+ /*
135
+ * If the base address is out of range, raise AddressSizeFault.
136
+ * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
137
+ * but we've just cleared the bits above 47, so simplify the test.
138
+ */
139
+ if (descaddr >> outputsize) {
140
+ level = 0;
141
+ fault_type = ARMFault_AddressSize;
142
+ goto do_fault;
143
+ }
144
+
145
/*
146
* We rely on this masking to clear the RES0 bits at the bottom of the TTBR
147
* and also to mask out CnP (bit 0) which could validly be non-zero.
148
*/
149
descaddr &= ~indexmask;
150
151
- /* The address field in the descriptor goes up to bit 39 for ARMv7
152
- * but up to bit 47 for ARMv8, but we use the descaddrmask
153
- * up to bit 39 for AArch32, because we don't need other bits in that case
154
- * to construct next descriptor address (anyway they should be all zeroes).
155
+ /*
156
+ * For AArch32, the address field in the descriptor goes up to bit 39
157
+ * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
158
+ * or an AddressSize fault is raised. So for v8 we extract those SBZ
159
+ * bits as part of the address, which will be checked via outputsize.
160
+ * For AArch64, the address field always goes up to bit 47 (with extra
161
+ * bits for FEAT_LPA placed elsewhere). AArch64 implies v8.
162
*/
163
- descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
164
- ~indexmask_grainsize;
165
+ if (arm_feature(env, ARM_FEATURE_V8)) {
166
+ descaddrmask = MAKE_64BIT_MASK(0, 48);
167
+ } else {
168
+ descaddrmask = MAKE_64BIT_MASK(0, 40);
169
+ }
170
+ descaddrmask &= ~indexmask_grainsize;
171
172
/* Secure accesses start with the page table in secure memory and
173
* can be downgraded to non-secure at any step. Non-secure accesses
174
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
175
/* Invalid, or the Reserved level 3 encoding */
176
goto do_fault;
177
}
178
+
179
descaddr = descriptor & descaddrmask;
180
+ if (descaddr >> outputsize) {
181
+ fault_type = ARMFault_AddressSize;
182
+ goto do_fault;
183
+ }
184
185
if ((descriptor & 2) && (level < 3)) {
186
/* Table entry. The top five bits are attributes which may
85
--
187
--
86
2.17.1
188
2.25.1
87
189
88
190
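(Illustration, not part of the patch.) A worked example of the new check: with outputsize = 40, a descriptor address such as 0x0180_0000_0000 has bit 40 set, so descaddr >> outputsize is non-zero and the walk raises ARMFault_AddressSize instead of silently truncating. The helper name below is invented:

    /* True if every set bit of 'descaddr' lies below 'outputsize' (32..52). */
    static bool desc_addr_in_range(uint64_t descaddr, unsigned outputsize)
    {
        return (descaddr >> outputsize) == 0;
    }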
diff view generated by jsdifflib
1
From: Shannon Zhao <zhaoshenglong@huawei.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
kvm_irqchip_create called by kvm_init will call kvm_init_irq_routing to
3
The original A.a revision of the AArch64 ARM required that we
4
initialize global capability variables. If we call kvm_init_irq_routing in
4
force-extend the addresses in these registers from 49 bits.
5
GIC realize function, previous allocated memory will leak.
5
This language has been loosened via a combination of IMPLEMENTATION
6
DEFINED and CONSTRAINTED UNPREDICTABLE to allow consideration of
7
the entire aligned address.
6
8
7
Fix this by deleting the unnecessary call.
9
This means that we do not have to consider whether or not FEAT_LVA
10
is enabled, and decide from which bit an address might need to be
11
extended.
8
12
9
Signed-off-by: Shannon Zhao <zhaoshenglong@huawei.com>
13
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Eric Auger <eric.auger@redhat.com>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-id: 1527750994-14360-1-git-send-email-zhaoshenglong@huawei.com
15
Message-id: 20220301215958.157011-9-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
17
---
14
hw/intc/arm_gic_kvm.c | 1 -
18
target/arm/helper.c | 32 ++++++++++++++++++++++++--------
15
hw/intc/arm_gicv3_kvm.c | 1 -
19
1 file changed, 24 insertions(+), 8 deletions(-)
16
2 files changed, 2 deletions(-)
17
20
18
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
21
diff --git a/target/arm/helper.c b/target/arm/helper.c
19
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/intc/arm_gic_kvm.c
23
--- a/target/arm/helper.c
21
+++ b/hw/intc/arm_gic_kvm.c
24
+++ b/target/arm/helper.c
22
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
25
@@ -XXX,XX +XXX,XX @@ static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
23
26
ARMCPU *cpu = env_archcpu(env);
24
if (kvm_has_gsi_routing()) {
27
int i = ri->crm;
25
/* set up irq routing */
28
26
- kvm_init_irq_routing(kvm_state);
29
- /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
27
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
30
- * register reads and behaves as if values written are sign extended.
28
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
31
+ /*
29
}
32
* Bits [1:0] are RES0.
30
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
33
+ *
31
index XXXXXXX..XXXXXXX 100644
34
+ * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA)
32
--- a/hw/intc/arm_gicv3_kvm.c
35
+ * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if
33
+++ b/hw/intc/arm_gicv3_kvm.c
36
+ * they contain the value written. It is CONSTRAINED UNPREDICTABLE
34
@@ -XXX,XX +XXX,XX @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
37
+ * whether the RESS bits are ignored when comparing an address.
35
38
+ *
36
if (kvm_has_gsi_routing()) {
39
+ * Therefore we are allowed to compare the entire register, which lets
37
/* set up irq routing */
40
+ * us avoid considering whether or not FEAT_LVA is actually enabled.
38
- kvm_init_irq_routing(kvm_state);
41
*/
39
for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
42
- value = sextract64(value, 0, 49) & ~3ULL;
40
kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
43
+ value &= ~3ULL;
44
45
raw_write(env, ri, value);
46
hw_watchpoint_update(cpu, i);
47
@@ -XXX,XX +XXX,XX @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
48
case 0: /* unlinked address match */
49
case 1: /* linked address match */
50
{
51
- /* Bits [63:49] are hardwired to the value of bit [48]; that is,
52
- * we behave as if the register was sign extended. Bits [1:0] are
53
- * RES0. The BAS field is used to allow setting breakpoints on 16
54
- * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
55
+ /*
56
+ * Bits [1:0] are RES0.
57
+ *
58
+ * It is IMPLEMENTATION DEFINED whether bits [63:49]
59
+ * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit
60
+ * of the VA field ([48] or [52] for FEAT_LVA), or whether the
61
+ * value is read as written. It is CONSTRAINED UNPREDICTABLE
62
+ * whether the RESS bits are ignored when comparing an address.
63
+ * Therefore we are allowed to compare the entire register, which
64
+ * lets us avoid considering whether FEAT_LVA is actually enabled.
65
+ *
66
+ * The BAS field is used to allow setting breakpoints on 16-bit
67
+ * wide instructions; it is CONSTRAINED UNPREDICTABLE whether
68
* a bp will fire if the addresses covered by the bp and the addresses
69
* covered by the insn overlap but the insn doesn't start at the
70
* start of the bp address range. We choose to require the insn and
71
@@ -XXX,XX +XXX,XX @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
72
* See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
73
*/
74
int bas = extract64(bcr, 5, 4);
75
- addr = sextract64(bvr, 0, 49) & ~3ULL;
76
+ addr = bvr & ~3ULL;
77
if (bas == 0) {
78
return;
41
}
79
}
42
--
80
--
43
2.17.1
81
2.25.1
44
45
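(Illustration, not part of the patch.) For a write whose upper bits are not a sign-extension of bit 48, the stored value now differs from the old behaviour; both are architecturally acceptable, and keeping the value as written avoids caring about whether FEAT_LVA is enabled:

    #include "qemu/bitops.h"

    static void dbgbvr_write_example(void)
    {
        uint64_t v = 0x00ffffffffff0004ULL;   /* bit 48 set, bits [63:56] clear */
        uint64_t old_stored = sextract64(v, 0, 49) & ~3ULL;  /* 0xffffffffffff0004 */
        uint64_t new_stored = v & ~3ULL;                      /* 0x00ffffffffff0004 */
        (void)old_stored;
        (void)new_stored;
    }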
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
From: Richard Henderson <richard.henderson@linaro.org>
2
add MemTxAttrs as an argument to the MemoryRegion valid.accepts
3
callback. We'll need this for subpage_accepts().
4
2
5
We could take the approach we used with the read and write
3
This feature is relatively small, as it applies only to
6
callbacks and add new a new _with_attrs version, but since there
4
64k pages and thus requires no additional changes to the
7
are so few implementations of the accepts hook we just change
5
table descriptor walking algorithm, only a change to the
8
them all.
6
minimum TSZ (which is the inverse of the maximum virtual
7
address space size).
9
8
9
Note that this feature widens VBAR_ELx, but we already
10
treat the register as being 64 bits wide.
11
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20220301215958.157011-10-richard.henderson@linaro.org
10
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20180521140402.23318-9-peter.maydell@linaro.org
14
---
16
---
15
include/exec/memory.h | 3 ++-
17
docs/system/arm/emulation.rst | 1 +
16
exec.c | 9 ++++++---
18
target/arm/cpu-param.h | 2 +-
17
hw/hppa/dino.c | 3 ++-
19
target/arm/cpu.h | 5 +++++
18
hw/nvram/fw_cfg.c | 12 ++++++++----
20
target/arm/cpu64.c | 1 +
19
hw/scsi/esp.c | 3 ++-
21
target/arm/helper.c | 9 ++++++++-
20
hw/xen/xen_pt_msi.c | 3 ++-
22
5 files changed, 16 insertions(+), 2 deletions(-)
21
memory.c | 5 +++--
22
7 files changed, 25 insertions(+), 13 deletions(-)
23
23
24
diff --git a/include/exec/memory.h b/include/exec/memory.h
24
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
25
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/exec/memory.h
26
--- a/docs/system/arm/emulation.rst
27
+++ b/include/exec/memory.h
27
+++ b/docs/system/arm/emulation.rst
28
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionOps {
28
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
29
* as a machine check exception).
29
- FEAT_LRCPC (Load-acquire RCpc instructions)
30
*/
30
- FEAT_LRCPC2 (Load-acquire RCpc instructions v2)
31
bool (*accepts)(void *opaque, hwaddr addr,
31
- FEAT_LSE (Large System Extensions)
32
- unsigned size, bool is_write);
32
+- FEAT_LVA (Large Virtual Address space)
33
+ unsigned size, bool is_write,
33
- FEAT_MTE (Memory Tagging Extension)
34
+ MemTxAttrs attrs);
34
- FEAT_MTE2 (Memory Tagging Extension)
35
} valid;
35
- FEAT_MTE3 (MTE Asymmetric Fault Handling)
36
/* Internal implementation constraints: */
36
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
37
struct {
38
diff --git a/exec.c b/exec.c
39
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
40
--- a/exec.c
38
--- a/target/arm/cpu-param.h
41
+++ b/exec.c
39
+++ b/target/arm/cpu-param.h
42
@@ -XXX,XX +XXX,XX @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
40
@@ -XXX,XX +XXX,XX @@
41
#ifdef TARGET_AARCH64
42
# define TARGET_LONG_BITS 64
43
# define TARGET_PHYS_ADDR_SPACE_BITS 48
44
-# define TARGET_VIRT_ADDR_SPACE_BITS 48
45
+# define TARGET_VIRT_ADDR_SPACE_BITS 52
46
#else
47
# define TARGET_LONG_BITS 32
48
# define TARGET_PHYS_ADDR_SPACE_BITS 40
49
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
50
index XXXXXXX..XXXXXXX 100644
51
--- a/target/arm/cpu.h
52
+++ b/target/arm/cpu.h
53
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
54
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
43
}
55
}
44
56
45
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
57
+static inline bool isar_feature_aa64_lva(const ARMISARegisters *id)
46
- unsigned size, bool is_write)
58
+{
47
+ unsigned size, bool is_write,
59
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0;
48
+ MemTxAttrs attrs)
60
+}
61
+
62
static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
49
{
63
{
50
return is_write;
64
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
51
}
65
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
52
@@ -XXX,XX +XXX,XX @@ static MemTxResult subpage_write(void *opaque, hwaddr addr,
53
}
54
55
static bool subpage_accepts(void *opaque, hwaddr addr,
56
- unsigned len, bool is_write)
57
+ unsigned len, bool is_write,
58
+ MemTxAttrs attrs)
59
{
60
subpage_t *subpage = opaque;
61
#if defined(DEBUG_SUBPAGE)
62
@@ -XXX,XX +XXX,XX @@ static void readonly_mem_write(void *opaque, hwaddr addr,
63
}
64
65
static bool readonly_mem_accepts(void *opaque, hwaddr addr,
66
- unsigned size, bool is_write)
67
+ unsigned size, bool is_write,
68
+ MemTxAttrs attrs)
69
{
70
return is_write;
71
}
72
diff --git a/hw/hppa/dino.c b/hw/hppa/dino.c
73
index XXXXXXX..XXXXXXX 100644
66
index XXXXXXX..XXXXXXX 100644
74
--- a/hw/hppa/dino.c
67
--- a/target/arm/cpu64.c
75
+++ b/hw/hppa/dino.c
68
+++ b/target/arm/cpu64.c
76
@@ -XXX,XX +XXX,XX @@ static void gsc_to_pci_forwarding(DinoState *s)
69
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
77
}
70
t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1);
78
71
t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */
79
static bool dino_chip_mem_valid(void *opaque, hwaddr addr,
72
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */
80
- unsigned size, bool is_write)
73
+ t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
81
+ unsigned size, bool is_write,
74
cpu->isar.id_aa64mmfr2 = t;
82
+ MemTxAttrs attrs)
75
83
{
76
t = cpu->isar.id_aa64zfr0;
84
switch (addr) {
77
diff --git a/target/arm/helper.c b/target/arm/helper.c
85
case DINO_IAR0:
86
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
87
index XXXXXXX..XXXXXXX 100644
78
index XXXXXXX..XXXXXXX 100644
88
--- a/hw/nvram/fw_cfg.c
79
--- a/target/arm/helper.c
89
+++ b/hw/nvram/fw_cfg.c
80
+++ b/target/arm/helper.c
90
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
81
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
91
}
82
} else {
92
83
max_tsz = 39;
93
static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
94
- unsigned size, bool is_write)
95
+ unsigned size, bool is_write,
96
+ MemTxAttrs attrs)
97
{
98
return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
99
(size == 8 && addr == 0));
100
}
101
102
static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
103
- unsigned size, bool is_write)
104
+ unsigned size, bool is_write,
105
+ MemTxAttrs attrs)
106
{
107
return addr == 0;
108
}
109
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
110
}
111
112
static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
113
- unsigned size, bool is_write)
114
+ unsigned size, bool is_write,
115
+ MemTxAttrs attrs)
116
{
117
return is_write && size == 2;
118
}
119
@@ -XXX,XX +XXX,XX @@ static void fw_cfg_comb_write(void *opaque, hwaddr addr,
120
}
121
122
static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
123
- unsigned size, bool is_write)
124
+ unsigned size, bool is_write,
125
+ MemTxAttrs attrs)
126
{
127
return (size == 1) || (is_write && size == 2);
128
}
129
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
130
index XXXXXXX..XXXXXXX 100644
131
--- a/hw/scsi/esp.c
132
+++ b/hw/scsi/esp.c
133
@@ -XXX,XX +XXX,XX @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
134
}
135
136
static bool esp_mem_accepts(void *opaque, hwaddr addr,
137
- unsigned size, bool is_write)
138
+ unsigned size, bool is_write,
139
+ MemTxAttrs attrs)
140
{
141
return (size == 1) || (is_write && size == 4);
142
}
143
diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/hw/xen/xen_pt_msi.c
146
+++ b/hw/xen/xen_pt_msi.c
147
@@ -XXX,XX +XXX,XX @@ static uint64_t pci_msix_read(void *opaque, hwaddr addr,
148
}
149
150
static bool pci_msix_accepts(void *opaque, hwaddr addr,
151
- unsigned size, bool is_write)
152
+ unsigned size, bool is_write,
153
+ MemTxAttrs attrs)
154
{
155
return !(addr & (size - 1));
156
}
157
diff --git a/memory.c b/memory.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/memory.c
160
+++ b/memory.c
161
@@ -XXX,XX +XXX,XX @@ static void unassigned_mem_write(void *opaque, hwaddr addr,
162
}
163
164
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
165
- unsigned size, bool is_write)
166
+ unsigned size, bool is_write,
167
+ MemTxAttrs attrs)
168
{
169
return false;
170
}
171
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
172
access_size = MAX(MIN(size, access_size_max), access_size_min);
173
for (i = 0; i < size; i += access_size) {
174
if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
175
- is_write)) {
176
+ is_write, attrs)) {
177
return false;
178
}
179
}
84
}
85
- min_tsz = 16; /* TODO: ARMv8.2-LVA */
86
+
87
+ min_tsz = 16;
88
+ if (using64k) {
89
+ if (cpu_isar_feature(aa64_lva, env_archcpu(env))) {
90
+ min_tsz = 12;
91
+ }
92
+ }
93
+ /* TODO: FEAT_LPA2 */
94
95
if (tsz > max_tsz) {
96
tsz = max_tsz;
180
--
97
--
181
2.17.1
98
2.25.1
182
183
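(Illustration, not part of the patch.) The feature reduces the minimum TxSZ from 16 to 12 when the 64k granule is in use, i.e. 64 - 12 = 52-bit virtual addresses. A sketch mirroring the new logic in aa64_va_parameters, with simplified arguments:

    #include "cpu.h"

    static int effective_min_tsz(ARMCPU *cpu, bool using64k)
    {
        if (using64k && cpu_isar_feature(aa64_lva, cpu)) {
            return 12;      /* FEAT_LVA: up to 52-bit VAs with 64k pages */
        }
        return 16;          /* otherwise at most 48-bit VAs */
    }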
diff view generated by jsdifflib
1
Add more detail to the documentation for memory_region_init_iommu()
1
From: Richard Henderson <richard.henderson@linaro.org>
2
and other IOMMU-related functions and data structures.
3
2
3
This feature widens physical addresses (and intermediate physical
4
addresses for 2-stage translation) from 48 to 52 bits, when using
5
64k pages. The only thing left at this point is to handle the
6
extra bits in the TTBR and in the table descriptors.
7
8
Note that PAR_EL1 and HPFAR_EL2 are nominally extended, but we don't
9
mask out the high bits when writing to those registers, so no changes
10
are required there.
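
As a rough standalone sketch (not part of the patch itself, and using made-up register and descriptor values), this is where the extra address bits live when the output size is 52 bits with 64k pages:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's extract64(): take @len bits of @value starting at @pos. */
static uint64_t get_bits(uint64_t value, int pos, int len)
{
    return (value >> pos) & ((1ULL << len) - 1);
}

int main(void)
{
    /* Invented values, purely to show where the high bits are carried. */
    uint64_t ttbr = 0x0000123456780000ULL | (0x9ULL << 2);        /* TTBR[5:2]  = 0x9 */
    uint64_t descriptor = 0x00001234abcd0003ULL | (0xaULL << 12); /* desc[15:12] = 0xa */
    int outputsize = 52;

    /* Table base: BADDR is TTBR[47:6]; TTBR[5:2] supply address bits [51:48]. */
    uint64_t baseaddr = get_bits(ttbr, 0, 48) & ~0x3fULL;
    if (outputsize > 48) {
        baseaddr |= get_bits(ttbr, 2, 4) << 48;
    }

    /* Next level: the 64k-granule address field is descriptor[47:16];
     * descriptor[15:12] supply output address bits [51:48]. */
    uint64_t descaddr = descriptor & 0x0000ffffffff0000ULL;
    if (outputsize > 48) {
        descaddr |= get_bits(descriptor, 12, 4) << 48;
    }

    printf("base = 0x%016llx  next = 0x%016llx\n",
           (unsigned long long)baseaddr, (unsigned long long)descaddr);
    return 0;
}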
11
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-id: 20220301215958.157011-11-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Eric Auger <eric.auger@redhat.com>
8
Message-id: 20180521140402.23318-2-peter.maydell@linaro.org
9
---
16
---
10
include/exec/memory.h | 105 ++++++++++++++++++++++++++++++++++++++----
17
docs/system/arm/emulation.rst | 1 +
11
1 file changed, 95 insertions(+), 10 deletions(-)
18
target/arm/cpu-param.h | 2 +-
19
target/arm/cpu64.c | 2 +-
20
target/arm/helper.c | 19 ++++++++++++++++---
21
4 files changed, 19 insertions(+), 5 deletions(-)
12
22
13
diff --git a/include/exec/memory.h b/include/exec/memory.h
23
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
14
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/memory.h
25
--- a/docs/system/arm/emulation.rst
16
+++ b/include/exec/memory.h
26
+++ b/docs/system/arm/emulation.rst
17
@@ -XXX,XX +XXX,XX @@ enum IOMMUMemoryRegionAttr {
27
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
18
IOMMU_ATTR_SPAPR_TCE_FD
28
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
29
- FEAT_JSCVT (JavaScript conversion instructions)
30
- FEAT_LOR (Limited ordering regions)
31
+- FEAT_LPA (Large Physical Address space)
32
- FEAT_LRCPC (Load-acquire RCpc instructions)
33
- FEAT_LRCPC2 (Load-acquire RCpc instructions v2)
34
- FEAT_LSE (Large System Extensions)
35
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/arm/cpu-param.h
38
+++ b/target/arm/cpu-param.h
39
@@ -XXX,XX +XXX,XX @@
40
41
#ifdef TARGET_AARCH64
42
# define TARGET_LONG_BITS 64
43
-# define TARGET_PHYS_ADDR_SPACE_BITS 48
44
+# define TARGET_PHYS_ADDR_SPACE_BITS 52
45
# define TARGET_VIRT_ADDR_SPACE_BITS 52
46
#else
47
# define TARGET_LONG_BITS 32
48
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/arm/cpu64.c
51
+++ b/target/arm/cpu64.c
52
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
53
cpu->isar.id_aa64pfr1 = t;
54
55
t = cpu->isar.id_aa64mmfr0;
56
- t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */
57
+ t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
58
cpu->isar.id_aa64mmfr0 = t;
59
60
t = cpu->isar.id_aa64mmfr1;
61
diff --git a/target/arm/helper.c b/target/arm/helper.c
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/arm/helper.c
64
+++ b/target/arm/helper.c
65
@@ -XXX,XX +XXX,XX @@ static const uint8_t pamax_map[] = {
66
[3] = 42,
67
[4] = 44,
68
[5] = 48,
69
+ [6] = 52,
19
};
70
};
20
71
21
+/**
72
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
22
+ * IOMMUMemoryRegionClass:
73
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
23
+ *
74
descaddr = extract64(ttbr, 0, 48);
24
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
25
+ * and provide an implementation of at least the @translate method here
26
+ * to handle requests to the memory region. Other methods are optional.
27
+ *
28
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
29
+ * to report whenever mappings are changed, by calling
30
+ * memory_region_notify_iommu() (or, if necessary, by calling
31
+ * memory_region_notify_one() for each registered notifier).
32
+ */
33
typedef struct IOMMUMemoryRegionClass {
34
/* private */
35
struct DeviceClass parent_class;
36
75
37
/*
76
/*
38
- * Return a TLB entry that contains a given address. Flag should
77
- * If the base address is out of range, raise AddressSizeFault.
39
- * be the access permission of this translation operation. We can
78
+ * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
40
- * set flag to IOMMU_NONE to mean that we don't need any
41
- * read/write permission checks, like, when for region replay.
42
+ * Return a TLB entry that contains a given address.
43
+ *
79
+ *
44
+ * The IOMMUAccessFlags indicated via @flag are optional and may
80
+ * Otherwise, if the base address is out of range, raise AddressSizeFault.
45
+ * be specified as IOMMU_NONE to indicate that the caller needs
81
* In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
46
+ * the full translation information for both reads and writes. If
82
* but we've just cleared the bits above 47, so simplify the test.
47
+ * the access flags are specified then the IOMMU implementation
48
+ * may use this as an optimization, to stop doing a page table
49
+ * walk as soon as it knows that the requested permissions are not
50
+ * allowed. If IOMMU_NONE is passed then the IOMMU must do the
51
+ * full page table walk and report the permissions in the returned
52
+ * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
53
+ * return different mappings for reads and writes.)
54
+ *
55
+ * The returned information remains valid while the caller is
56
+ * holding the big QEMU lock or is inside an RCU critical section;
57
+ * if the caller wishes to cache the mapping beyond that it must
58
+ * register an IOMMU notifier so it can invalidate its cached
59
+ * information when the IOMMU mapping changes.
60
+ *
61
+ * @iommu: the IOMMUMemoryRegion
62
+ * @hwaddr: address to be translated within the memory region
63
+ * @flag: requested access permissions
64
*/
83
*/
65
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
84
- if (descaddr >> outputsize) {
66
IOMMUAccessFlags flag);
85
+ if (outputsize > 48) {
67
- /* Returns minimum supported page size */
86
+ descaddr |= extract64(ttbr, 2, 4) << 48;
68
+ /* Returns minimum supported page size in bytes.
87
+ } else if (descaddr >> outputsize) {
69
+ * If this method is not provided then the minimum is assumed to
88
level = 0;
70
+ * be TARGET_PAGE_SIZE.
89
fault_type = ARMFault_AddressSize;
71
+ *
90
goto do_fault;
72
+ * @iommu: the IOMMUMemoryRegion
91
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
73
+ */
92
}
74
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
93
75
- /* Called when IOMMU Notifier flag changed */
94
descaddr = descriptor & descaddrmask;
76
+ /* Called when IOMMU Notifier flag changes (ie when the set of
95
- if (descaddr >> outputsize) {
77
+ * events which IOMMU users are requesting notification for changes).
96
+
78
+ * Optional method -- need not be provided if the IOMMU does not
97
+ /*
79
+ * need to know exactly which events must be notified.
98
+ * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
80
+ *
99
+ * of descriptor. Otherwise, if descaddr is out of range, raise
81
+ * @iommu: the IOMMUMemoryRegion
100
+ * AddressSizeFault.
82
+ * @old_flags: events which previously needed to be notified
101
+ */
83
+ * @new_flags: events which now need to be notified
102
+ if (outputsize > 48) {
84
+ */
103
+ descaddr |= extract64(descriptor, 12, 4) << 48;
85
void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
104
+ } else if (descaddr >> outputsize) {
86
IOMMUNotifierFlag old_flags,
105
fault_type = ARMFault_AddressSize;
87
IOMMUNotifierFlag new_flags);
106
goto do_fault;
88
- /* Set this up to provide customized IOMMU replay function */
107
}
89
+ /* Called to handle memory_region_iommu_replay().
90
+ *
91
+ * The default implementation of memory_region_iommu_replay() is to
92
+ * call the IOMMU translate method for every page in the address space
93
+ * with flag == IOMMU_NONE and then call the notifier if translate
94
+ * returns a valid mapping. If this method is implemented then it
95
+ * overrides the default behaviour, and must provide the full semantics
96
+ * of memory_region_iommu_replay(), by calling @notifier for every
97
+ * translation present in the IOMMU.
98
+ *
99
+ * Optional method -- an IOMMU only needs to provide this method
100
+ * if the default is inefficient or produces undesirable side effects.
101
+ *
102
+ * Note: this is not related to record-and-replay functionality.
103
+ */
104
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
105
106
- /* Get IOMMU misc attributes */
107
- int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr,
108
+ /* Get IOMMU misc attributes. This is an optional method that
109
+ * can be used to allow users of the IOMMU to get implementation-specific
110
+ * information. The IOMMU implements this method to handle calls
111
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
112
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
113
+ * the IOMMU supports. If the method is unimplemented then
114
+ * memory_region_iommu_get_attr() will always return -EINVAL.
115
+ *
116
+ * @iommu: the IOMMUMemoryRegion
117
+ * @attr: attribute being queried
118
+ * @data: memory to fill in with the attribute data
119
+ *
120
+ * Returns 0 on success, or a negative errno; in particular
121
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
122
+ */
123
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
124
void *data);
125
} IOMMUMemoryRegionClass;
126
127
@@ -XXX,XX +XXX,XX @@ static inline void memory_region_init_reservation(MemoryRegion *mr,
128
* An IOMMU region translates addresses and forwards accesses to a target
129
* memory region.
130
*
131
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
132
+ * @_iommu_mr should be a pointer to enough memory for an instance of
133
+ * that subclass, @instance_size is the size of that subclass, and
134
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
135
+ * instance of the subclass, and its methods will then be called to handle
136
+ * accesses to the memory region. See the documentation of
137
+ * #IOMMUMemoryRegionClass for further details.
138
+ *
139
* @_iommu_mr: the #IOMMUMemoryRegion to be initialized
140
* @instance_size: the IOMMUMemoryRegion subclass instance size
141
* @mrtypename: the type name of the #IOMMUMemoryRegion
142
@@ -XXX,XX +XXX,XX @@ void memory_region_register_iommu_notifier(MemoryRegion *mr,
143
* a notifier with the minimum page granularity returned by
144
* mr->iommu_ops->get_page_size().
145
*
146
+ * Note: this is not related to record-and-replay functionality.
147
+ *
148
* @iommu_mr: the memory region to observe
149
* @n: the notifier to which to replay iommu mappings
150
*/
151
@@ -XXX,XX +XXX,XX @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
152
* memory_region_iommu_replay_all: replay existing IOMMU translations
153
* to all the notifiers registered.
154
*
155
+ * Note: this is not related to record-and-replay functionality.
156
+ *
157
* @iommu_mr: the memory region to observe
158
*/
159
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
160
@@ -XXX,XX +XXX,XX @@ void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
161
* memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
162
* defined on the IOMMU.
163
*
164
- * Returns 0 if succeded, error code otherwise.
165
+ * Returns 0 on success, or a negative errno otherwise. In particular,
166
+ * -EINVAL indicates that the IOMMU does not support the requested
167
+ * attribute.
168
*
169
* @iommu_mr: the memory region
170
* @attr: the requested attribute
171
--
108
--
172
2.17.1
109
2.25.1
173
174
1
From: Jan Kiszka <jan.kiszka@siemens.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
There was a nasty flip in identifying which register group an access is
3
With FEAT_LPA2, rather than introducing translation level 4,
4
targeting. The issue caused spuriously raised priorities of the guest
4
we introduce level -1, below the current level 0. Extend
5
when handing CPUs over in the Jailhouse hypervisor.
5
arm_fi_to_lfsc to handle these faults.
6
6
7
Cc: qemu-stable@nongnu.org
7
Assert that this new translation level does not leak into
8
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
8
fault types for which it is not defined, which allows some
9
Message-id: 28b927d3-da58-bce4-cc13-bfec7f9b1cb9@siemens.com
9
masking of fi->level to be removed.
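
For illustration only, a small standalone program (not QEMU code) showing the long-descriptor fault status encodings this change produces, including the new dedicated level -1 values:

#include <assert.h>
#include <stdio.h>

/* Translation level -1 (added for FEAT_LPA2) gets its own encodings instead
 * of reusing the level 0..3 ones. Values follow the diff below. */
static unsigned lfsc_translation(int level)
{
    assert(level >= -1 && level <= 3);
    return level < 0 ? 0x2b /* 0b101011 */ : (0x04 | level); /* 0b0001LL */
}

static unsigned lfsc_address_size(int level)
{
    assert(level >= -1 && level <= 3);
    return level < 0 ? 0x29 /* 0b101001 */ : level;          /* 0b0000LL */
}

int main(void)
{
    for (int level = -1; level <= 3; level++) {
        printf("level %2d: translation fsc 0x%02x, address-size fsc 0x%02x\n",
               level, lfsc_translation(level), lfsc_address_size(level));
    }
    return 0;
}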
10
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-id: 20220301215958.157011-12-richard.henderson@linaro.org
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
14
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
---
15
---
13
hw/intc/arm_gicv3_cpuif.c | 12 ++++++------
16
target/arm/internals.h | 35 +++++++++++++++++++++++++++++------
14
1 file changed, 6 insertions(+), 6 deletions(-)
17
1 file changed, 29 insertions(+), 6 deletions(-)
15
18
16
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
19
diff --git a/target/arm/internals.h b/target/arm/internals.h
17
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/intc/arm_gicv3_cpuif.c
21
--- a/target/arm/internals.h
19
+++ b/hw/intc/arm_gicv3_cpuif.c
22
+++ b/target/arm/internals.h
20
@@ -XXX,XX +XXX,XX @@ static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
23
@@ -XXX,XX +XXX,XX @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
21
{
24
case ARMFault_None:
22
GICv3CPUState *cs = icc_cs_from_env(env);
25
return 0;
23
int regno = ri->opc2 & 3;
26
case ARMFault_AddressSize:
24
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
27
- fsc = fi->level & 3;
25
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
28
+ assert(fi->level >= -1 && fi->level <= 3);
26
uint64_t value = cs->ich_apr[grp][regno];
29
+ if (fi->level < 0) {
27
30
+ fsc = 0b101001;
28
trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
31
+ } else {
29
@@ -XXX,XX +XXX,XX @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
32
+ fsc = fi->level;
30
{
33
+ }
31
GICv3CPUState *cs = icc_cs_from_env(env);
34
break;
32
int regno = ri->opc2 & 3;
35
case ARMFault_AccessFlag:
33
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
36
- fsc = (fi->level & 3) | (0x2 << 2);
34
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
37
+ assert(fi->level >= 0 && fi->level <= 3);
35
38
+ fsc = 0b001000 | fi->level;
36
trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
39
break;
37
40
case ARMFault_Permission:
38
@@ -XXX,XX +XXX,XX @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
41
- fsc = (fi->level & 3) | (0x3 << 2);
39
uint64_t value;
42
+ assert(fi->level >= 0 && fi->level <= 3);
40
43
+ fsc = 0b001100 | fi->level;
41
int regno = ri->opc2 & 3;
44
break;
42
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
45
case ARMFault_Translation:
43
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
46
- fsc = (fi->level & 3) | (0x1 << 2);
44
47
+ assert(fi->level >= -1 && fi->level <= 3);
45
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
48
+ if (fi->level < 0) {
46
return icv_ap_read(env, ri);
49
+ fsc = 0b101011;
47
@@ -XXX,XX +XXX,XX @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
50
+ } else {
48
GICv3CPUState *cs = icc_cs_from_env(env);
51
+ fsc = 0b000100 | fi->level;
49
52
+ }
50
int regno = ri->opc2 & 3;
53
break;
51
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
54
case ARMFault_SyncExternal:
52
+ int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
55
fsc = 0x10 | (fi->ea << 12);
53
56
break;
54
if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
57
case ARMFault_SyncExternalOnWalk:
55
icv_ap_write(env, ri, value);
58
- fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
56
@@ -XXX,XX +XXX,XX @@ static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
59
+ assert(fi->level >= -1 && fi->level <= 3);
57
{
60
+ if (fi->level < 0) {
58
GICv3CPUState *cs = icc_cs_from_env(env);
61
+ fsc = 0b010011;
59
int regno = ri->opc2 & 3;
62
+ } else {
60
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
63
+ fsc = 0b010100 | fi->level;
61
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
64
+ }
62
uint64_t value;
65
+ fsc |= fi->ea << 12;
63
66
break;
64
value = cs->ich_apr[grp][regno];
67
case ARMFault_SyncParity:
65
@@ -XXX,XX +XXX,XX @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
68
fsc = 0x18;
66
{
69
break;
67
GICv3CPUState *cs = icc_cs_from_env(env);
70
case ARMFault_SyncParityOnWalk:
68
int regno = ri->opc2 & 3;
71
- fsc = (fi->level & 3) | (0x7 << 2);
69
- int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1NS;
72
+ assert(fi->level >= -1 && fi->level <= 3);
70
+ int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
73
+ if (fi->level < 0) {
71
74
+ fsc = 0b011011;
72
trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
75
+ } else {
73
76
+ fsc = 0b011100 | fi->level;
77
+ }
78
break;
79
case ARMFault_AsyncParity:
80
fsc = 0x19;
74
--
81
--
75
2.17.1
82
2.25.1
76
77
1
From: Richard Henderson <richard.henderson@linaro.org>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Depending on the host abi, float16, aka uint16_t, values are
3
Merge tlbi_aa64_range_get_length and tlbi_aa64_range_get_base,
4
passed and returned either zero-extended in the host register
4
returning a structure containing both results. Pass in the
5
or with garbage at the top of the host register.
5
ARMMMUIdx, rather than the digested two_ranges boolean.
6
6
7
The tcg code generator has so far been assuming garbage, as that
7
This is in preparation for FEAT_LPA2, where the interpretation
8
matches the x86 abi, but this is incorrect for other host abis.
8
of 'value' depends on the effective value of DS for the regime.
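
As a hedged standalone sketch of the range decode described above (field positions taken from the diff below; the sample value is invented):

#include <stdint.h>
#include <stdio.h>

/* Decode a TLBI RVA* payload into a byte length:
 *   page_shift = (TG - 1) * 2 + 12
 *   length     = (NUM + 1) << (5 * SCALE + 1 + page_shift)
 * TG is at [47:46], SCALE at [45:44], NUM at [43:39]. */
static uint64_t field(uint64_t v, int pos, int len)
{
    return (v >> pos) & ((1ULL << len) - 1);
}

int main(void)
{
    uint64_t value = (1ULL << 46)   /* TG = 1: 4k granule */
                   | (2ULL << 44)   /* SCALE = 2           */
                   | (7ULL << 39);  /* NUM = 7             */

    unsigned tg = field(value, 46, 2);
    unsigned scale = field(value, 44, 2);
    unsigned num = field(value, 39, 5);
    unsigned page_shift = (tg - 1) * 2 + 12;
    uint64_t length = (uint64_t)(num + 1) << (5 * scale + 1 + page_shift);

    printf("range length = %llu bytes\n", (unsigned long long)length);
    return 0;
}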
9
Further, target/arm has so far been assuming zero-extended results,
10
so that it may store the 16-bit value into a 32-bit slot with the
11
high 16-bits already clear.
12
9
13
Rectify both problems by mapping "f16" in the helper definition
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
14
to uint32_t instead of (a typedef for) uint16_t. This forces
15
the host compiler to assume garbage in the upper 16 bits on input
16
and to zero-extend the result on output.
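
A minimal illustration of the idea, with hypothetical names rather than QEMU's real helper machinery: once the helper type is a 32-bit integer, the callee must explicitly ignore the upper half of its argument, and its 16-bit result is returned zero-extended:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the "f16" helper ctype after this change. */
typedef uint32_t dh_ctype_f16_sketch;

static dh_ctype_f16_sketch helper_f16_identity_sketch(dh_ctype_f16_sketch a)
{
    uint16_t f16 = (uint16_t)a;   /* explicitly drop any garbage above bit 15 */
    /* ... a real helper would operate on f16 here ... */
    return f16;                   /* zero-extended back into 32 bits */
}

int main(void)
{
    /* The caller may legitimately pass junk in the upper half. */
    printf("0x%08x\n", (unsigned)helper_f16_identity_sketch(0xdead3c00u));
    return 0;
}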
17
18
Cc: qemu-stable@nongnu.org
19
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
20
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
Message-id: 20220301215958.157011-13-richard.henderson@linaro.org
21
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
22
Message-id: 20180522175629.24932-1-richard.henderson@linaro.org
23
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
25
---
14
---
26
include/exec/helper-head.h | 2 +-
15
target/arm/helper.c | 58 +++++++++++++++++++--------------------------
27
target/arm/helper-a64.c | 35 +++++++++--------
16
1 file changed, 24 insertions(+), 34 deletions(-)
28
target/arm/helper.c | 80 +++++++++++++++++++-------------------
29
3 files changed, 59 insertions(+), 58 deletions(-)
30
17
31
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/include/exec/helper-head.h
34
+++ b/include/exec/helper-head.h
35
@@ -XXX,XX +XXX,XX @@
36
#define dh_ctype_int int
37
#define dh_ctype_i64 uint64_t
38
#define dh_ctype_s64 int64_t
39
-#define dh_ctype_f16 float16
40
+#define dh_ctype_f16 uint32_t
41
#define dh_ctype_f32 float32
42
#define dh_ctype_f64 float64
43
#define dh_ctype_ptr void *
44
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/helper-a64.c
47
+++ b/target/arm/helper-a64.c
48
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float_rel_to_flags(int res)
49
return flags;
50
}
51
52
-uint64_t HELPER(vfp_cmph_a64)(float16 x, float16 y, void *fp_status)
53
+uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
54
{
55
return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
56
}
57
58
-uint64_t HELPER(vfp_cmpeh_a64)(float16 x, float16 y, void *fp_status)
59
+uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
60
{
61
return float_rel_to_flags(float16_compare(x, y, fp_status));
62
}
63
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
64
#define float64_three make_float64(0x4008000000000000ULL)
65
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)
66
67
-float16 HELPER(recpsf_f16)(float16 a, float16 b, void *fpstp)
68
+uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
69
{
70
float_status *fpst = fpstp;
71
72
@@ -XXX,XX +XXX,XX @@ float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
73
return float64_muladd(a, b, float64_two, 0, fpst);
74
}
75
76
-float16 HELPER(rsqrtsf_f16)(float16 a, float16 b, void *fpstp)
77
+uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
78
{
79
float_status *fpst = fpstp;
80
81
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(neon_addlp_u16)(uint64_t a)
82
}
83
84
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
85
-float16 HELPER(frecpx_f16)(float16 a, void *fpstp)
86
+uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
87
{
88
float_status *fpst = fpstp;
89
uint16_t val16, sbit;
90
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
91
#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))
92
93
#define ADVSIMD_HALFOP(name) \
94
-float16 ADVSIMD_HELPER(name, h)(float16 a, float16 b, void *fpstp) \
95
+uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
96
{ \
97
float_status *fpst = fpstp; \
98
return float16_ ## name(a, b, fpst); \
99
@@ -XXX,XX +XXX,XX @@ ADVSIMD_HALFOP(mulx)
100
ADVSIMD_TWOHALFOP(mulx)
101
102
/* fused multiply-accumulate */
103
-float16 HELPER(advsimd_muladdh)(float16 a, float16 b, float16 c, void *fpstp)
104
+uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
105
+ void *fpstp)
106
{
107
float_status *fpst = fpstp;
108
return float16_muladd(a, b, c, 0, fpst);
109
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
110
111
#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0
112
113
-uint32_t HELPER(advsimd_ceq_f16)(float16 a, float16 b, void *fpstp)
114
+uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
115
{
116
float_status *fpst = fpstp;
117
int compare = float16_compare_quiet(a, b, fpst);
118
return ADVSIMD_CMPRES(compare == float_relation_equal);
119
}
120
121
-uint32_t HELPER(advsimd_cge_f16)(float16 a, float16 b, void *fpstp)
122
+uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
123
{
124
float_status *fpst = fpstp;
125
int compare = float16_compare(a, b, fpst);
126
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_cge_f16)(float16 a, float16 b, void *fpstp)
127
compare == float_relation_equal);
128
}
129
130
-uint32_t HELPER(advsimd_cgt_f16)(float16 a, float16 b, void *fpstp)
131
+uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
132
{
133
float_status *fpst = fpstp;
134
int compare = float16_compare(a, b, fpst);
135
return ADVSIMD_CMPRES(compare == float_relation_greater);
136
}
137
138
-uint32_t HELPER(advsimd_acge_f16)(float16 a, float16 b, void *fpstp)
139
+uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
140
{
141
float_status *fpst = fpstp;
142
float16 f0 = float16_abs(a);
143
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_acge_f16)(float16 a, float16 b, void *fpstp)
144
compare == float_relation_equal);
145
}
146
147
-uint32_t HELPER(advsimd_acgt_f16)(float16 a, float16 b, void *fpstp)
148
+uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
149
{
150
float_status *fpst = fpstp;
151
float16 f0 = float16_abs(a);
152
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_acgt_f16)(float16 a, float16 b, void *fpstp)
153
}
154
155
/* round to integral */
156
-float16 HELPER(advsimd_rinth_exact)(float16 x, void *fp_status)
157
+uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
158
{
159
return float16_round_to_int(x, fp_status);
160
}
161
162
-float16 HELPER(advsimd_rinth)(float16 x, void *fp_status)
163
+uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
164
{
165
int old_flags = get_float_exception_flags(fp_status), new_flags;
166
float16 ret;
167
@@ -XXX,XX +XXX,XX @@ float16 HELPER(advsimd_rinth)(float16 x, void *fp_status)
168
* setting the mode appropriately before calling the helper.
169
*/
170
171
-uint32_t HELPER(advsimd_f16tosinth)(float16 a, void *fpstp)
172
+uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
173
{
174
float_status *fpst = fpstp;
175
176
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_f16tosinth)(float16 a, void *fpstp)
177
return float16_to_int16(a, fpst);
178
}
179
180
-uint32_t HELPER(advsimd_f16touinth)(float16 a, void *fpstp)
181
+uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
182
{
183
float_status *fpst = fpstp;
184
185
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(advsimd_f16touinth)(float16 a, void *fpstp)
186
* Square Root and Reciprocal square root
187
*/
188
189
-float16 HELPER(sqrt_f16)(float16 a, void *fpstp)
190
+uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
191
{
192
float_status *s = fpstp;
193
194
diff --git a/target/arm/helper.c b/target/arm/helper.c
18
diff --git a/target/arm/helper.c b/target/arm/helper.c
195
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
196
--- a/target/arm/helper.c
20
--- a/target/arm/helper.c
197
+++ b/target/arm/helper.c
21
+++ b/target/arm/helper.c
198
@@ -XXX,XX +XXX,XX @@ DO_VFP_cmp(d, float64)
22
@@ -XXX,XX +XXX,XX @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
199
200
/* Integer to float and float to integer conversions */
201
202
-#define CONV_ITOF(name, fsz, sign) \
203
- float##fsz HELPER(name)(uint32_t x, void *fpstp) \
204
-{ \
205
- float_status *fpst = fpstp; \
206
- return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
207
+#define CONV_ITOF(name, ftype, fsz, sign) \
208
+ftype HELPER(name)(uint32_t x, void *fpstp) \
209
+{ \
210
+ float_status *fpst = fpstp; \
211
+ return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
212
}
23
}
213
24
214
-#define CONV_FTOI(name, fsz, sign, round) \
25
#ifdef TARGET_AARCH64
215
-uint32_t HELPER(name)(float##fsz x, void *fpstp) \
26
-static uint64_t tlbi_aa64_range_get_length(CPUARMState *env,
216
-{ \
27
- uint64_t value)
217
- float_status *fpst = fpstp; \
28
-{
218
- if (float##fsz##_is_any_nan(x)) { \
29
- unsigned int page_shift;
219
- float_raise(float_flag_invalid, fpst); \
30
- unsigned int page_size_granule;
220
- return 0; \
31
- uint64_t num;
221
- } \
32
- uint64_t scale;
222
- return float##fsz##_to_##sign##int32##round(x, fpst); \
33
- uint64_t exponent;
223
+#define CONV_FTOI(name, ftype, fsz, sign, round) \
34
+typedef struct {
224
+uint32_t HELPER(name)(ftype x, void *fpstp) \
35
+ uint64_t base;
225
+{ \
36
uint64_t length;
226
+ float_status *fpst = fpstp; \
37
+} TLBIRange;
227
+ if (float##fsz##_is_any_nan(x)) { \
38
+
228
+ float_raise(float_flag_invalid, fpst); \
39
+static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
229
+ return 0; \
40
+ uint64_t value)
230
+ } \
41
+{
231
+ return float##fsz##_to_##sign##int32##round(x, fpst); \
42
+ unsigned int page_size_granule, page_shift, num, scale, exponent;
43
+ TLBIRange ret = { };
44
45
- num = extract64(value, 39, 5);
46
- scale = extract64(value, 44, 2);
47
page_size_granule = extract64(value, 46, 2);
48
49
if (page_size_granule == 0) {
50
qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
51
page_size_granule);
52
- return 0;
53
+ return ret;
54
}
55
56
page_shift = (page_size_granule - 1) * 2 + 12;
57
-
58
+ num = extract64(value, 39, 5);
59
+ scale = extract64(value, 44, 2);
60
exponent = (5 * scale) + 1;
61
- length = (num + 1) << (exponent + page_shift);
62
63
- return length;
64
-}
65
+ ret.length = (num + 1) << (exponent + page_shift);
66
67
-static uint64_t tlbi_aa64_range_get_base(CPUARMState *env, uint64_t value,
68
- bool two_ranges)
69
-{
70
- /* TODO: ARMv8.7 FEAT_LPA2 */
71
- uint64_t pageaddr;
72
-
73
- if (two_ranges) {
74
- pageaddr = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
75
+ if (regime_has_2_ranges(mmuidx)) {
76
+ ret.base = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
77
} else {
78
- pageaddr = extract64(value, 0, 37) << TARGET_PAGE_BITS;
79
+ ret.base = extract64(value, 0, 37) << TARGET_PAGE_BITS;
80
}
81
82
- return pageaddr;
83
+ return ret;
232
}
84
}
233
85
234
-#define FLOAT_CONVS(name, p, fsz, sign) \
86
static void do_rvae_write(CPUARMState *env, uint64_t value,
235
-CONV_ITOF(vfp_##name##to##p, fsz, sign) \
87
int idxmap, bool synced)
236
-CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
237
-CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
238
+#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
239
+ CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
240
+ CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
241
+ CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)
242
243
-FLOAT_CONVS(si, h, 16, )
244
-FLOAT_CONVS(si, s, 32, )
245
-FLOAT_CONVS(si, d, 64, )
246
-FLOAT_CONVS(ui, h, 16, u)
247
-FLOAT_CONVS(ui, s, 32, u)
248
-FLOAT_CONVS(ui, d, 64, u)
249
+FLOAT_CONVS(si, h, uint32_t, 16, )
250
+FLOAT_CONVS(si, s, float32, 32, )
251
+FLOAT_CONVS(si, d, float64, 64, )
252
+FLOAT_CONVS(ui, h, uint32_t, 16, u)
253
+FLOAT_CONVS(ui, s, float32, 32, u)
254
+FLOAT_CONVS(ui, d, float64, 64, u)
255
256
#undef CONV_ITOF
257
#undef CONV_FTOI
258
@@ -XXX,XX +XXX,XX @@ static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
259
return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
260
}
261
262
-float16 HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
263
+uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
264
{
88
{
265
return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
89
ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
266
}
90
- bool two_ranges = regime_has_2_ranges(one_idx);
267
91
- uint64_t baseaddr, length;
268
-float16 HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
92
+ TLBIRange range;
269
+uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
93
int bits;
270
{
94
271
return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
95
- baseaddr = tlbi_aa64_range_get_base(env, value, two_ranges);
272
}
96
- length = tlbi_aa64_range_get_length(env, value);
273
97
- bits = tlbbits_for_regime(env, one_idx, baseaddr);
274
-float16 HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
98
+ range = tlbi_aa64_get_range(env, one_idx, value);
275
+uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
99
+ bits = tlbbits_for_regime(env, one_idx, range.base);
276
{
100
277
return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
101
if (synced) {
278
}
102
tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
279
103
- baseaddr,
280
-float16 HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
104
- length,
281
+uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
105
+ range.base,
282
{
106
+ range.length,
283
return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
107
idxmap,
284
}
108
bits);
285
@@ -XXX,XX +XXX,XX @@ static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
109
} else {
110
- tlb_flush_range_by_mmuidx(env_cpu(env), baseaddr,
111
- length, idxmap, bits);
112
+ tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
113
+ range.length, idxmap, bits);
286
}
114
}
287
}
115
}
288
116
289
-uint32_t HELPER(vfp_toshh)(float16 x, uint32_t shift, void *fpst)
290
+uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
291
{
292
return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
293
}
294
295
-uint32_t HELPER(vfp_touhh)(float16 x, uint32_t shift, void *fpst)
296
+uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
297
{
298
return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
299
}
300
301
-uint32_t HELPER(vfp_toslh)(float16 x, uint32_t shift, void *fpst)
302
+uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
303
{
304
return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
305
}
306
307
-uint32_t HELPER(vfp_toulh)(float16 x, uint32_t shift, void *fpst)
308
+uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
309
{
310
return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
311
}
312
313
-uint64_t HELPER(vfp_tosqh)(float16 x, uint32_t shift, void *fpst)
314
+uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
315
{
316
return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
317
}
318
319
-uint64_t HELPER(vfp_touqh)(float16 x, uint32_t shift, void *fpst)
320
+uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
321
{
322
return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
323
}
324
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
325
}
326
327
/* Half precision conversions. */
328
-float32 HELPER(vfp_fcvt_f16_to_f32)(float16 a, void *fpstp, uint32_t ahp_mode)
329
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
330
{
331
/* Squash FZ16 to 0 for the duration of conversion. In this case,
332
* it would affect flushing input denormals.
333
@@ -XXX,XX +XXX,XX @@ float32 HELPER(vfp_fcvt_f16_to_f32)(float16 a, void *fpstp, uint32_t ahp_mode)
334
return r;
335
}
336
337
-float16 HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
338
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
339
{
340
/* Squash FZ16 to 0 for the duration of conversion. In this case,
341
* it would affect flushing output denormals.
342
@@ -XXX,XX +XXX,XX @@ float16 HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
343
return r;
344
}
345
346
-float64 HELPER(vfp_fcvt_f16_to_f64)(float16 a, void *fpstp, uint32_t ahp_mode)
347
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
348
{
349
/* Squash FZ16 to 0 for the duration of conversion. In this case,
350
* it would affect flushing input denormals.
351
@@ -XXX,XX +XXX,XX @@ float64 HELPER(vfp_fcvt_f16_to_f64)(float16 a, void *fpstp, uint32_t ahp_mode)
352
return r;
353
}
354
355
-float16 HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
356
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
357
{
358
/* Squash FZ16 to 0 for the duration of conversion. In this case,
359
* it would affect flushing output denormals.
360
@@ -XXX,XX +XXX,XX @@ static bool round_to_inf(float_status *fpst, bool sign_bit)
361
g_assert_not_reached();
362
}
363
364
-float16 HELPER(recpe_f16)(float16 input, void *fpstp)
365
+uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
366
{
367
float_status *fpst = fpstp;
368
float16 f16 = float16_squash_input_denormal(input, fpst);
369
@@ -XXX,XX +XXX,XX @@ static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
370
return extract64(estimate, 0, 8) << 44;
371
}
372
373
-float16 HELPER(rsqrte_f16)(float16 input, void *fpstp)
374
+uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
375
{
376
float_status *s = fpstp;
377
float16 f16 = float16_squash_input_denormal(input, s);
378
--
117
--
379
2.17.1
118
2.25.1
380
381
1
In commit f0aff255700 we made cpacr_write() enforce that some CPACR
1
From: Richard Henderson <richard.henderson@linaro.org>
2
bits are RAZ/WI and some are RAO/WI for ARMv7 cores. Unfortunately
3
we forgot to also update the register's reset value. The effect
4
was that (a) a guest that read CPACR on reset would not see ones in
5
the RAO bits, and (b) if you did a migration before the guest did
6
a write to the CPACR then the migration would fail because the
7
destination would enforce the RAO bits and then complain that they
8
didn't match the zero value from the source.
9
2
10
Implement reset for the CPACR using a custom reset function
3
The shift of the BaseADDR field depends on the translation
11
that just calls cpacr_write(), to avoid having to duplicate
4
granule in use.
12
the logic for which bits are RAO.
13
5
14
This bug would affect migration for TCG CPUs which are ARMv7
6
Fixes: 84940ed8255 ("target/arm: Add support for FEAT_TLBIRANGE")
15
with VFP but without one of Neon or VFPv3.
7
Reported-by: Peter Maydell <peter.maydell@linaro.org>
16
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
17
Reported-by: Cédric Le Goater <clg@kaod.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220301215958.157011-14-richard.henderson@linaro.org
18
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
19
Tested-by: Cédric Le Goater <clg@kaod.org>
20
Message-id: 20180522173713.26282-1-peter.maydell@linaro.org
21
---
12
---
22
target/arm/helper.c | 10 +++++++++-
13
target/arm/helper.c | 5 +++--
23
1 file changed, 9 insertions(+), 1 deletion(-)
14
1 file changed, 3 insertions(+), 2 deletions(-)
24
15
25
diff --git a/target/arm/helper.c b/target/arm/helper.c
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
26
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
27
--- a/target/arm/helper.c
18
--- a/target/arm/helper.c
28
+++ b/target/arm/helper.c
19
+++ b/target/arm/helper.c
29
@@ -XXX,XX +XXX,XX @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
20
@@ -XXX,XX +XXX,XX @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
30
env->cp15.cpacr_el1 = value;
21
ret.length = (num + 1) << (exponent + page_shift);
22
23
if (regime_has_2_ranges(mmuidx)) {
24
- ret.base = sextract64(value, 0, 37) << TARGET_PAGE_BITS;
25
+ ret.base = sextract64(value, 0, 37);
26
} else {
27
- ret.base = extract64(value, 0, 37) << TARGET_PAGE_BITS;
28
+ ret.base = extract64(value, 0, 37);
29
}
30
+ ret.base <<= page_shift;
31
32
return ret;
31
}
33
}
32
33
+static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
34
+{
35
+ /* Call cpacr_write() so that we reset with the correct RAO bits set
36
+ * for our CPU features.
37
+ */
38
+ cpacr_write(env, ri, 0);
39
+}
40
+
41
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
42
bool isread)
43
{
44
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
45
{ .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
46
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
47
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
48
- .resetvalue = 0, .writefn = cpacr_write },
49
+ .resetfn = cpacr_reset, .writefn = cpacr_write },
50
REGINFO_SENTINEL
51
};
52
53
--
34
--
54
2.17.1
35
2.25.1
55
56
1
Add entries to MAINTAINERS to cover the newer MPS2 boards and
1
From: Richard Henderson <richard.henderson@linaro.org>
2
the new devices they use.
3
2
3
For FEAT_LPA2, we will need other ARMVAParameters, which themselves
4
depend on the translation granule in use. We might as well validate
5
that the given TG matches; the architecture "does not require that
6
the instruction invalidates any entries" if this is not true.
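
A small standalone sketch of the check being added (the encoding 1/2/3 for 4k/16k/64k comes from the TLBI range payload format; the function and variable names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* The TG field encoded in the TLBI value must match the granule the
 * translation regime is actually using, otherwise the invalidation
 * may legitimately do nothing. */
static bool tg_matches(unsigned page_size_granule, bool using16k, bool using64k)
{
    unsigned expected = using64k ? 3 : using16k ? 2 : 1;
    return page_size_granule == expected;
}

int main(void)
{
    printf("TG=3 with 64k in use: %s\n", tg_matches(3, false, true) ? "ok" : "mismatch");
    printf("TG=1 with 16k in use: %s\n", tg_matches(1, true, false) ? "ok" : "mismatch");
    return 0;
}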
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20220301215958.157011-15-richard.henderson@linaro.org
4
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
11
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
5
Message-id: 20180518153157.14899-1-peter.maydell@linaro.org
6
---
12
---
7
MAINTAINERS | 9 +++++++--
13
target/arm/helper.c | 10 +++++++---
8
1 file changed, 7 insertions(+), 2 deletions(-)
14
1 file changed, 7 insertions(+), 3 deletions(-)
9
15
10
diff --git a/MAINTAINERS b/MAINTAINERS
16
diff --git a/target/arm/helper.c b/target/arm/helper.c
11
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
12
--- a/MAINTAINERS
18
--- a/target/arm/helper.c
13
+++ b/MAINTAINERS
19
+++ b/target/arm/helper.c
14
@@ -XXX,XX +XXX,XX @@ F: hw/timer/cmsdk-apb-timer.c
20
@@ -XXX,XX +XXX,XX @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
15
F: include/hw/timer/cmsdk-apb-timer.h
21
uint64_t value)
16
F: hw/char/cmsdk-apb-uart.c
22
{
17
F: include/hw/char/cmsdk-apb-uart.h
23
unsigned int page_size_granule, page_shift, num, scale, exponent;
18
+F: hw/misc/tz-ppc.c
24
+ /* Extract one bit to represent the va selector in use. */
19
+F: include/hw/misc/tz-ppc.h
25
+ uint64_t select = sextract64(value, 36, 1);
20
26
+ ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
21
ARM cores
27
TLBIRange ret = { };
22
M: Peter Maydell <peter.maydell@linaro.org>
28
23
@@ -XXX,XX +XXX,XX @@ M: Peter Maydell <peter.maydell@linaro.org>
29
page_size_granule = extract64(value, 46, 2);
24
L: qemu-arm@nongnu.org
30
25
S: Maintained
31
- if (page_size_granule == 0) {
26
F: hw/arm/mps2.c
32
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid page size granule %d\n",
27
-F: hw/misc/mps2-scc.c
33
+ /* The granule encoded in value must match the granule in use. */
28
-F: include/hw/misc/mps2-scc.h
34
+ if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
29
+F: hw/arm/mps2-tz.c
35
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
30
+F: hw/misc/mps2-*.c
36
page_size_granule);
31
+F: include/hw/misc/mps2-*.h
37
return ret;
32
+F: hw/arm/iotkit.c
38
}
33
+F: include/hw/arm/iotkit.h
39
@@ -XXX,XX +XXX,XX @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
34
40
35
Musicpal
41
ret.length = (num + 1) << (exponent + page_shift);
36
M: Jan Kiszka <jan.kiszka@web.de>
42
43
- if (regime_has_2_ranges(mmuidx)) {
44
+ if (param.select) {
45
ret.base = sextract64(value, 0, 37);
46
} else {
47
ret.base = extract64(value, 0, 37);
37
--
48
--
38
2.17.1
49
2.25.1
39
40
New patch
1
From: Richard Henderson <richard.henderson@linaro.org>
1
2
3
We support 16k pages, but do not advertise that in ID_AA64MMFR0.
4
5
The value 0 in the TGRAN*_2 fields indicates that stage2 lookups defer
6
to the same support as stage1 lookups. This setting is deprecated, so
7
indicate support for all stage2 page sizes directly.
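
For reference, a hedged sketch of how a consumer would interpret a TGRAN*_2 field, assuming the usual ID register encodings (0 = defer to the stage-1 field, 1 = not supported, 2 = supported, 3 = supported with LPA2):

#include <stdbool.h>
#include <stdio.h>

static bool stage2_granule_supported(unsigned tgran_2, bool stage1_supported)
{
    if (tgran_2 == 0) {
        return stage1_supported;   /* deprecated "defer to stage 1" encoding */
    }
    return tgran_2 >= 2;
}

int main(void)
{
    printf("TGRAN16_2=0, stage1 yes -> %d\n", stage2_granule_supported(0, true));
    printf("TGRAN16_2=2             -> %d\n", stage2_granule_supported(2, false));
    return 0;
}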
8
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
11
Message-id: 20220301215958.157011-16-richard.henderson@linaro.org
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
13
---
14
target/arm/cpu64.c | 4 ++++
15
1 file changed, 4 insertions(+)
16
17
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/cpu64.c
20
+++ b/target/arm/cpu64.c
21
@@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj)
22
23
t = cpu->isar.id_aa64mmfr0;
24
t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
25
+ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */
26
+ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
27
+ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN64_2, 2); /* 64k stage2 supported */
28
+ t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */
29
cpu->isar.id_aa64mmfr0 = t;
30
31
t = cpu->isar.id_aa64mmfr1;
32
--
33
2.25.1
1
From: Francisco Iglesias <frasse.iglesias@gmail.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
Coverity found that the string returned by 'object_get_canonical_path' was not
3
This feature widens physical addresses (and intermediate physical
4
being freed at two locations in the model (CID 1391294 and CID 1391293) and
4
addresses for 2-stage translation) from 48 to 52 bits, when using
5
also that a memset was being called with a value greater than the max of a byte
5
4k or 16k pages.
6
on the second argument (CID 1391286). This patch corrects this by adding the
6
7
freeing of the strings and also changing to memset to zero instead on
7
This introduces the DS bit to TCR_ELx, which is RES0 unless the
8
descriptor unaligned errors.
8
page size is enabled and supports LPA2, resulting in the effective
9
9
value of DS for a given table walk. The DS bit changes the format
10
Signed-off-by: Francisco Iglesias <frasse.iglesias@gmail.com>
10
of the page table descriptor slightly, moving the PS field out to
11
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
11
TCR so that all pages have the same shareability and repurposing
12
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
those bits of the page table descriptor for the highest bits of
13
Message-id: 20180528184859.3530-1-frasse.iglesias@gmail.com
13
the output address.
14
15
Do not yet enable FEAT_LPA2; we need extra plumbing to avoid
16
tickling an old kernel bug.
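
As a standalone illustration (not the patch itself, and using an invented descriptor value), the placement of the high output-address bits in the two cases handled below:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t bits(uint64_t v, int pos, int len)
{
    return (v >> pos) & ((1ULL << len) - 1);
}

/* FEAT_LPA (64k pages) keeps output bits [51:48] in descriptor [15:12];
 * FEAT_LPA2 with effective DS=1 keeps output bits [51:50] in descriptor [9:8]. */
static uint64_t splice_high_bits(uint64_t descaddr, uint64_t descriptor,
                                 int outputsize, bool ds)
{
    if (outputsize > 48) {
        if (ds) {
            descaddr |= bits(descriptor, 8, 2) << 50;
        } else {
            descaddr |= bits(descriptor, 12, 4) << 48;
        }
    }
    return descaddr;
}

int main(void)
{
    uint64_t descriptor = 0x0000000000000300ULL;  /* bits [9:8] = 0b11 */
    printf("0x%016llx\n",
           (unsigned long long)splice_high_bits(0, descriptor, 52, true));
    return 0;
}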
17
14
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
18
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
19
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
20
Message-id: 20220301215958.157011-17-richard.henderson@linaro.org
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
21
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
---
22
---
17
hw/dma/xlnx-zdma.c | 10 +++++++---
23
docs/system/arm/emulation.rst | 1 +
18
1 file changed, 7 insertions(+), 3 deletions(-)
24
target/arm/cpu.h | 22 ++++++++
19
25
target/arm/internals.h | 2 +
20
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
26
target/arm/helper.c | 102 +++++++++++++++++++++++++++++-----
27
4 files changed, 112 insertions(+), 15 deletions(-)
28
29
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
21
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/dma/xlnx-zdma.c
31
--- a/docs/system/arm/emulation.rst
23
+++ b/hw/dma/xlnx-zdma.c
32
+++ b/docs/system/arm/emulation.rst
24
@@ -XXX,XX +XXX,XX @@ static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
33
@@ -XXX,XX +XXX,XX @@ the following architecture extensions:
25
qemu_log_mask(LOG_GUEST_ERROR,
34
- FEAT_JSCVT (JavaScript conversion instructions)
26
"zdma: unaligned descriptor at %" PRIx64,
35
- FEAT_LOR (Limited ordering regions)
27
addr);
36
- FEAT_LPA (Large Physical Address space)
28
- memset(buf, 0xdeadbeef, sizeof(XlnxZDMADescr));
37
+- FEAT_LPA2 (Large Physical and virtual Address space v2)
29
+ memset(buf, 0x0, sizeof(XlnxZDMADescr));
38
- FEAT_LRCPC (Load-acquire RCpc instructions)
30
s->error = true;
39
- FEAT_LRCPC2 (Load-acquire RCpc instructions v2)
40
- FEAT_LSE (Large System Extensions)
41
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/cpu.h
44
+++ b/target/arm/cpu.h
45
@@ -XXX,XX +XXX,XX @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
46
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
47
}
48
49
+static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
50
+{
51
+ return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1;
52
+}
53
+
54
+static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id)
55
+{
56
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
57
+ return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id));
58
+}
59
+
60
+static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id)
61
+{
62
+ return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2;
63
+}
64
+
65
+static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
66
+{
67
+ unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
68
+ return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
69
+}
70
+
71
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
72
{
73
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
74
diff --git a/target/arm/internals.h b/target/arm/internals.h
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/arm/internals.h
77
+++ b/target/arm/internals.h
78
@@ -XXX,XX +XXX,XX @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
79
typedef struct ARMVAParameters {
80
unsigned tsz : 8;
81
unsigned ps : 3;
82
+ unsigned sh : 2;
83
unsigned select : 1;
84
bool tbi : 1;
85
bool epd : 1;
86
@@ -XXX,XX +XXX,XX @@ typedef struct ARMVAParameters {
87
bool using16k : 1;
88
bool using64k : 1;
89
bool tsz_oob : 1; /* tsz has been clamped to legal range */
90
+ bool ds : 1;
91
} ARMVAParameters;
92
93
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
94
diff --git a/target/arm/helper.c b/target/arm/helper.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/target/arm/helper.c
97
+++ b/target/arm/helper.c
98
@@ -XXX,XX +XXX,XX @@ static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
99
} else {
100
ret.base = extract64(value, 0, 37);
101
}
102
+ if (param.ds) {
103
+ /*
104
+ * With DS=1, BaseADDR is always shifted 16 so that it is able
105
+ * to address all 52 va bits. The input address is perforce
106
+ * aligned on a 64k boundary regardless of translation granule.
107
+ */
108
+ page_shift = 16;
109
+ }
110
ret.base <<= page_shift;
111
112
return ret;
113
@@ -XXX,XX +XXX,XX @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
114
const int grainsize = stride + 3;
115
int startsizecheck;
116
117
- /* Negative levels are never allowed. */
118
- if (level < 0) {
119
+ /*
120
+ * Negative levels are usually not allowed...
121
+ * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
122
+ * begins with level -1. Note that previous feature tests will have
123
+ * eliminated this combination if it is not enabled.
124
+ */
125
+ if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
31
return false;
126
return false;
32
}
127
}
33
@@ -XXX,XX +XXX,XX @@ static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
128
34
RegisterInfo *r = &s->regs_info[addr / 4];
129
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
35
130
ARMMMUIdx mmu_idx, bool data)
36
if (!r->data) {
131
{
37
+ gchar *path = object_get_canonical_path(OBJECT(s));
132
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
38
qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
133
- bool epd, hpd, using16k, using64k, tsz_oob;
39
- object_get_canonical_path(OBJECT(s)),
134
- int select, tsz, tbi, max_tsz, min_tsz, ps;
40
+ path,
135
+ bool epd, hpd, using16k, using64k, tsz_oob, ds;
41
addr);
136
+ int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
42
+ g_free(path);
137
+ ARMCPU *cpu = env_archcpu(env);
43
ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
138
44
zdma_ch_imr_update_irq(s);
139
if (!regime_has_2_ranges(mmu_idx)) {
45
return 0;
140
select = 0;
46
@@ -XXX,XX +XXX,XX @@ static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
141
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
47
RegisterInfo *r = &s->regs_info[addr / 4];
142
hpd = extract32(tcr, 24, 1);
48
143
}
49
if (!r->data) {
144
epd = false;
50
+ gchar *path = object_get_canonical_path(OBJECT(s));
145
+ sh = extract32(tcr, 12, 2);
51
qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
146
ps = extract32(tcr, 16, 3);
52
- object_get_canonical_path(OBJECT(s)),
147
+ ds = extract64(tcr, 32, 1);
53
+ path,
148
} else {
54
addr, value);
149
/*
55
+ g_free(path);
150
* Bit 55 is always between the two regions, and is canonical for
56
ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
151
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
57
zdma_ch_imr_update_irq(s);
152
if (!select) {
58
return;
153
tsz = extract32(tcr, 0, 6);
154
epd = extract32(tcr, 7, 1);
155
+ sh = extract32(tcr, 12, 2);
156
using64k = extract32(tcr, 14, 1);
157
using16k = extract32(tcr, 15, 1);
158
hpd = extract64(tcr, 41, 1);
159
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
160
using64k = tg == 3;
161
tsz = extract32(tcr, 16, 6);
162
epd = extract32(tcr, 23, 1);
163
+ sh = extract32(tcr, 28, 2);
164
hpd = extract64(tcr, 42, 1);
165
}
166
ps = extract64(tcr, 32, 3);
167
+ ds = extract64(tcr, 59, 1);
168
}
169
170
- if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
171
+ if (cpu_isar_feature(aa64_st, cpu)) {
172
max_tsz = 48 - using64k;
173
} else {
174
max_tsz = 39;
175
}
176
177
+ /*
178
+ * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
179
+ * adjust the effective value of DS, as documented.
180
+ */
181
min_tsz = 16;
182
if (using64k) {
183
- if (cpu_isar_feature(aa64_lva, env_archcpu(env))) {
184
+ if (cpu_isar_feature(aa64_lva, cpu)) {
185
+ min_tsz = 12;
186
+ }
187
+ ds = false;
188
+ } else if (ds) {
189
+ switch (mmu_idx) {
190
+ case ARMMMUIdx_Stage2:
191
+ case ARMMMUIdx_Stage2_S:
192
+ if (using16k) {
193
+ ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
194
+ } else {
195
+ ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
196
+ }
197
+ break;
198
+ default:
199
+ if (using16k) {
200
+ ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
201
+ } else {
202
+ ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
203
+ }
204
+ break;
205
+ }
206
+ if (ds) {
207
min_tsz = 12;
208
}
209
}
210
- /* TODO: FEAT_LPA2 */
211
212
if (tsz > max_tsz) {
213
tsz = max_tsz;
214
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
215
return (ARMVAParameters) {
216
.tsz = tsz,
217
.ps = ps,
218
+ .sh = sh,
219
.select = select,
220
.tbi = tbi,
221
.epd = epd,
222
@@ -XXX,XX +XXX,XX @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
223
.using16k = using16k,
224
.using64k = using64k,
225
.tsz_oob = tsz_oob,
226
+ .ds = ds,
227
};
228
}
229
230
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
231
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
232
*/
233
uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
234
+ uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
235
uint32_t startlevel;
236
bool ok;
237
238
- if (!aarch64 || stride == 9) {
239
+ /* SL2 is RES0 unless DS=1 & 4kb granule. */
240
+ if (param.ds && stride == 9 && sl2) {
241
+ if (sl0 != 0) {
242
+ level = 0;
243
+ fault_type = ARMFault_Translation;
244
+ goto do_fault;
245
+ }
246
+ startlevel = -1;
247
+ } else if (!aarch64 || stride == 9) {
248
/* AArch32 or 4KB pages */
249
startlevel = 2 - sl0;
250
251
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
252
* for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
253
* or an AddressSize fault is raised. So for v8 we extract those SBZ
254
* bits as part of the address, which will be checked via outputsize.
255
- * For AArch64, the address field always goes up to bit 47 (with extra
256
- * bits for FEAT_LPA placed elsewhere). AArch64 implies v8.
257
+ * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
258
+ * the highest bits of a 52-bit output are placed elsewhere.
259
*/
260
- if (arm_feature(env, ARM_FEATURE_V8)) {
261
+ if (param.ds) {
262
+ descaddrmask = MAKE_64BIT_MASK(0, 50);
263
+ } else if (arm_feature(env, ARM_FEATURE_V8)) {
264
descaddrmask = MAKE_64BIT_MASK(0, 48);
265
} else {
266
descaddrmask = MAKE_64BIT_MASK(0, 40);
267
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
268
269
/*
270
* For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
271
- * of descriptor. Otherwise, if descaddr is out of range, raise
272
- * AddressSizeFault.
273
+ * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of
274
+ * descaddr are in [9:8]. Otherwise, if descaddr is out of range,
275
+ * raise AddressSizeFault.
276
*/
277
if (outputsize > 48) {
278
- descaddr |= extract64(descriptor, 12, 4) << 48;
279
+ if (param.ds) {
280
+ descaddr |= extract64(descriptor, 8, 2) << 50;
281
+ } else {
282
+ descaddr |= extract64(descriptor, 12, 4) << 48;
283
+ }
284
} else if (descaddr >> outputsize) {
285
fault_type = ARMFault_AddressSize;
286
goto do_fault;
287
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
288
assert(attrindx <= 7);
289
cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
290
}
291
- cacheattrs->shareability = extract32(attrs, 6, 2);
292
+
293
+ /*
294
+ * For FEAT_LPA2 and effective DS, the SH field in the attributes
295
+ * was re-purposed for output address bits. The SH attribute in
296
+ * that case comes from TCR_ELx, which we extracted earlier.
297
+ */
298
+ if (param.ds) {
299
+ cacheattrs->shareability = param.sh;
300
+ } else {
301
+ cacheattrs->shareability = extract32(attrs, 6, 2);
302
+ }
303
304
*phys_ptr = descaddr;
305
*page_size_ptr = page_size;
59
--
306
--
60
2.17.1
307
2.25.1
61
62
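As a rough illustration of the TxSZ/DS handling in the patch above: the DS bit from TCR_ELx only takes effect when the ID registers advertise FEAT_LPA2 for the granule (and translation stage) in use, while the 64k granule instead relies on FEAT_LVA for the extended 52-bit range. The sketch below collapses the per-granule, per-stage cpu_isar_feature() checks into a single have_lpa2_for_granule flag; it is a standalone, hypothetical helper, not code from this series.

    #include <stdbool.h>

    /*
     * Hypothetical standalone helper (illustration only, not QEMU code).
     * "have_lva" and "have_lpa2_for_granule" stand in for the
     * cpu_isar_feature() checks in the hunk; "ds" is the TCR_ELx.DS bit
     * on entry and the effective DS value on return.
     */
    static int effective_min_tsz(bool using64k, bool have_lva,
                                 bool have_lpa2_for_granule, bool *ds)
    {
        int min_tsz = 16;            /* baseline: at most a 48-bit VA range */

        if (using64k) {
            if (have_lva) {
                min_tsz = 12;        /* FEAT_LVA: 52-bit VA with 64k pages */
            }
            *ds = false;             /* TCR_ELx.DS is RES0 with 64k pages */
        } else if (*ds) {
            if (have_lpa2_for_granule) {
                min_tsz = 12;        /* FEAT_LPA2: 52-bit VA with 4k/16k pages */
            } else {
                *ds = false;         /* DS has no effect without FEAT_LPA2 */
            }
        }
        return min_tsz;
    }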
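Similarly, a minimal sketch of how the get_phys_addr_lpae() hunks above recover the top bits of a greater-than-48-bit output address from a table descriptor: with FEAT_LPA (64k pages, PS=6) the extra bits [51:48] live in descriptor bits [15:12], while with FEAT_LPA2 and effective DS the descriptor address field already reaches bit 49 and bits [51:50] come from descriptor bits [9:8], with the SH attribute then taken from TCR_ELx rather than from the descriptor. The helper name is hypothetical, not QEMU code.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: recover output address bits above the base field. */
    static uint64_t descriptor_high_bits(uint64_t descriptor, bool lpa2_ds)
    {
        if (lpa2_ds) {
            /* FEAT_LPA2 (effective DS): descriptor [9:8] -> address [51:50] */
            return ((descriptor >> 8) & 0x3u) << 50;
        } else {
            /* FEAT_LPA (64k pages, PS=6): descriptor [15:12] -> address [51:48] */
            return ((descriptor >> 12) & 0xfu) << 48;
        }
    }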
1
The FRECPX instructions should (like most other floating point operations)
1
When we're using KVM, the PSCI implementation is provided by the
2
honour the FPCR.FZ bit which specifies whether input denormals should
2
kernel, but QEMU has to tell the guest about it via the device tree.
3
be flushed to zero (or FZ16 for the half-precision version).
3
Currently we look at the KVM_CAP_ARM_PSCI_0_2 capability to determine
4
We forgot to implement this, which doesn't affect the results (since
4
if the kernel is providing at least PSCI 0.2, but if the kernel
5
the calculation doesn't actually care about the mantissa bits) but did
5
provides a newer version than that we will still only tell the guest
6
mean we were failing to set the FPSR.IDC bit.
6
it has PSCI 0.2. (This is fairly harmless; it just means the guest
7
won't use newer parts of the PSCI API.)
8
9
The kernel exposes the specific PSCI version it is implementing via
10
the ONE_REG API; use this to report in the dtb that the PSCI
11
implementation is 1.0-compatible if appropriate. (The device tree
12
binding currently only distinguishes "pre-0.2", "0.2-compatible" and
13
"1.0-compatible".)
7
14
8
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
15
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
16
Reviewed-by: Marc Zyngier <maz@kernel.org>
17
Reviewed-by: Akihiko Odaki <akihiko.odaki@gmail.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-id: 20180521172712.19930-1-peter.maydell@linaro.org
19
Reviewed-by: Andrew Jones <drjones@redhat.com>
20
Message-id: 20220224134655.1207865-1-peter.maydell@linaro.org
11
---
21
---
12
target/arm/helper-a64.c | 6 ++++++
22
target/arm/kvm-consts.h | 1 +
13
1 file changed, 6 insertions(+)
23
hw/arm/boot.c | 5 ++---
24
target/arm/kvm64.c | 12 ++++++++++++
25
3 files changed, 15 insertions(+), 3 deletions(-)
14
26
15
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
27
diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h
16
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/helper-a64.c
29
--- a/target/arm/kvm-consts.h
18
+++ b/target/arm/helper-a64.c
30
+++ b/target/arm/kvm-consts.h
19
@@ -XXX,XX +XXX,XX @@ float16 HELPER(frecpx_f16)(float16 a, void *fpstp)
31
@@ -XXX,XX +XXX,XX @@ MISMATCH_CHECK(QEMU_PSCI_1_0_FN_PSCI_FEATURES, PSCI_1_0_FN_PSCI_FEATURES);
20
return nan;
32
33
#define QEMU_PSCI_VERSION_0_1 0x00001
34
#define QEMU_PSCI_VERSION_0_2 0x00002
35
+#define QEMU_PSCI_VERSION_1_0 0x10000
36
#define QEMU_PSCI_VERSION_1_1 0x10001
37
38
MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP);
39
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/hw/arm/boot.c
42
+++ b/hw/arm/boot.c
43
@@ -XXX,XX +XXX,XX @@ static void fdt_add_psci_node(void *fdt)
21
}
44
}
22
45
23
+ a = float16_squash_input_denormal(a, fpst);
46
qemu_fdt_add_subnode(fdt, "/psci");
47
- if (armcpu->psci_version == QEMU_PSCI_VERSION_0_2 ||
48
- armcpu->psci_version == QEMU_PSCI_VERSION_1_1) {
49
- if (armcpu->psci_version == QEMU_PSCI_VERSION_0_2) {
50
+ if (armcpu->psci_version >= QEMU_PSCI_VERSION_0_2) {
51
+ if (armcpu->psci_version < QEMU_PSCI_VERSION_1_0) {
52
const char comp[] = "arm,psci-0.2\0arm,psci";
53
qemu_fdt_setprop(fdt, "/psci", "compatible", comp, sizeof(comp));
54
} else {
55
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/arm/kvm64.c
58
+++ b/target/arm/kvm64.c
59
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
60
uint64_t mpidr;
61
ARMCPU *cpu = ARM_CPU(cs);
62
CPUARMState *env = &cpu->env;
63
+ uint64_t psciver;
64
65
if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
66
!object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
67
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
68
}
69
}
70
71
+ /*
72
+ * KVM reports the exact PSCI version it is implementing via a
73
+ * special sysreg. If it is present, use its contents to determine
74
+ * what to report to the guest in the dtb (it is the PSCI version,
75
+ * in the same 15-bits major 16-bits minor format that PSCI_VERSION
76
+ * returns).
77
+ */
78
+ if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
79
+ cpu->psci_version = psciver;
80
+ }
24
+
81
+
25
val16 = float16_val(a);
82
/*
26
sbit = 0x8000 & val16;
83
* When KVM is in use, PSCI is emulated in-kernel and not by qemu.
27
exp = extract32(val16, 10, 5);
84
* Currently KVM has its own idea about MPIDR assignment, so we
28
@@ -XXX,XX +XXX,XX @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
29
return nan;
30
}
31
32
+ a = float32_squash_input_denormal(a, fpst);
33
+
34
val32 = float32_val(a);
35
sbit = 0x80000000ULL & val32;
36
exp = extract32(val32, 23, 8);
37
@@ -XXX,XX +XXX,XX @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
38
return nan;
39
}
40
41
+ a = float64_squash_input_denormal(a, fpst);
42
+
43
val64 = float64_val(a);
44
sbit = 0x8000000000000000ULL & val64;
45
exp = extract64(float64_val(a), 52, 11);
46
--
85
--
47
2.17.1
86
2.25.1
48
49
diff view generated by jsdifflib
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
The updateUIInfo method makes Cocoa API calls. It also calls back
2
add MemTxAttrs as an argument to flatview_access_valid().
2
into QEMU functions like dpy_set_ui_info(). To do this safely, we
3
Its callers now all have an attrs value to hand, so we can
3
need to follow two rules:
4
correct our earlier temporary use of MEMTXATTRS_UNSPECIFIED.
4
* Cocoa API calls are made on the Cocoa UI thread
5
* When calling back into QEMU we must hold the iothread lock
6
7
Fix the places where we got this wrong, by taking the iothread lock
8
while executing updateUIInfo, and moving the call in cocoa_switch()
9
inside the dispatch_async block.
10
11
Some of the Cocoa UI methods which call updateUIInfo are invoked as
12
part of the initial application startup, while we're still doing the
13
little cross-thread dance described in the comment just above
14
call_qemu_main(). This meant they were calling back into the QEMU UI
15
layer before we'd actually finished initializing our display and
16
registered the DisplayChangeListener, which isn't really valid. Once
17
updateUIInfo takes the iothread lock, we no longer get away with
18
this, because during this startup phase the iothread lock is held by
19
the QEMU main-loop thread which is waiting for us to finish our
20
display initialization. So we must suppress updateUIInfo until
21
applicationDidFinishLaunching allows the QEMU main-loop thread to
22
continue.
5
23
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
24
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
25
Reviewed-by: Akihiko Odaki <akihiko.odaki@gmail.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Tested-by: Akihiko Odaki <akihiko.odaki@gmail.com>
9
Message-id: 20180521140402.23318-10-peter.maydell@linaro.org
27
Message-id: 20220224101330.967429-2-peter.maydell@linaro.org
10
---
28
---
11
exec.c | 12 +++++-------
29
ui/cocoa.m | 25 ++++++++++++++++++++++---
12
1 file changed, 5 insertions(+), 7 deletions(-)
30
1 file changed, 22 insertions(+), 3 deletions(-)
13
31
14
diff --git a/exec.c b/exec.c
32
diff --git a/ui/cocoa.m b/ui/cocoa.m
15
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
16
--- a/exec.c
34
--- a/ui/cocoa.m
17
+++ b/exec.c
35
+++ b/ui/cocoa.m
18
@@ -XXX,XX +XXX,XX @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
36
@@ -XXX,XX +XXX,XX @@ QemuCocoaView *cocoaView;
19
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
37
}
20
const uint8_t *buf, int len);
21
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
22
- bool is_write);
23
+ bool is_write, MemTxAttrs attrs);
24
25
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
26
unsigned len, MemTxAttrs attrs)
27
@@ -XXX,XX +XXX,XX @@ static bool subpage_accepts(void *opaque, hwaddr addr,
28
#endif
29
30
return flatview_access_valid(subpage->fv, addr + subpage->base,
31
- len, is_write);
32
+ len, is_write, attrs);
33
}
38
}
34
39
35
static const MemoryRegionOps subpage_ops = {
40
-- (void) updateUIInfo
36
@@ -XXX,XX +XXX,XX @@ static void cpu_notify_map_clients(void)
41
+- (void) updateUIInfoLocked
42
{
43
+ /* Must be called with the iothread lock, i.e. via updateUIInfo */
44
NSSize frameSize;
45
QemuUIInfo info;
46
47
@@ -XXX,XX +XXX,XX @@ QemuCocoaView *cocoaView;
48
dpy_set_ui_info(dcl.con, &info, TRUE);
37
}
49
}
38
50
39
static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
51
+- (void) updateUIInfo
40
- bool is_write)
52
+{
41
+ bool is_write, MemTxAttrs attrs)
53
+ if (!allow_events) {
54
+ /*
55
+ * Don't try to tell QEMU about UI information in the application
56
+ * startup phase -- we haven't yet registered dcl with the QEMU UI
57
+ * layer, and also trying to take the iothread lock would deadlock.
58
+ * When cocoa_display_init() does register the dcl, the UI layer
59
+ * will call cocoa_switch(), which will call updateUIInfo, so
60
+ * we don't lose any information here.
61
+ */
62
+ return;
63
+ }
64
+
65
+ with_iothread_lock(^{
66
+ [self updateUIInfoLocked];
67
+ });
68
+}
69
+
70
- (void)viewDidMoveToWindow
42
{
71
{
43
MemoryRegion *mr;
72
[self updateUIInfo];
44
hwaddr l, xlat;
73
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
45
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
74
46
mr = flatview_translate(fv, addr, &xlat, &l, is_write);
75
COCOA_DEBUG("qemu_cocoa: cocoa_switch\n");
47
if (!memory_access_is_direct(mr, is_write)) {
76
48
l = memory_access_size(mr, l, addr);
77
- [cocoaView updateUIInfo];
49
- /* When our callers all have attrs we'll pass them through here */
78
-
50
- if (!memory_region_access_valid(mr, xlat, l, is_write,
79
// The DisplaySurface will be freed as soon as this callback returns.
51
- MEMTXATTRS_UNSPECIFIED)) {
80
// We take a reference to the underlying pixman image here so it does
52
+ if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) {
81
// not disappear from under our feet; the switchSurface method will
53
return false;
82
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
54
}
83
pixman_image_ref(image);
55
}
84
56
@@ -XXX,XX +XXX,XX @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
85
dispatch_async(dispatch_get_main_queue(), ^{
57
86
+ [cocoaView updateUIInfo];
58
rcu_read_lock();
87
[cocoaView switchSurface:image];
59
fv = address_space_to_flatview(as);
88
});
60
- result = flatview_access_valid(fv, addr, len, is_write);
89
[pool release];
61
+ result = flatview_access_valid(fv, addr, len, is_write, attrs);
62
rcu_read_unlock();
63
return result;
64
}
65
--
90
--
66
2.17.1
91
2.25.1
67
68
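For the FRECPX change above, flushing an input denormal under FPCR.FZ/FZ16 means replacing a nonzero value whose exponent field is zero with a signed zero, and recording that an input denormal was seen, which is what later surfaces as FPSR.IDC. Below is a standalone sketch for the half-precision layout, assuming the IEEE binary16 bit positions; it is not softfloat's float16_squash_input_denormal() and the helper name is made up.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Hypothetical sketch for IEEE binary16: sign in bit 15, a 5-bit
     * exponent in bits [14:10], a 10-bit fraction in bits [9:0].
     */
    static uint16_t squash_f16_input_denormal(uint16_t val, bool flush_inputs,
                                              bool *input_denormal_seen)
    {
        uint16_t exp = (val >> 10) & 0x1f;
        uint16_t frac = val & 0x3ff;

        if (flush_inputs && exp == 0 && frac != 0) {
            *input_denormal_seen = true;    /* later reported via FPSR.IDC */
            return val & 0x8000;            /* keep the sign, flush to zero */
        }
        return val;
    }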
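For the PSCI change above, the value read back via KVM_REG_ARM_PSCI_VERSION uses the PSCI_VERSION encoding: a 15-bit major field in bits [30:16] (bit 31 is reserved as zero) and a 16-bit minor field in bits [15:0], so QEMU_PSCI_VERSION_1_0 is 0x10000 and anything greater or equal gets the 1.0-compatible dtb node. A small decoding sketch follows; the helper name is made up for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical decoder for the PSCI_VERSION encoding (illustration only). */
    static void print_psci_version(uint64_t psciver)
    {
        uint32_t major = (psciver >> 16) & 0x7fff;   /* bits [30:16] */
        uint32_t minor = psciver & 0xffff;           /* bits [15:0] */

        printf("PSCI %" PRIu32 ".%" PRIu32 " (raw 0x%" PRIx64 "), "
               "1.0-compatible: %s\n",
               major, minor, psciver,
               psciver >= 0x10000 ? "yes" : "no");
    }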
1
As part of plumbing MemTxAttrs down to the IOMMU translate method,
1
In commit 6e657e64cdc478 in 2013 we added some autorelease pools to
2
add MemTxAttrs as an argument to address_space_access_valid().
2
deal with complaints from macOS when we made calls into Cocoa from
3
Its callers either have an attrs value to hand, or don't care
3
threads that didn't have automatically created autorelease pools.
4
and can use MEMTXATTRS_UNSPECIFIED.
4
Later on, macOS got stricter about forbidding cross-thread Cocoa
5
calls, and in commit 5588840ff77800e839d8 we restructured the code to
6
avoid them. This left the autorelease pool creation in several
7
functions without any purpose; delete it.
8
9
We still need the pool in cocoa_refresh() for the clipboard related
10
code which is called directly there.
5
11
6
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Akihiko Odaki <akihiko.odaki@gmail.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Tested-by: Akihiko Odaki <akihiko.odaki@gmail.com>
9
Message-id: 20180521140402.23318-6-peter.maydell@linaro.org
15
Message-id: 20220224101330.967429-3-peter.maydell@linaro.org
10
---
16
---
11
include/exec/memory.h | 4 +++-
17
ui/cocoa.m | 6 ------
12
include/sysemu/dma.h | 3 ++-
18
1 file changed, 6 deletions(-)
13
exec.c | 3 ++-
14
target/s390x/diag.c | 6 ++++--
15
target/s390x/excp_helper.c | 3 ++-
16
target/s390x/mmu_helper.c | 3 ++-
17
target/s390x/sigp.c | 3 ++-
18
7 files changed, 17 insertions(+), 8 deletions(-)
19
19
20
diff --git a/include/exec/memory.h b/include/exec/memory.h
20
diff --git a/ui/cocoa.m b/ui/cocoa.m
21
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/memory.h
22
--- a/ui/cocoa.m
23
+++ b/include/exec/memory.h
23
+++ b/ui/cocoa.m
24
@@ -XXX,XX +XXX,XX @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
24
@@ -XXX,XX +XXX,XX @@ int main (int argc, char **argv) {
25
* @addr: address within that address space
25
static void cocoa_update(DisplayChangeListener *dcl,
26
* @len: length of the area to be checked
26
int x, int y, int w, int h)
27
* @is_write: indicates the transfer direction
28
+ * @attrs: memory attributes
29
*/
30
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
31
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
32
+ bool is_write, MemTxAttrs attrs);
33
34
/* address_space_map: map a physical memory region into a host virtual address
35
*
36
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/include/sysemu/dma.h
39
+++ b/include/sysemu/dma.h
40
@@ -XXX,XX +XXX,XX @@ static inline bool dma_memory_valid(AddressSpace *as,
41
DMADirection dir)
42
{
27
{
43
return address_space_access_valid(as, addr, len,
28
- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
44
- dir == DMA_DIRECTION_FROM_DEVICE);
29
-
45
+ dir == DMA_DIRECTION_FROM_DEVICE,
30
COCOA_DEBUG("qemu_cocoa: cocoa_update\n");
46
+ MEMTXATTRS_UNSPECIFIED);
31
32
dispatch_async(dispatch_get_main_queue(), ^{
33
@@ -XXX,XX +XXX,XX @@ static void cocoa_update(DisplayChangeListener *dcl,
34
}
35
[cocoaView setNeedsDisplayInRect:rect];
36
});
37
-
38
- [pool release];
47
}
39
}
48
40
49
static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr,
41
static void cocoa_switch(DisplayChangeListener *dcl,
50
diff --git a/exec.c b/exec.c
42
DisplaySurface *surface)
51
index XXXXXXX..XXXXXXX 100644
43
{
52
--- a/exec.c
44
- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
53
+++ b/exec.c
45
pixman_image_t *image = surface->image;
54
@@ -XXX,XX +XXX,XX @@ static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
46
47
COCOA_DEBUG("qemu_cocoa: cocoa_switch\n");
48
@@ -XXX,XX +XXX,XX @@ static void cocoa_switch(DisplayChangeListener *dcl,
49
[cocoaView updateUIInfo];
50
[cocoaView switchSurface:image];
51
});
52
- [pool release];
55
}
53
}
56
54
57
bool address_space_access_valid(AddressSpace *as, hwaddr addr,
55
static void cocoa_refresh(DisplayChangeListener *dcl)
58
- int len, bool is_write)
59
+ int len, bool is_write,
60
+ MemTxAttrs attrs)
61
{
62
FlatView *fv;
63
bool result;
64
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/s390x/diag.c
67
+++ b/target/s390x/diag.c
68
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
69
return;
70
}
71
if (!address_space_access_valid(&address_space_memory, addr,
72
- sizeof(IplParameterBlock), false)) {
73
+ sizeof(IplParameterBlock), false,
74
+ MEMTXATTRS_UNSPECIFIED)) {
75
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
76
return;
77
}
78
@@ -XXX,XX +XXX,XX @@ out:
79
return;
80
}
81
if (!address_space_access_valid(&address_space_memory, addr,
82
- sizeof(IplParameterBlock), true)) {
83
+ sizeof(IplParameterBlock), true,
84
+ MEMTXATTRS_UNSPECIFIED)) {
85
s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
86
return;
87
}
88
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/s390x/excp_helper.c
91
+++ b/target/s390x/excp_helper.c
92
@@ -XXX,XX +XXX,XX @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
93
94
/* check out of RAM access */
95
if (!address_space_access_valid(&address_space_memory, raddr,
96
- TARGET_PAGE_SIZE, rw)) {
97
+ TARGET_PAGE_SIZE, rw,
98
+ MEMTXATTRS_UNSPECIFIED)) {
99
DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
100
(uint64_t)raddr, (uint64_t)ram_size);
101
trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO);
102
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/target/s390x/mmu_helper.c
105
+++ b/target/s390x/mmu_helper.c
106
@@ -XXX,XX +XXX,XX @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
107
return ret;
108
}
109
if (!address_space_access_valid(&address_space_memory, pages[i],
110
- TARGET_PAGE_SIZE, is_write)) {
111
+ TARGET_PAGE_SIZE, is_write,
112
+ MEMTXATTRS_UNSPECIFIED)) {
113
trigger_access_exception(env, PGM_ADDRESSING, ILEN_AUTO, 0);
114
return -EFAULT;
115
}
116
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/s390x/sigp.c
119
+++ b/target/s390x/sigp.c
120
@@ -XXX,XX +XXX,XX @@ static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
121
cpu_synchronize_state(cs);
122
123
if (!address_space_access_valid(&address_space_memory, addr,
124
- sizeof(struct LowCore), false)) {
125
+ sizeof(struct LowCore), false,
126
+ MEMTXATTRS_UNSPECIFIED)) {
127
set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
128
return;
129
}
130
--
56
--
131
2.17.1
57
2.25.1
132
133
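To illustrate the updated contract of address_space_access_valid() from the patch above: callers that carry real transaction attributes pass them through, and callers that do not care pass MEMTXATTRS_UNSPECIFIED. The caller below is hypothetical, not part of this series, and assumes the usual QEMU headers are available.

    #include "qemu/osdep.h"
    #include "exec/address-spaces.h"
    #include "exec/memory.h"

    /* Hypothetical device caller checking a DMA window before writing to it. */
    static bool my_device_dma_ok(hwaddr addr, int len, bool secure)
    {
        /* Pass real attributes when we have them, UNSPECIFIED otherwise. */
        MemTxAttrs attrs = secure ? (MemTxAttrs){ .secure = 1 }
                                  : MEMTXATTRS_UNSPECIFIED;

        return address_space_access_valid(&address_space_memory, addr, len,
                                          true /* is_write */, attrs);
    }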