1 | arm pullreq for rc1. All minor bugfixes, except for the sve-default-vector-length | 1 | A last small set of bug fixes before rc1. |
---|---|---|---|
2 | patches, which are somewhere between a bugfix and a new feature. | ||
3 | 2 | ||
4 | thanks | 3 | thanks |
5 | -- PMM | 4 | -- PMM |
6 | 5 | ||
7 | The following changes since commit c08ccd1b53f488ac86c1f65cf7623dc91acc249a: | 6 | The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637: |
8 | 7 | ||
9 | Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210726' into staging (2021-07-27 08:35:01 +0100) | 8 | Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100) |
10 | 9 | ||
11 | are available in the Git repository at: | 10 | are available in the Git repository at: |
12 | 11 | ||
13 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210727 | 12 | https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717 |
14 | 13 | ||
15 | for you to fetch changes up to e229a179a503f2aee43a76888cf12fbdfe8a3749: | 14 | for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4: |
16 | 15 | ||
17 | hw: aspeed_gpio: Fix memory size (2021-07-27 11:00:00 +0100) | 16 | hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100) |
18 | 17 | ||
19 | ---------------------------------------------------------------- | 18 | ---------------------------------------------------------------- |
20 | target-arm queue: | 19 | target-arm queue: |
21 | * hw/arm/smmuv3: Check 31st bit to see if CD is valid | 20 | * hw/arm/sbsa-ref: set 'slots' property of xhci |
22 | * qemu-options.hx: Fix formatting of -machine memory-backend option | 21 | * linux-user: Remove pointless NULL check in clock_adjtime handling |
23 | * hw: aspeed_gpio: Fix memory size | 22 | * ptw: Fix S1_ptw_translate() debug path |
24 | * hw/arm/nseries: Display hexadecimal value with '0x' prefix | 23 | * ptw: Account for FEAT_RME when applying {N}SW, SA bits |
25 | * Add sve-default-vector-length cpu property | 24 | * accel/tcg: Zero-pad PC in TCG CPU exec trace lines |
26 | * docs: Update path that mentions deprecated.rst | 25 | * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write |
27 | * hw/intc/armv7m_nvic: for v8.1M VECTPENDING hides S exceptions from NS | ||
28 | * hw/intc/armv7m_nvic: Correct size of ICSR.VECTPENDING | ||
29 | * hw/intc/armv7m_nvic: ISCR.ISRPENDING is set for non-enabled pending interrupts | ||
30 | * target/arm: Report M-profile alignment faults correctly to the guest | ||
31 | * target/arm: Add missing 'return's after calling v7m_exception_taken() | ||
32 | * target/arm: Enforce that M-profile SP low 2 bits are always zero | ||
33 | 26 | ||
34 | ---------------------------------------------------------------- | 27 | ---------------------------------------------------------------- |
35 | Joe Komlodi (1): | 28 | Peter Maydell (5): |
36 | hw/arm/smmuv3: Check 31st bit to see if CD is valid | 29 | linux-user: Remove pointless NULL check in clock_adjtime handling |
30 | target/arm/ptw.c: Add comments to S1Translate struct fields | ||
31 | target/arm: Fix S1_ptw_translate() debug path | ||
32 | target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits | ||
33 | accel/tcg: Zero-pad PC in TCG CPU exec trace lines | ||
37 | 34 | ||
38 | Joel Stanley (1): | 35 | Tong Ho (1): |
39 | hw: aspeed_gpio: Fix memory size | 36 | hw/nvram: Avoid unnecessary Xilinx eFuse backstore write |
40 | 37 | ||
41 | Mao Zhongyi (1): | 38 | Yuquan Wang (1): |
42 | docs: Update path that mentions deprecated.rst | 39 | hw/arm/sbsa-ref: set 'slots' property of xhci |
43 | 40 | ||
44 | Peter Maydell (7): | 41 | accel/tcg/cpu-exec.c | 4 +-- |
45 | qemu-options.hx: Fix formatting of -machine memory-backend option | 42 | accel/tcg/translate-all.c | 2 +- |
46 | target/arm: Enforce that M-profile SP low 2 bits are always zero | 43 | hw/arm/sbsa-ref.c | 1 + |
47 | target/arm: Add missing 'return's after calling v7m_exception_taken() | 44 | hw/nvram/xlnx-efuse.c | 11 ++++-- |
48 | target/arm: Report M-profile alignment faults correctly to the guest | 45 | linux-user/syscall.c | 12 +++---- |
49 | hw/intc/armv7m_nvic: ISCR.ISRPENDING is set for non-enabled pending interrupts | 46 | target/arm/ptw.c | 90 +++++++++++++++++++++++++++++++++++++++++------ |
50 | hw/intc/armv7m_nvic: Correct size of ICSR.VECTPENDING | 47 | 6 files changed, 98 insertions(+), 22 deletions(-) |
51 | hw/intc/armv7m_nvic: for v8.1M VECTPENDING hides S exceptions from NS | ||
52 | |||
53 | Philippe Mathieu-Daudé (1): | ||
54 | hw/arm/nseries: Display hexadecimal value with '0x' prefix | ||
55 | |||
56 | Richard Henderson (3): | ||
57 | target/arm: Correctly bound length in sve_zcr_get_valid_len | ||
58 | target/arm: Export aarch64_sve_zcr_get_valid_len | ||
59 | target/arm: Add sve-default-vector-length cpu property | ||
60 | |||
61 | docs/system/arm/cpu-features.rst | 15 ++++++++++ | ||
62 | configure | 2 +- | ||
63 | hw/arm/smmuv3-internal.h | 2 +- | ||
64 | target/arm/cpu.h | 5 ++++ | ||
65 | target/arm/internals.h | 10 +++++++ | ||
66 | hw/arm/nseries.c | 2 +- | ||
67 | hw/gpio/aspeed_gpio.c | 3 +- | ||
68 | hw/intc/armv7m_nvic.c | 40 +++++++++++++++++++-------- | ||
69 | target/arm/cpu.c | 14 ++++++++-- | ||
70 | target/arm/cpu64.c | 60 ++++++++++++++++++++++++++++++++++++++++ | ||
71 | target/arm/gdbstub.c | 4 +++ | ||
72 | target/arm/helper.c | 8 ++++-- | ||
73 | target/arm/m_helper.c | 24 ++++++++++++---- | ||
74 | target/arm/translate.c | 3 ++ | ||
75 | target/i386/cpu.c | 2 +- | ||
76 | MAINTAINERS | 2 +- | ||
77 | qemu-options.hx | 30 +++++++++++--------- | ||
78 | 17 files changed, 183 insertions(+), 43 deletions(-) | ||
Deleted patch | |||
---|---|---|---|
1 | From: Joe Komlodi <joe.komlodi@xilinx.com> | ||
2 | 1 | ||
3 | The bit to see if a CD is valid is the last bit of the first word of the CD. | ||
4 | |||
5 | Signed-off-by: Joe Komlodi <joe.komlodi@xilinx.com> | ||
6 | Message-id: 1626728232-134665-2-git-send-email-joe.komlodi@xilinx.com | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | --- | ||
10 | hw/arm/smmuv3-internal.h | 2 +- | ||
11 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
12 | |||
13 | diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/hw/arm/smmuv3-internal.h | ||
16 | +++ b/hw/arm/smmuv3-internal.h | ||
17 | @@ -XXX,XX +XXX,XX @@ static inline int pa_range(STE *ste) | ||
18 | |||
19 | /* CD fields */ | ||
20 | |||
21 | -#define CD_VALID(x) extract32((x)->word[0], 30, 1) | ||
22 | +#define CD_VALID(x) extract32((x)->word[0], 31, 1) | ||
23 | #define CD_ASID(x) extract32((x)->word[1], 16, 16) | ||
24 | #define CD_TTB(x, sel) \ | ||
25 | ({ \ | ||
26 | -- | ||
27 | 2.20.1 | ||
28 | |||
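As a side note on the fix above: a minimal, self-contained sketch (illustrative only; the extract helper below just mimics the `(value >> start) & mask` semantics the patch relies on, it is not the QEMU source) of why bit 31, not bit 30, is the last bit of the first 32-bit CD word:

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for a bit-extract helper: take 'length' bits at 'start'. */
static uint32_t extract_bits(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

int main(void)
{
    /* Hypothetical CD word 0 with only the topmost (Valid) bit set. */
    uint32_t cd_word0 = 0x80000000u;

    printf("extract at bit 30: %u\n", extract_bits(cd_word0, 30, 1)); /* 0 */
    printf("extract at bit 31: %u\n", extract_bits(cd_word0, 31, 1)); /* 1 */
    return 0;
}
```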
Deleted patch | |||
---|---|---|---|
1 | The documentation of the -machine memory-backend has some minor | ||
2 | formatting errors: | ||
3 | * Misindentation of the initial line meant that the whole option | ||
4 | section is incorrectly indented in the HTML output compared to | ||
5 | the other -machine options | ||
6 | * The examples weren't indented, which meant that they were formatted | ||
7 | as plain run-on text including outputting the "::" as text. | ||
8 | * The a) b) list has no rst-format markup so it is rendered as | ||
9 | a single run-on paragraph | ||
10 | 1 | ||
11 | Fix the formatting. | ||
12 | |||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
14 | Reviewed-by: Igor Mammedov <imammedo@redhat.com> | ||
15 | Message-id: 20210719105257.3599-1-peter.maydell@linaro.org | ||
16 | --- | ||
17 | qemu-options.hx | 30 +++++++++++++++++------------- | ||
18 | 1 file changed, 17 insertions(+), 13 deletions(-) | ||
19 | |||
20 | diff --git a/qemu-options.hx b/qemu-options.hx | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/qemu-options.hx | ||
23 | +++ b/qemu-options.hx | ||
24 | @@ -XXX,XX +XXX,XX @@ SRST | ||
25 | Enables or disables ACPI Heterogeneous Memory Attribute Table | ||
26 | (HMAT) support. The default is off. | ||
27 | |||
28 | - ``memory-backend='id'`` | ||
29 | + ``memory-backend='id'`` | ||
30 | An alternative to legacy ``-mem-path`` and ``mem-prealloc`` options. | ||
31 | Allows to use a memory backend as main RAM. | ||
32 | |||
33 | For example: | ||
34 | :: | ||
35 | - -object memory-backend-file,id=pc.ram,size=512M,mem-path=/hugetlbfs,prealloc=on,share=on | ||
36 | - -machine memory-backend=pc.ram | ||
37 | - -m 512M | ||
38 | + | ||
39 | + -object memory-backend-file,id=pc.ram,size=512M,mem-path=/hugetlbfs,prealloc=on,share=on | ||
40 | + -machine memory-backend=pc.ram | ||
41 | + -m 512M | ||
42 | |||
43 | Migration compatibility note: | ||
44 | - a) as backend id one shall use value of 'default-ram-id', advertised by | ||
45 | - machine type (available via ``query-machines`` QMP command), if migration | ||
46 | - to/from old QEMU (<5.0) is expected. | ||
47 | - b) for machine types 4.0 and older, user shall | ||
48 | - use ``x-use-canonical-path-for-ramblock-id=off`` backend option | ||
49 | - if migration to/from old QEMU (<5.0) is expected. | ||
50 | + | ||
51 | + * as backend id one shall use value of 'default-ram-id', advertised by | ||
52 | + machine type (available via ``query-machines`` QMP command), if migration | ||
53 | + to/from old QEMU (<5.0) is expected. | ||
54 | + * for machine types 4.0 and older, user shall | ||
55 | + use ``x-use-canonical-path-for-ramblock-id=off`` backend option | ||
56 | + if migration to/from old QEMU (<5.0) is expected. | ||
57 | + | ||
58 | For example: | ||
59 | :: | ||
60 | - -object memory-backend-ram,id=pc.ram,size=512M,x-use-canonical-path-for-ramblock-id=off | ||
61 | - -machine memory-backend=pc.ram | ||
62 | - -m 512M | ||
63 | + | ||
64 | + -object memory-backend-ram,id=pc.ram,size=512M,x-use-canonical-path-for-ramblock-id=off | ||
65 | + -machine memory-backend=pc.ram | ||
66 | + -m 512M | ||
67 | ERST | ||
68 | |||
69 | HXCOMM Deprecated by -machine | ||
70 | -- | ||
71 | 2.20.1 | ||
72 | |||
1 | From: Joel Stanley <joel@jms.id.au> | 1 | From: Yuquan Wang <wangyuquan1236@phytium.com.cn> |
---|---|---|---|
2 | 2 | ||
3 | The macro used to calculate the maximum memory size of the MMIO region | 3 | This extends the slots of xhci to 64, since the default xhci_sysbus |
4 | had a mistake, causing all GPIO models to create a mapping of 0x9D8. | 4 | just supports one slot. |
5 | The intent was to have it be 0x9D8 - 0x800. | ||
6 | 5 | ||
7 | This extra size doesn't matter on ast2400 and ast2500, which have a 4KB | 6 | Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn> |
8 | region set aside for the GPIO controller. | 7 | Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn> |
9 | 8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | |
10 | On the ast2600 the 3.3V and 1.8V GPIO controllers are 2KB apart, so the | 9 | Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> |
11 | regions would overlap. Worse, the 1.8V controller would map over the | 10 | Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> |
12 | top of the following peripheral, which happens to be the RTC. | 11 | Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn |
13 | |||
14 | The mmio region used by each device is a maximum of 2KB, so avoid the | ||
15 | calculations and hard code this as the maximum. | ||
16 | |||
17 | Fixes: 36d737ee82b2 ("hw/gpio: Add in AST2600 specific implementation") | ||
18 | Signed-off-by: Joel Stanley <joel@jms.id.au> | ||
19 | Reviewed-by: Rashmica Gupta <rashmica.g@gmail.com> | ||
20 | Reviewed-by: Cédric Le Goater <clg@kaod.org> | ||
21 | Message-id: 20210713065854.134634-2-joel@jms.id.au | ||
22 | [PMM: fix autocorrect error in commit message] | ||
23 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
24 | --- | 13 | --- |
25 | hw/gpio/aspeed_gpio.c | 3 +-- | 14 | hw/arm/sbsa-ref.c | 1 + |
26 | 1 file changed, 1 insertion(+), 2 deletions(-) | 15 | 1 file changed, 1 insertion(+) |
27 | 16 | ||
28 | diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c | 17 | diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c |
29 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/hw/gpio/aspeed_gpio.c | 19 | --- a/hw/arm/sbsa-ref.c |
31 | +++ b/hw/gpio/aspeed_gpio.c | 20 | +++ b/hw/arm/sbsa-ref.c |
32 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms) |
33 | #define GPIO_1_8V_MEM_SIZE 0x9D8 | 22 | hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base; |
34 | #define GPIO_1_8V_REG_ARRAY_SIZE ((GPIO_1_8V_MEM_SIZE - \ | 23 | int irq = sbsa_ref_irqmap[SBSA_XHCI]; |
35 | GPIO_1_8V_REG_OFFSET) >> 2) | 24 | DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS); |
36 | -#define GPIO_MAX_MEM_SIZE MAX(GPIO_3_6V_MEM_SIZE, GPIO_1_8V_MEM_SIZE) | 25 | + qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS); |
37 | 26 | ||
38 | static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio) | 27 | sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); |
39 | { | 28 | sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base); |
40 | @@ -XXX,XX +XXX,XX @@ static void aspeed_gpio_realize(DeviceState *dev, Error **errp) | ||
41 | } | ||
42 | |||
43 | memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s, | ||
44 | - TYPE_ASPEED_GPIO, GPIO_MAX_MEM_SIZE); | ||
45 | + TYPE_ASPEED_GPIO, 0x800); | ||
46 | |||
47 | sysbus_init_mmio(sbd, &s->iomem); | ||
48 | } | ||
49 | -- | 29 | -- |
50 | 2.20.1 | 30 | 2.34.1 |
51 | |||
1 | In Arm v8.1M the VECTPENDING field in the ICSR has new behaviour: if | 1 | In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to |
---|---|---|---|
2 | the register is accessed NonSecure and the highest priority pending | 2 | the address of the local variable htx. This means it can never be |
3 | enabled exception (that would be returned in the VECTPENDING field) | 3 | NULL, but later in the code we check it for NULL anyway. Coverity |
4 | targets Secure, then the VECTPENDING field must read 1 rather than | 4 | complains about this (CID 1507683) because the NULL check comes after |
5 | the exception number of the pending exception. Implement this. | 5 | a call to clock_adjtime() that assumes it is non-NULL. |
6 | |||
7 | Since phtx is always &htx, and is used only in three places, it's not | ||
8 | really necessary. Remove it, bringing the code structure into line | ||
9 | with that for TARGET_NR_clock_adjtime64, which already uses a simple | ||
10 | '&htx' when it wants a pointer to 'htx'. | ||
6 | 11 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 14 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Message-id: 20210723162146.5167-7-peter.maydell@linaro.org | 15 | Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org |
10 | --- | 16 | --- |
11 | hw/intc/armv7m_nvic.c | 31 ++++++++++++++++++++++++------- | 17 | linux-user/syscall.c | 12 +++++------- |
12 | 1 file changed, 24 insertions(+), 7 deletions(-) | 18 | 1 file changed, 5 insertions(+), 7 deletions(-) |
13 | 19 | ||
14 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 20 | diff --git a/linux-user/syscall.c b/linux-user/syscall.c |
15 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/intc/armv7m_nvic.c | 22 | --- a/linux-user/syscall.c |
17 | +++ b/hw/intc/armv7m_nvic.c | 23 | +++ b/linux-user/syscall.c |
18 | @@ -XXX,XX +XXX,XX @@ void armv7m_nvic_acknowledge_irq(void *opaque) | 24 | @@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, |
19 | nvic_irq_update(s); | 25 | #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME) |
20 | } | 26 | case TARGET_NR_clock_adjtime: |
21 | 27 | { | |
22 | +static bool vectpending_targets_secure(NVICState *s) | 28 | - struct timex htx, *phtx = &htx; |
23 | +{ | 29 | + struct timex htx; |
24 | + /* Return true if s->vectpending targets Secure state */ | 30 | |
25 | + if (s->vectpending_is_s_banked) { | 31 | - if (target_to_host_timex(phtx, arg2) != 0) { |
26 | + return true; | 32 | + if (target_to_host_timex(&htx, arg2) != 0) { |
27 | + } | 33 | return -TARGET_EFAULT; |
28 | + return !exc_is_banked(s->vectpending) && | 34 | } |
29 | + exc_targets_secure(s, s->vectpending); | 35 | - ret = get_errno(clock_adjtime(arg1, phtx)); |
30 | +} | 36 | - if (!is_error(ret) && phtx) { |
31 | + | 37 | - if (host_to_target_timex(arg2, phtx) != 0) { |
32 | void armv7m_nvic_get_pending_irq_info(void *opaque, | 38 | - return -TARGET_EFAULT; |
33 | int *pirq, bool *ptargets_secure) | 39 | - } |
34 | { | 40 | + ret = get_errno(clock_adjtime(arg1, &htx)); |
35 | @@ -XXX,XX +XXX,XX @@ void armv7m_nvic_get_pending_irq_info(void *opaque, | 41 | + if (!is_error(ret) && host_to_target_timex(arg2, &htx)) { |
36 | 42 | + return -TARGET_EFAULT; | |
37 | assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq); | 43 | } |
38 | 44 | } | |
39 | - if (s->vectpending_is_s_banked) { | 45 | return ret; |
40 | - targets_secure = true; | ||
41 | - } else { | ||
42 | - targets_secure = !exc_is_banked(pending) && | ||
43 | - exc_targets_secure(s, pending); | ||
44 | - } | ||
45 | + targets_secure = vectpending_targets_secure(s); | ||
46 | |||
47 | trace_nvic_get_pending_irq_info(pending, targets_secure); | ||
48 | |||
49 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | ||
50 | /* VECTACTIVE */ | ||
51 | val = cpu->env.v7m.exception; | ||
52 | /* VECTPENDING */ | ||
53 | - val |= (s->vectpending & 0x1ff) << 12; | ||
54 | + if (s->vectpending) { | ||
55 | + /* | ||
56 | + * From v8.1M VECTPENDING must read as 1 if accessed as | ||
57 | + * NonSecure and the highest priority pending and enabled | ||
58 | + * exception targets Secure. | ||
59 | + */ | ||
60 | + int vp = s->vectpending; | ||
61 | + if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && | ||
62 | + vectpending_targets_secure(s)) { | ||
63 | + vp = 1; | ||
64 | + } | ||
65 | + val |= (vp & 0x1ff) << 12; | ||
66 | + } | ||
67 | /* ISRPENDING - set if any external IRQ is pending */ | ||
68 | if (nvic_isrpending(s)) { | ||
69 | val |= (1 << 22); | ||
70 | -- | 46 | -- |
71 | 2.20.1 | 47 | 2.34.1 |
72 | 48 | ||
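For illustration only (a standalone sketch with invented names, not the linux-user/syscall.c code): the pattern removed above is a NULL check on a pointer that is initialised to the address of a local variable, which can never be NULL:

```c
#include <stdio.h>

struct timex_like { int modes; };      /* stand-in for struct timex */

int main(void)
{
    struct timex_like htx = { 0 };
    struct timex_like *phtx = &htx;     /* points at a local: never NULL */

    if (phtx != NULL) {                 /* always true, so the check is dead code */
        printf("phtx can never be NULL here\n");
    }

    /* The simpler equivalent the patch switches to: use &htx directly. */
    printf("modes = %d\n", (&htx)->modes);
    return 0;
}
```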
1 | In do_v7m_exception_exit(), we perform various checks as part of | 1 | Add comments to the in_* fields in the S1Translate struct |
---|---|---|---|
2 | performing the exception return. If one of these checks fails, the | 2 | that explain what they're doing. |
3 | architecture requires that we take an appropriate exception on the | ||
4 | existing stackframe. We implement this by calling | ||
5 | v7m_exception_taken() to set up to take the new exception, and then | ||
6 | immediately returning from do_v7m_exception_exit() without proceeding | ||
7 | any further with the unstack-and-exception-return process. | ||
8 | |||
9 | In a couple of checks that are new in v8.1M, we forgot the "return" | ||
10 | statement, with the effect that if bad code in the guest tripped over | ||
11 | these checks we would set up to take a UsageFault exception but then | ||
12 | blunder on trying to also unstack and return from the original | ||
13 | exception, with the probable result that the guest would crash. | ||
14 | |||
15 | Add the missing return statements. | ||
16 | 3 | ||
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
18 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
19 | Message-id: 20210723162146.5167-3-peter.maydell@linaro.org | 6 | Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org |
20 | --- | 7 | --- |
21 | target/arm/m_helper.c | 2 ++ | 8 | target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++ |
22 | 1 file changed, 2 insertions(+) | 9 | 1 file changed, 40 insertions(+) |
23 | 10 | ||
24 | diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c | 11 | diff --git a/target/arm/ptw.c b/target/arm/ptw.c |
25 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/target/arm/m_helper.c | 13 | --- a/target/arm/ptw.c |
27 | +++ b/target/arm/m_helper.c | 14 | +++ b/target/arm/ptw.c |
28 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | 15 | @@ -XXX,XX +XXX,XX @@ |
29 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | 16 | #endif |
30 | "stackframe: NSACR prevents clearing FPU registers\n"); | 17 | |
31 | v7m_exception_taken(cpu, excret, true, false); | 18 | typedef struct S1Translate { |
32 | + return; | 19 | + /* |
33 | } else if (!cpacr_pass) { | 20 | + * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk. |
34 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | 21 | + * Together with in_space, specifies the architectural translation regime. |
35 | exc_secure); | 22 | + */ |
36 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | 23 | ARMMMUIdx in_mmu_idx; |
37 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | 24 | + /* |
38 | "stackframe: CPACR prevents clearing FPU registers\n"); | 25 | + * in_ptw_idx: specifies which mmuidx to use for the actual |
39 | v7m_exception_taken(cpu, excret, true, false); | 26 | + * page table descriptor load operations. This will be one of the |
40 | + return; | 27 | + * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes. |
41 | } | 28 | + * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, |
42 | } | 29 | + * this field is updated accordingly. |
43 | /* Clear s0..s15, FPSCR and VPR */ | 30 | + */ |
31 | ARMMMUIdx in_ptw_idx; | ||
32 | + /* | ||
33 | + * in_space: the security space for this walk. This plus | ||
34 | + * the in_mmu_idx specify the architectural translation regime. | ||
35 | + * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, | ||
36 | + * this field is updated accordingly. | ||
37 | + * | ||
38 | + * Note that the security space for the in_ptw_idx may be different | ||
39 | + * from that for the in_mmu_idx. We do not need to explicitly track | ||
40 | + * the in_ptw_idx security space because: | ||
41 | + * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx | ||
42 | + * itself specifies the security space | ||
43 | + * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security | ||
44 | + * space used for ptw reads is the same as that of the security | ||
45 | + * space of the stage 1 translation for all cases except where | ||
46 | + * stage 1 is Secure; in that case the only possibilities for | ||
47 | + * the ptw read are Secure and NonSecure, and the in_ptw_idx | ||
48 | + * value being Stage2 vs Stage2_S distinguishes those. | ||
49 | + */ | ||
50 | ARMSecuritySpace in_space; | ||
51 | + /* | ||
52 | + * in_secure: whether the translation regime is a Secure one. | ||
53 | + * This is always equal to arm_space_is_secure(in_space). | ||
54 | + * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, | ||
55 | + * this field is updated accordingly. | ||
56 | + */ | ||
57 | bool in_secure; | ||
58 | + /* | ||
59 | + * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug | ||
60 | + * accesses will not update the guest page table access flags | ||
61 | + * and will not change the state of the softmmu TLBs. | ||
62 | + */ | ||
63 | bool in_debug; | ||
64 | /* | ||
65 | * If this is stage 2 of a stage 1+2 page table walk, then this must | ||
44 | -- | 66 | -- |
45 | 2.20.1 | 67 | 2.34.1 |
46 | |||
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate() |
---|---|---|---|
2 | so that the debug-access "call get_phys_addr_*" codepath is used both | ||
3 | when S1 is doing ptw reads from stage 2 and when it is doing ptw | ||
4 | reads from physical memory. However, we didn't update the | ||
5 | calculation of s2ptw->in_space and s2ptw->in_secure to account for | ||
6 | the "ptw reads from physical memory" case. This meant that debug | ||
7 | accesses when in Secure state broke. | ||
2 | 8 | ||
3 | Mirror the behavour of /proc/sys/abi/sve_default_vector_length | 9 | Create a new function S2_security_space() which returns the |
4 | under the real linux kernel. We have no way of passing along | 10 | correct security space to use for the ptw load, and use it to |
5 | a real default across exec like the kernel can, but this is a | 11 | determine the correct .in_secure and .in_space fields for the |
6 | decent way of adjusting the startup vector length of a process. | 12 | stage 2 lookup for the ptw load. |
7 | 13 | ||
8 | Resolves: https://gitlab.com/qemu-project/qemu/-/issues/482 | 14 | Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org> |
9 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 15 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 16 | Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org> |
11 | Message-id: 20210723203344.968563-4-richard.henderson@linaro.org | 17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
12 | [PMM: tweaked docs formatting, document -1 special-case, | 18 | Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org |
13 | added fixup patch from RTH mentioning QEMU's maximum veclen.] | 19 | Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate") |
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 20 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | --- | 21 | --- |
16 | docs/system/arm/cpu-features.rst | 15 ++++++++ | 22 | target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++++----- |
17 | target/arm/cpu.h | 5 +++ | 23 | 1 file changed, 32 insertions(+), 5 deletions(-) |
18 | target/arm/cpu.c | 14 ++++++-- | ||
19 | target/arm/cpu64.c | 60 ++++++++++++++++++++++++++++++++ | ||
20 | 4 files changed, 92 insertions(+), 2 deletions(-) | ||
21 | 24 | ||
22 | diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst | 25 | diff --git a/target/arm/ptw.c b/target/arm/ptw.c |
23 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/docs/system/arm/cpu-features.rst | 27 | --- a/target/arm/ptw.c |
25 | +++ b/docs/system/arm/cpu-features.rst | 28 | +++ b/target/arm/ptw.c |
26 | @@ -XXX,XX +XXX,XX @@ verbose command lines. However, the recommended way to select vector | 29 | @@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) |
27 | lengths is to explicitly enable each desired length. Therefore only | 30 | } |
28 | example's (1), (4), and (6) exhibit recommended uses of the properties. | 31 | } |
29 | 32 | ||
30 | +SVE User-mode Default Vector Length Property | 33 | +static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space, |
31 | +-------------------------------------------- | 34 | + ARMMMUIdx s2_mmu_idx) |
32 | + | 35 | +{ |
33 | +For qemu-aarch64, the cpu property ``sve-default-vector-length=N`` is | ||
34 | +defined to mirror the Linux kernel parameter file | ||
35 | +``/proc/sys/abi/sve_default_vector_length``. The default length, ``N``, | ||
36 | +is in units of bytes and must be between 16 and 8192. | ||
37 | +If not specified, the default vector length is 64. | ||
38 | + | ||
39 | +If the default length is larger than the maximum vector length enabled, | ||
40 | +the actual vector length will be reduced. Note that the maximum vector | ||
41 | +length supported by QEMU is 256. | ||
42 | + | ||
43 | +If this property is set to ``-1`` then the default vector length | ||
44 | +is set to the maximum possible length. | ||
45 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/target/arm/cpu.h | ||
48 | +++ b/target/arm/cpu.h | ||
49 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | ||
50 | /* Used to set the maximum vector length the cpu will support. */ | ||
51 | uint32_t sve_max_vq; | ||
52 | |||
53 | +#ifdef CONFIG_USER_ONLY | ||
54 | + /* Used to set the default vector length at process start. */ | ||
55 | + uint32_t sve_default_vq; | ||
56 | +#endif | ||
57 | + | ||
58 | /* | ||
59 | * In sve_vq_map each set bit is a supported vector length of | ||
60 | * (bit-number + 1) * 16 bytes, i.e. each bit number + 1 is the vector | ||
61 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/target/arm/cpu.c | ||
64 | +++ b/target/arm/cpu.c | ||
65 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev) | ||
66 | env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3); | ||
67 | /* with reasonable vector length */ | ||
68 | if (cpu_isar_feature(aa64_sve, cpu)) { | ||
69 | - env->vfp.zcr_el[1] = MIN(cpu->sve_max_vq - 1, 3); | ||
70 | + env->vfp.zcr_el[1] = | ||
71 | + aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1); | ||
72 | } | ||
73 | /* | ||
74 | * Enable TBI0 but not TBI1. | ||
75 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj) | ||
76 | QLIST_INIT(&cpu->pre_el_change_hooks); | ||
77 | QLIST_INIT(&cpu->el_change_hooks); | ||
78 | |||
79 | -#ifndef CONFIG_USER_ONLY | ||
80 | +#ifdef CONFIG_USER_ONLY | ||
81 | +# ifdef TARGET_AARCH64 | ||
82 | + /* | 36 | + /* |
83 | + * The linux kernel defaults to 512-bit vectors, when sve is supported. | 37 | + * Return the security space to use for stage 2 when doing |
84 | + * See documentation for /proc/sys/abi/sve_default_vector_length, and | 38 | + * the S1 page table descriptor load. |
85 | + * our corresponding sve-default-vector-length cpu property. | ||
86 | + */ | 39 | + */ |
87 | + cpu->sve_default_vq = 4; | 40 | + if (regime_is_stage2(s2_mmu_idx)) { |
88 | +# endif | 41 | + /* |
89 | +#else | 42 | + * The security space for ptw reads is almost always the same |
90 | /* Our inbound IRQ and FIQ lines */ | 43 | + * as that of the security space of the stage 1 translation. |
91 | if (kvm_enabled()) { | 44 | + * The only exception is when stage 1 is Secure; in that case |
92 | /* VIRQ and VFIQ are unused with KVM but we add them to maintain | 45 | + * the ptw read might be to the Secure or the NonSecure space |
93 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c | 46 | + * (but never Realm or Root), and the s2_mmu_idx tells us which. |
94 | index XXXXXXX..XXXXXXX 100644 | 47 | + * Root translations are always single-stage. |
95 | --- a/target/arm/cpu64.c | 48 | + */ |
96 | +++ b/target/arm/cpu64.c | 49 | + if (s1_space == ARMSS_Secure) { |
97 | @@ -XXX,XX +XXX,XX @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp) | 50 | + return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S); |
98 | cpu->isar.id_aa64pfr0 = t; | 51 | + } else { |
99 | } | 52 | + assert(s2_mmu_idx != ARMMMUIdx_Stage2_S); |
100 | 53 | + assert(s1_space != ARMSS_Root); | |
101 | +#ifdef CONFIG_USER_ONLY | 54 | + return s1_space; |
102 | +/* Mirror linux /proc/sys/abi/sve_default_vector_length. */ | 55 | + } |
103 | +static void cpu_arm_set_sve_default_vec_len(Object *obj, Visitor *v, | 56 | + } else { |
104 | + const char *name, void *opaque, | 57 | + /* ptw loads are from phys: the mmu idx itself says which space */ |
105 | + Error **errp) | 58 | + return arm_phys_to_space(s2_mmu_idx); |
106 | +{ | ||
107 | + ARMCPU *cpu = ARM_CPU(obj); | ||
108 | + int32_t default_len, default_vq, remainder; | ||
109 | + | ||
110 | + if (!visit_type_int32(v, name, &default_len, errp)) { | ||
111 | + return; | ||
112 | + } | 59 | + } |
113 | + | ||
114 | + /* Undocumented, but the kernel allows -1 to indicate "maximum". */ | ||
115 | + if (default_len == -1) { | ||
116 | + cpu->sve_default_vq = ARM_MAX_VQ; | ||
117 | + return; | ||
118 | + } | ||
119 | + | ||
120 | + default_vq = default_len / 16; | ||
121 | + remainder = default_len % 16; | ||
122 | + | ||
123 | + /* | ||
124 | + * Note that the 512 max comes from include/uapi/asm/sve_context.h | ||
125 | + * and is the maximum architectural width of ZCR_ELx.LEN. | ||
126 | + */ | ||
127 | + if (remainder || default_vq < 1 || default_vq > 512) { | ||
128 | + error_setg(errp, "cannot set sve-default-vector-length"); | ||
129 | + if (remainder) { | ||
130 | + error_append_hint(errp, "Vector length not a multiple of 16\n"); | ||
131 | + } else if (default_vq < 1) { | ||
132 | + error_append_hint(errp, "Vector length smaller than 16\n"); | ||
133 | + } else { | ||
134 | + error_append_hint(errp, "Vector length larger than %d\n", | ||
135 | + 512 * 16); | ||
136 | + } | ||
137 | + return; | ||
138 | + } | ||
139 | + | ||
140 | + cpu->sve_default_vq = default_vq; | ||
141 | +} | 60 | +} |
142 | + | 61 | + |
143 | +static void cpu_arm_get_sve_default_vec_len(Object *obj, Visitor *v, | 62 | /* Translate a S1 pagetable walk through S2 if needed. */ |
144 | + const char *name, void *opaque, | 63 | static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, |
145 | + Error **errp) | 64 | hwaddr addr, ARMMMUFaultInfo *fi) |
146 | +{ | ||
147 | + ARMCPU *cpu = ARM_CPU(obj); | ||
148 | + int32_t value = cpu->sve_default_vq * 16; | ||
149 | + | ||
150 | + visit_type_int32(v, name, &value, errp); | ||
151 | +} | ||
152 | +#endif | ||
153 | + | ||
154 | void aarch64_add_sve_properties(Object *obj) | ||
155 | { | 65 | { |
156 | uint32_t vq; | 66 | - ARMSecuritySpace space = ptw->in_space; |
157 | @@ -XXX,XX +XXX,XX @@ void aarch64_add_sve_properties(Object *obj) | 67 | bool is_secure = ptw->in_secure; |
158 | object_property_add(obj, name, "bool", cpu_arm_get_sve_vq, | 68 | ARMMMUIdx mmu_idx = ptw->in_mmu_idx; |
159 | cpu_arm_set_sve_vq, NULL, NULL); | 69 | ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; |
160 | } | 70 | @@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, |
161 | + | 71 | * From gdbstub, do not use softmmu so that we don't modify the |
162 | +#ifdef CONFIG_USER_ONLY | 72 | * state of the cpu at all, including softmmu tlb contents. |
163 | + /* Mirror linux /proc/sys/abi/sve_default_vector_length. */ | 73 | */ |
164 | + object_property_add(obj, "sve-default-vector-length", "int32", | 74 | + ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); |
165 | + cpu_arm_get_sve_default_vec_len, | 75 | S1Translate s2ptw = { |
166 | + cpu_arm_set_sve_default_vec_len, NULL, NULL); | 76 | .in_mmu_idx = s2_mmu_idx, |
167 | +#endif | 77 | .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), |
168 | } | 78 | - .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S, |
169 | 79 | - .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure | |
170 | void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) | 80 | - : space == ARMSS_Realm ? ARMSS_Realm |
81 | - : ARMSS_NonSecure), | ||
82 | + .in_secure = arm_space_is_secure(s2_space), | ||
83 | + .in_space = s2_space, | ||
84 | .in_debug = true, | ||
85 | }; | ||
86 | GetPhysAddrResult s2 = { }; | ||
171 | -- | 87 | -- |
172 | 2.20.1 | 88 | 2.34.1 |
173 | |||
1 | For M-profile, unlike A-profile, the low 2 bits of SP are defined to be | 1 | In get_phys_addr_twostage() the code that applies the effects of |
---|---|---|---|
2 | RES0H, which is to say that they must be hardwired to zero so that | 2 | VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure. |
3 | guest attempts to write non-zero values to them are ignored. | 3 | Now that we also have f.attrs.space for FEAT_RME, we need to keep the two |
4 | in sync. | ||
4 | 5 | ||
5 | Implement this behaviour by masking out the low bits: | 6 | These bits only have an effect for Secure space translations, not |
6 | * for writes to r13 by the gdbstub | 7 | for Root, so use the input in_space field to determine whether to |
7 | * for writes to any of the various flavours of SP via MSR | 8 | apply them rather than the input is_secure. This doesn't actually |
8 | * for writes to r13 via store_reg() in generated code | 9 | make a difference because Root translations are never two-stage, |
9 | 10 | but it's a little clearer. | |
10 | Note that all the direct uses of cpu_R[] in translate.c are in places | ||
11 | where the register is definitely not r13 (usually because that has | ||
12 | been checked for as an UNDEFINED or UNPREDICTABLE case and handled as | ||
13 | UNDEF). | ||
14 | |||
15 | All the other writes to regs[13] in C code are either: | ||
16 | * A-profile only code | ||
17 | * writes of values we can guarantee to be aligned, such as | ||
18 | - writes of previous-SP-value plus or minus a 4-aligned constant | ||
19 | - writes of the value in an SP limit register (which we already | ||
20 | enforce to be aligned) | ||
21 | 11 | ||
22 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
23 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
24 | Message-id: 20210723162146.5167-2-peter.maydell@linaro.org | 14 | Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org |
25 | --- | 15 | --- |
26 | target/arm/gdbstub.c | 4 ++++ | 16 | target/arm/ptw.c | 13 ++++++++----- |
27 | target/arm/m_helper.c | 14 ++++++++------ | 17 | 1 file changed, 8 insertions(+), 5 deletions(-) |
28 | target/arm/translate.c | 3 +++ | ||
29 | 3 files changed, 15 insertions(+), 6 deletions(-) | ||
30 | 18 | ||
31 | diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c | 19 | diff --git a/target/arm/ptw.c b/target/arm/ptw.c |
32 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
33 | --- a/target/arm/gdbstub.c | 21 | --- a/target/arm/ptw.c |
34 | +++ b/target/arm/gdbstub.c | 22 | +++ b/target/arm/ptw.c |
35 | @@ -XXX,XX +XXX,XX @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) | 23 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, |
36 | 24 | hwaddr ipa; | |
37 | if (n < 16) { | 25 | int s1_prot, s1_lgpgsz; |
38 | /* Core integer register. */ | 26 | bool is_secure = ptw->in_secure; |
39 | + if (n == 13 && arm_feature(env, ARM_FEATURE_M)) { | 27 | + ARMSecuritySpace in_space = ptw->in_space; |
40 | + /* M profile SP low bits are always 0 */ | 28 | bool ret, ipa_secure; |
41 | + tmp &= ~3; | 29 | ARMCacheAttrs cacheattrs1; |
42 | + } | 30 | ARMSecuritySpace ipa_space; |
43 | env->regs[n] = tmp; | 31 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, |
44 | return 4; | 32 | * Check if IPA translates to secure or non-secure PA space. |
45 | } | 33 | * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. |
46 | diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c | 34 | */ |
47 | index XXXXXXX..XXXXXXX 100644 | 35 | - result->f.attrs.secure = |
48 | --- a/target/arm/m_helper.c | 36 | - (is_secure |
49 | +++ b/target/arm/m_helper.c | 37 | - && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) |
50 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) | 38 | - && (ipa_secure |
51 | if (!env->v7m.secure) { | 39 | - || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)))); |
52 | return; | 40 | + if (in_space == ARMSS_Secure) { |
53 | } | 41 | + result->f.attrs.secure = |
54 | - env->v7m.other_ss_msp = val; | 42 | + !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) |
55 | + env->v7m.other_ss_msp = val & ~3; | 43 | + && (ipa_secure |
56 | return; | 44 | + || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); |
57 | case 0x89: /* PSP_NS */ | 45 | + result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); |
58 | if (!env->v7m.secure) { | 46 | + } |
59 | return; | 47 | |
60 | } | 48 | return false; |
61 | - env->v7m.other_ss_psp = val; | 49 | } |
62 | + env->v7m.other_ss_psp = val & ~3; | ||
63 | return; | ||
64 | case 0x8a: /* MSPLIM_NS */ | ||
65 | if (!env->v7m.secure) { | ||
66 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) | ||
67 | |||
68 | limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false]; | ||
69 | |||
70 | + val &= ~0x3; | ||
71 | + | ||
72 | if (val < limit) { | ||
73 | raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC()); | ||
74 | } | ||
75 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) | ||
76 | break; | ||
77 | case 8: /* MSP */ | ||
78 | if (v7m_using_psp(env)) { | ||
79 | - env->v7m.other_sp = val; | ||
80 | + env->v7m.other_sp = val & ~3; | ||
81 | } else { | ||
82 | - env->regs[13] = val; | ||
83 | + env->regs[13] = val & ~3; | ||
84 | } | ||
85 | break; | ||
86 | case 9: /* PSP */ | ||
87 | if (v7m_using_psp(env)) { | ||
88 | - env->regs[13] = val; | ||
89 | + env->regs[13] = val & ~3; | ||
90 | } else { | ||
91 | - env->v7m.other_sp = val; | ||
92 | + env->v7m.other_sp = val & ~3; | ||
93 | } | ||
94 | break; | ||
95 | case 10: /* MSPLIM */ | ||
96 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
97 | index XXXXXXX..XXXXXXX 100644 | ||
98 | --- a/target/arm/translate.c | ||
99 | +++ b/target/arm/translate.c | ||
100 | @@ -XXX,XX +XXX,XX @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var) | ||
101 | */ | ||
102 | tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3); | ||
103 | s->base.is_jmp = DISAS_JUMP; | ||
104 | + } else if (reg == 13 && arm_dc_feature(s, ARM_FEATURE_M)) { | ||
105 | + /* For M-profile SP bits [1:0] are always zero */ | ||
106 | + tcg_gen_andi_i32(var, var, ~3); | ||
107 | } | ||
108 | tcg_gen_mov_i32(cpu_R[reg], var); | ||
109 | tcg_temp_free_i32(var); | ||
110 | -- | 50 | -- |
111 | 2.20.1 | 51 | 2.34.1 |
112 | |||
Deleted patch | |||
---|---|---|---|
1 | For M-profile, we weren't reporting alignment faults triggered by the | ||
2 | generic TCG code correctly to the guest. These get passed into | ||
3 | arm_v7m_cpu_do_interrupt() as an EXCP_DATA_ABORT with an A-profile | ||
4 | style exception.fsr value of 1. We didn't check for this, and so | ||
5 | they fell through into the default of "assume this is an MPU fault" | ||
6 | and were reported to the guest as a data access violation MPU fault. | ||
7 | 1 | ||
8 | Report these alignment faults as UsageFaults which set the UNALIGNED | ||
9 | bit in the UFSR. | ||
10 | |||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Message-id: 20210723162146.5167-4-peter.maydell@linaro.org | ||
14 | --- | ||
15 | target/arm/m_helper.c | 8 ++++++++ | ||
16 | 1 file changed, 8 insertions(+) | ||
17 | |||
18 | diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/target/arm/m_helper.c | ||
21 | +++ b/target/arm/m_helper.c | ||
22 | @@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) | ||
23 | env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK; | ||
24 | break; | ||
25 | case EXCP_UNALIGNED: | ||
26 | + /* Unaligned faults reported by M-profile aware code */ | ||
27 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); | ||
28 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK; | ||
29 | break; | ||
30 | @@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) | ||
31 | } | ||
32 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); | ||
33 | break; | ||
34 | + case 0x1: /* Alignment fault reported by generic code */ | ||
35 | + qemu_log_mask(CPU_LOG_INT, | ||
36 | + "...really UsageFault with UFSR.UNALIGNED\n"); | ||
37 | + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK; | ||
38 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | ||
39 | + env->v7m.secure); | ||
40 | + break; | ||
41 | default: | ||
42 | /* | ||
43 | * All other FSR values are either MPU faults or "can't happen | ||
44 | -- | ||
45 | 2.20.1 | ||
46 | |||
Deleted patch | |||
---|---|---|---|
1 | The ICSR.ISRPENDING bit is set when an external interrupt is pending. | ||
2 | This is true whether that external interrupt is enabled or not. | ||
3 | This means that we can't use 's->vectpending == 0' as a shortcut to | ||
4 | "ISRPENDING is zero", because s->vectpending indicates only the | ||
5 | highest priority pending enabled interrupt. | ||
6 | 1 | ||
7 | Remove the incorrect optimization so that if there is no pending | ||
8 | enabled interrupt we fall through to scanning through the whole | ||
9 | interrupt array. | ||
10 | |||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
13 | Message-id: 20210723162146.5167-5-peter.maydell@linaro.org | ||
14 | --- | ||
15 | hw/intc/armv7m_nvic.c | 9 ++++----- | ||
16 | 1 file changed, 4 insertions(+), 5 deletions(-) | ||
17 | |||
18 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/hw/intc/armv7m_nvic.c | ||
21 | +++ b/hw/intc/armv7m_nvic.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static bool nvic_isrpending(NVICState *s) | ||
23 | { | ||
24 | int irq; | ||
25 | |||
26 | - /* We can shortcut if the highest priority pending interrupt | ||
27 | - * happens to be external or if there is nothing pending. | ||
28 | + /* | ||
29 | + * We can shortcut if the highest priority pending interrupt | ||
30 | + * happens to be external; if not we need to check the whole | ||
31 | + * vectors[] array. | ||
32 | */ | ||
33 | if (s->vectpending > NVIC_FIRST_IRQ) { | ||
34 | return true; | ||
35 | } | ||
36 | - if (s->vectpending == 0) { | ||
37 | - return false; | ||
38 | - } | ||
39 | |||
40 | for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) { | ||
41 | if (s->vectors[irq].pending) { | ||
42 | -- | ||
43 | 2.20.1 | ||
44 | |||
Deleted patch | |||
---|---|---|---|
1 | The VECTPENDING field in the ICSR is 9 bits wide, in bits [20:12] of | ||
2 | the register. We were incorrectly masking it to 8 bits, so it would | ||
3 | report the wrong value if the pending exception was greater than 256. | ||
4 | Fix the bug. | ||
5 | 1 | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 20210723162146.5167-6-peter.maydell@linaro.org | ||
9 | --- | ||
10 | hw/intc/armv7m_nvic.c | 2 +- | ||
11 | 1 file changed, 1 insertion(+), 1 deletion(-) | ||
12 | |||
13 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/hw/intc/armv7m_nvic.c | ||
16 | +++ b/hw/intc/armv7m_nvic.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | ||
18 | /* VECTACTIVE */ | ||
19 | val = cpu->env.v7m.exception; | ||
20 | /* VECTPENDING */ | ||
21 | - val |= (s->vectpending & 0xff) << 12; | ||
22 | + val |= (s->vectpending & 0x1ff) << 12; | ||
23 | /* ISRPENDING - set if any external IRQ is pending */ | ||
24 | if (nvic_isrpending(s)) { | ||
25 | val |= (1 << 22); | ||
26 | -- | ||
27 | 2.20.1 | ||
28 | |||
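A small worked example (a standalone sketch, not the NVIC code itself) of the masking bug described in the patch above, for a pending exception number above 256:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t vectpending = 300;                              /* example exception > 256 */

    uint32_t with_8bit_mask = (vectpending & 0xff) << 12;    /* old, wrong mask  */
    uint32_t with_9bit_mask = (vectpending & 0x1ff) << 12;   /* fixed 9-bit mask */

    printf("8-bit mask reports exception %u\n", with_8bit_mask >> 12);  /* 44  */
    printf("9-bit mask reports exception %u\n", with_9bit_mask >> 12);  /* 300 */
    return 0;
}
```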
Deleted patch | |||
---|---|---|---|
1 | From: Mao Zhongyi <maozhongyi@cmss.chinamobile.com> | ||
2 | 1 | ||
3 | Missed in commit f3478392 "docs: Move deprecation, build | ||
4 | and license info out of system/" | ||
5 | |||
6 | Signed-off-by: Mao Zhongyi <maozhongyi@cmss.chinamobile.com> | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Message-id: 20210723065828.1336760-1-maozhongyi@cmss.chinamobile.com | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | --- | ||
11 | configure | 2 +- | ||
12 | target/i386/cpu.c | 2 +- | ||
13 | MAINTAINERS | 2 +- | ||
14 | 3 files changed, 3 insertions(+), 3 deletions(-) | ||
15 | |||
16 | diff --git a/configure b/configure | ||
17 | index XXXXXXX..XXXXXXX 100755 | ||
18 | --- a/configure | ||
19 | +++ b/configure | ||
20 | @@ -XXX,XX +XXX,XX @@ fi | ||
21 | |||
22 | if test -n "${deprecated_features}"; then | ||
23 | echo "Warning, deprecated features enabled." | ||
24 | - echo "Please see docs/system/deprecated.rst" | ||
25 | + echo "Please see docs/about/deprecated.rst" | ||
26 | echo " features: ${deprecated_features}" | ||
27 | fi | ||
28 | |||
29 | diff --git a/target/i386/cpu.c b/target/i386/cpu.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/target/i386/cpu.c | ||
32 | +++ b/target/i386/cpu.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static const X86CPUDefinition builtin_x86_defs[] = { | ||
34 | * none", but this is just for compatibility while libvirt isn't | ||
35 | * adapted to resolve CPU model versions before creating VMs. | ||
36 | * See "Runnability guarantee of CPU models" at | ||
37 | - * docs/system/deprecated.rst. | ||
38 | + * docs/about/deprecated.rst. | ||
39 | */ | ||
40 | X86CPUVersion default_cpu_version = 1; | ||
41 | |||
42 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/MAINTAINERS | ||
45 | +++ b/MAINTAINERS | ||
46 | @@ -XXX,XX +XXX,XX @@ F: contrib/gitdm/* | ||
47 | |||
48 | Incompatible changes | ||
49 | R: libvir-list@redhat.com | ||
50 | -F: docs/system/deprecated.rst | ||
51 | +F: docs/about/deprecated.rst | ||
52 | |||
53 | Build System | ||
54 | ------------ | ||
55 | -- | ||
56 | 2.20.1 | ||
57 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Richard Henderson <richard.henderson@linaro.org> | ||
2 | 1 | ||
3 | Currently, our only caller is sve_zcr_len_for_el, which has | ||
4 | already masked the length extracted from ZCR_ELx, so the | ||
5 | masking done here is a nop. But we will shortly have uses | ||
6 | from other locations, where the length will be unmasked. | ||
7 | |||
8 | Saturate the length to ARM_MAX_VQ instead of truncating to | ||
9 | the low 4 bits. | ||
10 | |||
11 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Message-id: 20210723203344.968563-2-richard.henderson@linaro.org | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
15 | --- | ||
16 | target/arm/helper.c | 4 +++- | ||
17 | 1 file changed, 3 insertions(+), 1 deletion(-) | ||
18 | |||
19 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/target/arm/helper.c | ||
22 | +++ b/target/arm/helper.c | ||
23 | @@ -XXX,XX +XXX,XX @@ static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) | ||
24 | { | ||
25 | uint32_t end_len; | ||
26 | |||
27 | - end_len = start_len &= 0xf; | ||
28 | + start_len = MIN(start_len, ARM_MAX_VQ - 1); | ||
29 | + end_len = start_len; | ||
30 | + | ||
31 | if (!test_bit(start_len, cpu->sve_vq_map)) { | ||
32 | end_len = find_last_bit(cpu->sve_vq_map, start_len); | ||
33 | assert(end_len < start_len); | ||
34 | -- | ||
35 | 2.20.1 | ||
36 | |||
1 | From: Philippe Mathieu-Daudé <f4bug@amsat.org> | 1 | In commit f0a08b0913befbd we changed the type of the PC from |
---|---|---|---|
2 | target_ulong to vaddr. In doing so we inadvertently dropped the | ||
3 | zero-padding on the PC in trace lines (the second item inside the [] | ||
4 | in these lines). They used to look like this on AArch64, for | ||
5 | instance: | ||
2 | 6 | ||
3 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 7 | Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000] |
4 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 8 | |
5 | Message-id: 20210726150953.1218690-1-f4bug@amsat.org | 9 | and now they look like this: |
10 | Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000] | ||
11 | |||
12 | and if the PC happens to be somewhere low like 0x5000 | ||
13 | then the field is shown as /5000/. | ||
14 | |||
15 | This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier, | ||
16 | depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64 | ||
17 | with no width specifier. | ||
18 | |||
19 | Restore the zero-padding by adding an 016 width specifier to | ||
20 | this tracing and a couple of others that were similarly recently | ||
21 | changed to use VADDR_PRIx without a width specifier. | ||
22 | |||
23 | We can't unfortunately restore the "32-bit guests are padded to | ||
24 | 8 hex digits and 64-bit guests to 16 hex digits" behaviour so | ||
25 | easily. | ||
26 | |||
27 | Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr") | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 28 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
29 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
30 | Reviewed-by: Anton Johansson <anjo@rev.ng> | ||
31 | Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org | ||
7 | --- | 32 | --- |
8 | hw/arm/nseries.c | 2 +- | 33 | accel/tcg/cpu-exec.c | 4 ++-- |
9 | 1 file changed, 1 insertion(+), 1 deletion(-) | 34 | accel/tcg/translate-all.c | 2 +- |
35 | 2 files changed, 3 insertions(+), 3 deletions(-) | ||
10 | 36 | ||
11 | diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c | 37 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c |
12 | index XXXXXXX..XXXXXXX 100644 | 38 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/hw/arm/nseries.c | 39 | --- a/accel/tcg/cpu-exec.c |
14 | +++ b/hw/arm/nseries.c | 40 | +++ b/accel/tcg/cpu-exec.c |
15 | @@ -XXX,XX +XXX,XX @@ static uint32_t mipid_txrx(void *opaque, uint32_t cmd, int len) | 41 | @@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu, |
16 | default: | 42 | if (qemu_log_in_addr_range(pc)) { |
17 | bad_cmd: | 43 | qemu_log_mask(CPU_LOG_EXEC, |
18 | qemu_log_mask(LOG_GUEST_ERROR, | 44 | "Trace %d: %p [%08" PRIx64 |
19 | - "%s: unknown command %02x\n", __func__, s->cmd); | 45 | - "/%" VADDR_PRIx "/%08x/%08x] %s\n", |
20 | + "%s: unknown command 0x%02x\n", __func__, s->cmd); | 46 | + "/%016" VADDR_PRIx "/%08x/%08x] %s\n", |
21 | break; | 47 | cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, |
48 | tb->flags, tb->cflags, lookup_symbol(pc)); | ||
49 | |||
50 | @@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) | ||
51 | if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||
52 | vaddr pc = log_pc(cpu, last_tb); | ||
53 | if (qemu_log_in_addr_range(pc)) { | ||
54 | - qemu_log("Stopped execution of TB chain before %p [%" | ||
55 | + qemu_log("Stopped execution of TB chain before %p [%016" | ||
56 | VADDR_PRIx "] %s\n", | ||
57 | last_tb->tc.ptr, pc, lookup_symbol(pc)); | ||
58 | } | ||
59 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/accel/tcg/translate-all.c | ||
62 | +++ b/accel/tcg/translate-all.c | ||
63 | @@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) | ||
64 | if (qemu_loglevel_mask(CPU_LOG_EXEC)) { | ||
65 | vaddr pc = log_pc(cpu, tb); | ||
66 | if (qemu_log_in_addr_range(pc)) { | ||
67 | - qemu_log("cpu_io_recompile: rewound execution of TB to %" | ||
68 | + qemu_log("cpu_io_recompile: rewound execution of TB to %016" | ||
69 | VADDR_PRIx "\n", pc); | ||
70 | } | ||
22 | } | 71 | } |
23 | |||
24 | -- | 72 | -- |
25 | 2.20.1 | 73 | 2.34.1 |
26 | 74 | ||
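For reference, a standalone demonstration (not QEMU code) of the format-specifier difference discussed above: a bare PRIx64 prints a variable-width value, while adding a 016 width restores the fixed sixteen-digit zero padding:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t pc = 0x5000;                    /* an example low PC value   */

    printf("[%" PRIx64 "]\n", pc);           /* prints [5000]             */
    printf("[%016" PRIx64 "]\n", pc);        /* prints [0000000000005000] */
    return 0;
}
```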
1 | From: Richard Henderson <richard.henderson@linaro.org> | 1 | From: Tong Ho <tong.ho@amd.com> |
---|---|---|---|
2 | 2 | ||
3 | Rename from sve_zcr_get_valid_len and make accessible | 3 | Add a check in the bit-set operation to write the backstore |
4 | from outside of helper.c. | 4 | only if the affected bit was previously 0. |
5 | 5 | ||
6 | Signed-off-by: Richard Henderson <richard.henderson@linaro.org> | 6 | With this in place, there will be no need for callers to |
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 7 | do the checking in order to avoid unnecessary writes. |
8 | Message-id: 20210723203344.968563-3-richard.henderson@linaro.org | 8 | |
9 | Signed-off-by: Tong Ho <tong.ho@amd.com> | ||
10 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | ||
11 | Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com> | ||
12 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | --- | 14 | --- |
11 | target/arm/internals.h | 10 ++++++++++ | 15 | hw/nvram/xlnx-efuse.c | 11 +++++++++-- |
12 | target/arm/helper.c | 4 ++-- | 16 | 1 file changed, 9 insertions(+), 2 deletions(-) |
13 | 2 files changed, 12 insertions(+), 2 deletions(-) | ||
14 | 17 | ||
15 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 18 | diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/internals.h | 20 | --- a/hw/nvram/xlnx-efuse.c |
18 | +++ b/target/arm/internals.h | 21 | +++ b/hw/nvram/xlnx-efuse.c |
19 | @@ -XXX,XX +XXX,XX @@ void arm_translate_init(void); | 22 | @@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k) |
20 | void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); | 23 | |
21 | #endif /* CONFIG_TCG */ | 24 | bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit) |
22 | 25 | { | |
23 | +/** | 26 | + uint32_t set, *row; |
24 | + * aarch64_sve_zcr_get_valid_len: | 27 | + |
25 | + * @cpu: cpu context | 28 | if (efuse_ro_bits_find(s, bit)) { |
26 | + * @start_len: maximum len to consider | 29 | g_autofree char *path = object_get_canonical_path(OBJECT(s)); |
27 | + * | 30 | |
28 | + * Return the maximum supported sve vector length <= @start_len. | 31 | @@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit) |
29 | + * Note that both @start_len and the return value are in units | 32 | return false; |
30 | + * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128. | 33 | } |
31 | + */ | 34 | |
32 | +uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len); | 35 | - s->fuse32[bit / 32] |= 1 << (bit % 32); |
33 | 36 | - efuse_bdrv_sync(s, bit); | |
34 | enum arm_fprounding { | 37 | + /* Avoid back-end write unless there is a real update */ |
35 | FPROUNDING_TIEEVEN, | 38 | + row = &s->fuse32[bit / 32]; |
36 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 39 | + set = 1 << (bit % 32); |
37 | index XXXXXXX..XXXXXXX 100644 | 40 | + if (!(set & *row)) { |
38 | --- a/target/arm/helper.c | 41 | + *row |= set; |
39 | +++ b/target/arm/helper.c | 42 | + efuse_bdrv_sync(s, bit); |
40 | @@ -XXX,XX +XXX,XX @@ int sve_exception_el(CPUARMState *env, int el) | 43 | + } |
41 | return 0; | 44 | return true; |
42 | } | 45 | } |
43 | 46 | ||
44 | -static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) | ||
45 | +uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) | ||
46 | { | ||
47 | uint32_t end_len; | ||
48 | |||
49 | @@ -XXX,XX +XXX,XX @@ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) | ||
50 | zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); | ||
51 | } | ||
52 | |||
53 | - return sve_zcr_get_valid_len(cpu, zcr_len); | ||
54 | + return aarch64_sve_zcr_get_valid_len(cpu, zcr_len); | ||
55 | } | ||
56 | |||
57 | static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | ||
58 | -- | 47 | -- |
59 | 2.20.1 | 48 | 2.34.1 |
60 | 49 | ||
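Finally, a rough standalone sketch of the check-before-write idea in the eFuse patch above (the array and sync-function names here are invented for illustration; they are not the xlnx-efuse API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fuse_words[4];                  /* hypothetical fuse storage  */

static void sync_word_to_backstore(unsigned w)  /* hypothetical backing store */
{
    printf("syncing word %u\n", w);
}

/* Set a bit, touching the backing store only when the bit actually changes. */
static bool fuse_set_bit(unsigned int bit)
{
    uint32_t set = 1u << (bit % 32);
    uint32_t *row = &fuse_words[bit / 32];

    if (!(*row & set)) {
        *row |= set;
        sync_word_to_backstore(bit / 32);
    }
    return true;
}

int main(void)
{
    fuse_set_bit(5);    /* first set: writes the backing store  */
    fuse_set_bit(5);    /* already set: no redundant write      */
    return 0;
}
```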