A last small set of bug fixes before rc1.

thanks
-- PMM

The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637:

  Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717

for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4:

  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

----------------------------------------------------------------
Peter Maydell (5):
      linux-user: Remove pointless NULL check in clock_adjtime handling
      target/arm/ptw.c: Add comments to S1Translate struct fields
      target/arm: Fix S1_ptw_translate() debug path
      target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
      accel/tcg: Zero-pad PC in TCG CPU exec trace lines

Tong Ho (1):
      hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

Yuquan Wang (1):
      hw/arm/sbsa-ref: set 'slots' property of xhci

 accel/tcg/cpu-exec.c      |  4 +--
 accel/tcg/translate-all.c |  2 +-
 hw/arm/sbsa-ref.c         |  1 +
 hw/nvram/xlnx-efuse.c     | 11 ++++--
 linux-user/syscall.c      | 12 +++----
 target/arm/ptw.c          | 90 +++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 98 insertions(+), 22 deletions(-)

From: Yuquan Wang <wangyuquan1236@phytium.com.cn>

This extends the slots of xhci to 64, since the default xhci_sysbus
just supports one slot.

Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn>
Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
     DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
 
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
--
2.34.1

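A brief aside on why the new line sits where it does (not part of the patch):
qdev properties such as "slots" have to be set after qdev_new() but before
the device is realized; once realize has run they can no longer be changed.
The sketch below only reuses names already visible in the hunk above and is
an illustration of that ordering, not additional code from the series.

    DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
    /* properties are configured while the device is still unrealized... */
    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
    /* ...and become fixed once the device has been realized */
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
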
In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
the address of the local variable htx. This means it can never be
NULL, but later in the code we check it for NULL anyway. Coverity
complains about this (CID 1507683) because the NULL check comes after
a call to clock_adjtime() that assumes it is non-NULL.

Since phtx is always &htx, and is used only in three places, it's not
really necessary. Remove it, bringing the code structure into line
with that for TARGET_NR_clock_adjtime64, which already uses a simple
'&htx' when it wants a pointer to 'htx'.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
---
 linux-user/syscall.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
     case TARGET_NR_clock_adjtime:
         {
-            struct timex htx, *phtx = &htx;
+            struct timex htx;
 
-            if (target_to_host_timex(phtx, arg2) != 0) {
+            if (target_to_host_timex(&htx, arg2) != 0) {
                 return -TARGET_EFAULT;
             }
-            ret = get_errno(clock_adjtime(arg1, phtx));
-            if (!is_error(ret) && phtx) {
-                if (host_to_target_timex(arg2, phtx) != 0) {
-                    return -TARGET_EFAULT;
-                }
+            ret = get_errno(clock_adjtime(arg1, &htx));
+            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+                return -TARGET_EFAULT;
             }
         }
         return ret;
--
2.34.1

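For readers unfamiliar with the host interface being wrapped here:
clock_adjtime(2) takes a clock id and a struct timex, and with modes == 0 it
performs no adjustment at all, just filling in the structure and returning
the clock state. Below is a minimal standalone sketch of that read-only
usage in ordinary host C; it is not QEMU code, and depending on the libc you
may need _GNU_SOURCE for the prototype.

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/timex.h>

int main(void)
{
    struct timex tx;
    int state;

    memset(&tx, 0, sizeof(tx));
    tx.modes = 0;                 /* no adjustment bits set: query only */

    state = clock_adjtime(CLOCK_REALTIME, &tx);
    if (state < 0) {
        perror("clock_adjtime");
        return 1;
    }
    /* on success the kernel has filled in tx and returned the clock state */
    printf("clock state %d, freq %ld, maxerror %ld\n",
           state, tx.freq, tx.maxerror);
    return 0;
}
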
Add comments to the in_* fields in the S1Translate struct
that explain what they're doing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
---
 target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #endif
 
 typedef struct S1Translate {
+    /*
+     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
--
2.34.1

In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
so that the debug-access "call get_phys_addr_*" codepath is used both
when S1 is doing ptw reads from stage 2 and when it is doing ptw
reads from physical memory. However, we didn't update the
calculation of s2ptw->in_space and s2ptw->in_secure to account for
the "ptw reads from physical memory" case. This meant that debug
accesses when in Secure state broke.

Create a new function S2_security_space() which returns the
correct security space to use for the ptw load, and use it to
determine the correct .in_secure and .in_space fields for the
stage 2 lookup for the ptw load.

Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }
 
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
--
2.34.1

In get_phys_addr_twostage() the code that applies the effects of
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
Now we also have f.attrs.space for FEAT_RME, so we need to keep the
two in sync.

These bits only have an effect for Secure space translations, not
for Root, so use the input in_space field to determine whether to
apply them rather than the input is_secure. This doesn't actually
make a difference because Root translations are never two-stage,
but it's a little clearer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
---
 target/arm/ptw.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
     bool is_secure = ptw->in_secure;
+    ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
     ARMSecuritySpace ipa_space;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
      * Check if IPA translates to secure or non-secure PA space.
      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
      */
-    result->f.attrs.secure =
-        (is_secure
-         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-         && (ipa_secure
-             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+    if (in_space == ARMSS_Secure) {
+        result->f.attrs.secure =
+            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+            && (ipa_secure
+                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+    }
 
     return false;
 }
--
2.34.1

In commit f0a08b0913befbd we changed the type of the PC from
target_ulong to vaddr. In doing so we inadvertently dropped the
zero-padding on the PC in trace lines (the second item inside the []
in these lines). They used to look like this on AArch64, for
instance:

Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]

and now they look like this:
Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]

and if the PC happens to be somewhere low like 0x5000
then the field is shown as /5000/.

This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
with no width specifier.

Restore the zero-padding by adding an 016 width specifier to
this tracing and a couple of others that were similarly recently
changed to use VADDR_PRIx without a width specifier.

We can't unfortunately restore the "32-bit guests are padded to
8 hex digits and 64-bit guests to 16 hex digits" behaviour so
easily.

Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
---
 accel/tcg/cpu-exec.c      | 4 ++--
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
 
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
         if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
             vaddr pc = log_pc(cpu, last_tb);
             if (qemu_log_in_addr_range(pc)) {
-                qemu_log("Stopped execution of TB chain before %p [%"
+                qemu_log("Stopped execution of TB chain before %p [%016"
                          VADDR_PRIx "] %s\n",
                          last_tb->tc.ptr, pc, lookup_symbol(pc));
             }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                      VADDR_PRIx "\n", pc);
         }
     }
--
2.34.1

From: Tong Ho <tong.ho@amd.com>

Add a check in the bit-set operation to write the backstore
only if the affected bit is 0 before.

With this in place, there will be no need for callers to
do the checking in order to avoid unnecessary writes.

Signed-off-by: Tong Ho <tong.ho@amd.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
 
 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));
 
@@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }
 
-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
     return true;
 }
 
--
2.34.1
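
The idea above generalizes beyond eFuses: when every bit-set may trigger a
slow or wear-limited write to persistent storage, test the bit first and
skip the whole persist step when it is already set. Below is a small
standalone sketch of that pattern in plain C; all names are invented for
illustration and it is only loosely modelled on the patch, not QEMU code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real persistence hook (e.g. a pwrite() of one row). */
static void backstore_write_row(const uint32_t *rows, unsigned int row_idx)
{
    printf("persisting row %u = 0x%08x\n", row_idx, rows[row_idx]);
}

/* Set one bit; persist the containing 32-bit row only if the bit was clear. */
static bool fuse_set_bit(uint32_t *rows, unsigned int bit)
{
    uint32_t *row = &rows[bit / 32];
    uint32_t mask = UINT32_C(1) << (bit % 32);

    if (*row & mask) {
        return false;           /* already programmed: no write issued */
    }
    *row |= mask;
    backstore_write_row(rows, bit / 32);
    return true;
}

int main(void)
{
    uint32_t rows[4] = { 0 };

    fuse_set_bit(rows, 37);     /* flips a bit, so row 1 is persisted */
    fuse_set_bit(rows, 37);     /* second call changes nothing and skips the write */
    return 0;
}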