1 | Mostly my stuff with a few easy patches from others. I know I have | 1 | Ten arm-related bug fixes for 2.12... |
---|---|---|---|
2 | a few big series in my to-review queue, but I've been too jetlagged | ||
3 | to try to tackle those :-( | ||
4 | 2 | ||
5 | thanks | 3 | thanks |
6 | -- PMM | 4 | -- PMM |
7 | 5 | ||
8 | The following changes since commit a26a98dfb9d448d7234d931ae3720feddf6f0651: | 6 | The following changes since commit 4c2c1015905fa1d616750dfe024b4c0b35875950: |
9 | 7 | ||
10 | Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171006' into staging (2017-10-06 13:19:03 +0100) | 8 | Merge remote-tracking branch 'remotes/borntraeger/tags/s390x-20180323' into staging (2018-03-23 10:20:54 +0000) |
11 | 9 | ||
12 | are available in the git repository at: | 10 | are available in the Git repository at: |
13 | 11 | ||
14 | git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20171006 | 12 | git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20180323 |
15 | 13 | ||
16 | for you to fetch changes up to 04829ce334bece78d4fa1d0fdbc8bc27dae9b242: | 14 | for you to fetch changes up to 548f514cf89dd9ab39c0cb4c063097bccf141fdd: |
17 | 15 | ||
18 | nvic: Add missing code for writing SHCSR.HARDFAULTPENDED bit (2017-10-06 16:46:49 +0100) | 16 | target/arm: Always set FAR to a known unknown value for debug exceptions (2018-03-23 18:26:46 +0000) |
19 | 17 | ||
20 | ---------------------------------------------------------------- | 18 | ---------------------------------------------------------------- |
21 | target-arm: | 19 | target-arm queue: |
22 | * v8M: more preparatory work | 20 | * arm/translate-a64: don't lose interrupts after unmasking via write to DAIF |
23 | * nvic: reset properly rather than leaving the nvic in a weird state | 21 | * sdhci: fix incorrect use of Error * |
24 | * xlnx-zynqmp: Mark the "xlnx, zynqmp" device with user_creatable = false | 22 | * hw/intc/arm_gicv3: Fix secure-GIC NS ICC_PMR and ICC_RPR accesses |
25 | * sd: fix out-of-bounds check for multi block reads | 23 | * hw/arm/bcm2836: Use the Cortex-A7 instead of Cortex-A15 |
26 | * arm: Fix SMC reporting to EL2 when QEMU provides PSCI | 24 | * i.MX: Support serial RS-232 break properly |
25 | * mach-virt: Set VM's SMBIOS system version to mc->name | ||
26 | * target/arm: Honour MDCR_EL2.TDE when routing exceptions due to BKPT/BRK | ||
27 | * target/arm: Factor out code to calculate FSR for debug exceptions | ||
28 | * target/arm: Set FSR for BKPT, BRK when raising exception | ||
29 | * target/arm: Always set FAR to a known unknown value for debug exceptions | ||
27 | 30 | ||
28 | ---------------------------------------------------------------- | 31 | ---------------------------------------------------------------- |
29 | Jan Kiszka (1): | 32 | Paolo Bonzini (1): |
30 | arm: Fix SMC reporting to EL2 when QEMU provides PSCI | 33 | sdhci: fix incorrect use of Error * |
31 | 34 | ||
32 | Michael Olbrich (1): | 35 | Peter Maydell (6): |
33 | hw/sd: fix out-of-bounds check for multi block reads | 36 | hw/intc/arm_gicv3: Fix secure-GIC NS ICC_PMR and ICC_RPR accesses |
37 | hw/arm/bcm2836: Use the Cortex-A7 instead of Cortex-A15 | ||
38 | target/arm: Honour MDCR_EL2.TDE when routing exceptions due to BKPT/BRK | ||
39 | target/arm: Factor out code to calculate FSR for debug exceptions | ||
40 | target/arm: Set FSR for BKPT, BRK when raising exception | ||
41 | target/arm: Always set FAR to a known unknown value for debug exceptions | ||
34 | 42 | ||
35 | Peter Maydell (17): | 43 | Trent Piepho (1): |
36 | nvic: Clear the vector arrays and prigroup on reset | 44 | i.MX: Support serial RS-232 break properly |
37 | target/arm: Don't switch to target stack early in v7M exception return | ||
38 | target/arm: Prepare for CONTROL.SPSEL being nonzero in Handler mode | ||
39 | target/arm: Restore security state on exception return | ||
40 | target/arm: Restore SPSEL to correct CONTROL register on exception return | ||
41 | target/arm: Check for xPSR mismatch usage faults earlier for v8M | ||
42 | target/arm: Warn about restoring to unaligned stack | ||
43 | target/arm: Don't warn about exception return with PC low bit set for v8M | ||
44 | target/arm: Add new-in-v8M SFSR and SFAR | ||
45 | target/arm: Update excret sanity checks for v8M | ||
46 | target/arm: Add support for restoring v8M additional state context | ||
47 | target/arm: Add v8M support to exception entry code | ||
48 | nvic: Implement Security Attribution Unit registers | ||
49 | target/arm: Implement security attribute lookups for memory accesses | ||
50 | target/arm: Fix calculation of secure mm_idx values | ||
51 | target/arm: Factor out "get mmuidx for specified security state" | ||
52 | nvic: Add missing code for writing SHCSR.HARDFAULTPENDED bit | ||
53 | 45 | ||
54 | Thomas Huth (1): | 46 | Victor Kamensky (1): |
55 | hw/arm/xlnx-zynqmp: Mark the "xlnx, zynqmp" device with user_creatable = false | 47 | arm/translate-a64: treat DISAS_UPDATE as variant of DISAS_EXIT |
56 | 48 | ||
57 | target/arm/cpu.h | 60 ++++- | 49 | Wei Huang (1): |
58 | target/arm/internals.h | 15 ++ | 50 | mach-virt: Set VM's SMBIOS system version to mc->name |
59 | hw/arm/xlnx-zynqmp.c | 2 + | ||
60 | hw/intc/armv7m_nvic.c | 158 ++++++++++- | ||
61 | hw/sd/sd.c | 12 +- | ||
62 | target/arm/cpu.c | 27 ++ | ||
63 | target/arm/helper.c | 691 +++++++++++++++++++++++++++++++++++++++++++------ | ||
64 | target/arm/machine.c | 16 ++ | ||
65 | target/arm/op_helper.c | 27 +- | ||
66 | 9 files changed, 898 insertions(+), 110 deletions(-) | ||
67 | 51 | ||
52 | include/hw/arm/virt.h | 1 + | ||
53 | include/hw/char/imx_serial.h | 1 + | ||
54 | target/arm/helper.h | 1 + | ||
55 | target/arm/internals.h | 25 +++++++++++++++++++++++++ | ||
56 | hw/arm/bcm2836.c | 2 +- | ||
57 | hw/arm/raspi.c | 2 +- | ||
58 | hw/arm/virt.c | 8 +++++++- | ||
59 | hw/char/imx_serial.c | 5 ++++- | ||
60 | hw/intc/arm_gicv3_cpuif.c | 6 +++--- | ||
61 | hw/sd/sdhci.c | 4 ++-- | ||
62 | target/arm/helper.c | 1 - | ||
63 | target/arm/op_helper.c | 33 ++++++++++++++++++++++----------- | ||
64 | target/arm/translate-a64.c | 21 ++++++++++++++++----- | ||
65 | target/arm/translate.c | 19 ++++++++++++++----- | ||
66 | 14 files changed, 98 insertions(+), 31 deletions(-) | ||
1 | From: Thomas Huth <thuth@redhat.com> | 1 | From: Victor Kamensky <kamensky@cisco.com> |
---|---|---|---|
2 | 2 | ||
3 | The device uses serial_hds in its realize function and thus can't be | 3 | In the OE project, a 4.15 Linux kernel boot hang was observed under |
4 | used twice. Apart from that, the comma in its name makes it quite hard | 4 | single-CPU aarch64 QEMU. The kernel was in a loop waiting for |
5 | to use for the user anyway, since a comma is normally used to separate | 5 | vtimer arrival, spinning in TCG-generated blocks, while the interrupt |
6 | the device name from its properties when using the "-device" parameter | 6 | was pending unprocessed. This happened because when QEMU tried to |
7 | or the "device_add" HMP command. | 7 | handle the vtimer interrupt the target had interrupts disabled; as a |
8 | result the flag indicating a TCG exit, cpu->icount_decr.u16.high, |
9 | was cleared, but arm_cpu_exec_interrupt did not call |
10 | arm_cpu_do_interrupt to process the interrupt. Later, when the target |
11 | re-enabled interrupts, it did so without an exit to the main loop, so |
12 | the following code, which waited for the result of interrupt |
13 | execution, ran in an infinite loop. |
8 | 14 | ||
9 | Signed-off-by: Thomas Huth <thuth@redhat.com> | 15 | To solve the problem, instructions that operate on CPU system state |
10 | Reviewed-by: Alistair Francis <alistair.francis@xilinx.com> | 16 | (i.e. enable/disable interrupts) and are marked as DISAS_UPDATE |
11 | Message-id: 1506441116-16627-1-git-send-email-thuth@redhat.com | 17 | should be treated as a variant of DISAS_EXIT and should be |
18 | forced to exit back to the main loop, so that QEMU gets a chance |
19 | to process pending CPU state updates, including pending |
20 | interrupts. |
21 | |||
22 | This change brings consistency with how DISAS_UPDATE is treated |
23 | in the aarch32 case. |
24 | |||
25 | CC: Peter Maydell <peter.maydell@linaro.org> | ||
26 | CC: Alex Bennée <alex.bennee@linaro.org> | ||
27 | CC: qemu-stable@nongnu.org | ||
28 | Suggested-by: Peter Maydell <peter.maydell@linaro.org> | ||
29 | Signed-off-by: Victor Kamensky <kamensky@cisco.com> | ||
30 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
31 | Message-id: 1521526368-1996-1-git-send-email-kamensky@cisco.com | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 32 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 33 | --- |
14 | hw/arm/xlnx-zynqmp.c | 2 ++ | 34 | target/arm/translate-a64.c | 6 +++--- |
15 | 1 file changed, 2 insertions(+) | 35 | 1 file changed, 3 insertions(+), 3 deletions(-) |
16 | 36 | ||
17 | diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c | 37 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c |
18 | index XXXXXXX..XXXXXXX 100644 | 38 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/hw/arm/xlnx-zynqmp.c | 39 | --- a/target/arm/translate-a64.c |
20 | +++ b/hw/arm/xlnx-zynqmp.c | 40 | +++ b/target/arm/translate-a64.c |
21 | @@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data) | 41 | @@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) |
22 | 42 | case DISAS_UPDATE: | |
23 | dc->props = xlnx_zynqmp_props; | 43 | gen_a64_set_pc_im(dc->pc); |
24 | dc->realize = xlnx_zynqmp_realize; | 44 | /* fall through */ |
25 | + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ | 45 | - case DISAS_JUMP: |
26 | + dc->user_creatable = false; | 46 | - tcg_gen_lookup_and_goto_ptr(); |
27 | } | 47 | - break; |
28 | 48 | case DISAS_EXIT: | |
29 | static const TypeInfo xlnx_zynqmp_type_info = { | 49 | tcg_gen_exit_tb(0); |
50 | break; | ||
51 | + case DISAS_JUMP: | ||
52 | + tcg_gen_lookup_and_goto_ptr(); | ||
53 | + break; | ||
54 | case DISAS_NORETURN: | ||
55 | case DISAS_SWI: | ||
56 | break; | ||
30 | -- | 57 | -- |
31 | 2.7.4 | 58 | 2.16.2 |
32 | 59 | ||
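The DISAS_UPDATE change in the right-hand patch above relies on the fact that
only the outer execution loop ever delivers pending interrupts. A heavily
simplified sketch of that loop, using illustrative names rather than QEMU's
own, shows why chaining TBs via lookup_and_goto_ptr is not enough after an
instruction that may unmask interrupts:

    /* Simplified sketch of the TCG execution loop; names are illustrative. */
    for (;;) {
        process_pending_interrupts(cpu);   /* only reached between TB runs   */
        tb = lookup_tb(cpu);               /* find/translate the next block  */
        run_tb(cpu, tb);                   /* with goto_ptr the generated code
                                            * chains straight to the next TB
                                            * in here and never returns, so an
                                            * interrupt unmasked by the guest
                                            * is never delivered; exit_tb(0)
                                            * returns to this loop instead   */
    }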
1 | From: Jan Kiszka <jan.kiszka@siemens.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This properly forwards SMC events to EL2 when PSCI is provided by QEMU | 3 | Detected by Coverity (CID 1386072, 1386073, 1386076, 1386077). local_err |
4 | itself and, thus, ARM_FEATURE_EL3 is off. | 4 | was unused, and this made the static analyzer unhappy. |
5 | 5 | ||
6 | Found and tested with the Jailhouse hypervisor. Solution based on | 6 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
7 | suggestions by Peter Maydell. | 7 | Message-id: 20180320151355.25854-1-pbonzini@redhat.com |
8 | |||
9 | Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> | ||
10 | Message-id: 4f243068-aaea-776f-d18f-f9e05e7be9cd@siemens.com | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 10 | --- |
14 | target/arm/helper.c | 9 ++++++++- | 11 | hw/sd/sdhci.c | 4 ++-- |
15 | target/arm/op_helper.c | 27 +++++++++++++++++---------- | 12 | 1 file changed, 2 insertions(+), 2 deletions(-) |
16 | 2 files changed, 25 insertions(+), 11 deletions(-) | ||
17 | 13 | ||
18 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 14 | diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c |
19 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/target/arm/helper.c | 16 | --- a/hw/sd/sdhci.c |
21 | +++ b/target/arm/helper.c | 17 | +++ b/hw/sd/sdhci.c |
22 | @@ -XXX,XX +XXX,XX @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | 18 | @@ -XXX,XX +XXX,XX @@ static void sdhci_pci_realize(PCIDevice *dev, Error **errp) |
23 | 19 | Error *local_err = NULL; | |
24 | if (arm_feature(env, ARM_FEATURE_EL3)) { | 20 | |
25 | valid_mask &= ~HCR_HCD; | 21 | sdhci_initfn(s); |
26 | - } else { | 22 | - sdhci_common_realize(s, errp); |
27 | + } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { | 23 | + sdhci_common_realize(s, &local_err); |
28 | + /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. | 24 | if (local_err) { |
29 | + * However, if we're using the SMC PSCI conduit then QEMU is | 25 | error_propagate(errp, local_err); |
30 | + * effectively acting like EL3 firmware and so the guest at | 26 | return; |
31 | + * EL2 should retain the ability to prevent EL1 from being | 27 | @@ -XXX,XX +XXX,XX @@ static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp) |
32 | + * able to make SMC calls into the ersatz firmware, so in | 28 | SysBusDevice *sbd = SYS_BUS_DEVICE(dev); |
33 | + * that case HCR.TSC should be read/write. | 29 | Error *local_err = NULL; |
34 | + */ | 30 | |
35 | valid_mask &= ~HCR_TSC; | 31 | - sdhci_common_realize(s, errp); |
36 | } | 32 | + sdhci_common_realize(s, &local_err); |
37 | 33 | if (local_err) { | |
38 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c | 34 | error_propagate(errp, local_err); |
39 | index XXXXXXX..XXXXXXX 100644 | 35 | return; |
40 | --- a/target/arm/op_helper.c | ||
41 | +++ b/target/arm/op_helper.c | ||
42 | @@ -XXX,XX +XXX,XX @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) | ||
43 | */ | ||
44 | bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure; | ||
45 | |||
46 | - if (arm_is_psci_call(cpu, EXCP_SMC)) { | ||
47 | - /* If PSCI is enabled and this looks like a valid PSCI call then | ||
48 | - * that overrides the architecturally mandated SMC behaviour. | ||
49 | + if (!arm_feature(env, ARM_FEATURE_EL3) && | ||
50 | + cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { | ||
51 | + /* If we have no EL3 then SMC always UNDEFs and can't be | ||
52 | + * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3 | ||
53 | + * firmware within QEMU, and we want an EL2 guest to be able | ||
54 | + * to forbid its EL1 from making PSCI calls into QEMU's | ||
55 | + * "firmware" via HCR.TSC, so for these purposes treat | ||
56 | + * PSCI-via-SMC as implying an EL3. | ||
57 | */ | ||
58 | - return; | ||
59 | - } | ||
60 | - | ||
61 | - if (!arm_feature(env, ARM_FEATURE_EL3)) { | ||
62 | - /* If we have no EL3 then SMC always UNDEFs */ | ||
63 | undef = true; | ||
64 | } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) { | ||
65 | - /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */ | ||
66 | + /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. | ||
67 | + * We also want an EL2 guest to be able to forbid its EL1 from | ||
68 | + * making PSCI calls into QEMU's "firmware" via HCR.TSC. | ||
69 | + */ | ||
70 | raise_exception(env, EXCP_HYP_TRAP, syndrome, 2); | ||
71 | } | ||
72 | |||
73 | - if (undef) { | ||
74 | + /* If PSCI is enabled and this looks like a valid PSCI call then | ||
75 | + * suppress the UNDEF -- we'll catch the SMC exception and | ||
76 | + * implement the PSCI call behaviour there. | ||
77 | + */ | ||
78 | + if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) { | ||
79 | raise_exception(env, EXCP_UDEF, syn_uncategorized(), | ||
80 | exception_target_el(env)); | ||
81 | } | ||
82 | -- | 36 | -- |
83 | 2.7.4 | 37 | 2.16.2 |
84 | 38 | ||
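The sdhci fix in the right-hand patch above restores the usual QEMU error
propagation pattern: the callee is handed a local Error pointer, and only if
that is set is the error passed back to the caller's errp. A minimal sketch
of the idiom, with hypothetical device/function names:

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        Error *local_err = NULL;

        mydev_common_realize(dev, &local_err);   /* hypothetical sub-step */
        if (local_err) {
            error_propagate(errp, local_err);    /* hand the error up     */
            return;
        }
        /* ... continue realize only if no error was reported ... */
    }

Passing errp straight to the sub-step and then testing local_err, as the old
code did, means the test can never fire, which is what Coverity flagged.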
1 | When we added support for the new SHCSR bits in v8M in commit | 1 | If the GIC has the security extension support enabled, then a |
---|---|---|---|
2 | 437d59c17e9 the code to support writing to the new HARDFAULTPENDED | 2 | non-secure access to ICC_PMR must take account of the non-secure |
3 | bit was accidentally only added for non-secure writes; the | 3 | view of interrupt priorities, where real priorities 0x00..0x7f |
4 | secure banked version of the bit should also be writable. | 4 | are secure-only and not visible to the non-secure guest, and |
5 | priorities 0x80..0xff are shown to the guest as if they were | ||
6 | 0x00..0xff. We had the logic here wrong: | ||
7 | * on reads, the priority is in the secure range if bit 7 | ||
8 | is clear, not if it is set | ||
9 | * on writes, we want to set bit 7, not mask everything else | ||
5 | 10 | ||
11 | Our ICC_RPR read code had the same error as ICC_PMR. | ||
12 | |||
13 | (Compare the GICv3 spec pseudocode functions ICC_RPR_EL1 | ||
14 | and ICC_PMR_EL1.) | ||
15 | |||
16 | Fixes: https://bugs.launchpad.net/qemu/+bug/1748434 | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 18 | Reviewed-by: Andrew Jones <drjones@redhat.com> |
8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 19 | Message-id: 20180315133441.24149-1-peter.maydell@linaro.org |
9 | Message-id: 1506092407-26985-21-git-send-email-peter.maydell@linaro.org | ||
10 | --- | 20 | --- |
11 | hw/intc/armv7m_nvic.c | 1 + | 21 | hw/intc/arm_gicv3_cpuif.c | 6 +++--- |
12 | 1 file changed, 1 insertion(+) | 22 | 1 file changed, 3 insertions(+), 3 deletions(-) |
13 | 23 | ||
14 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 24 | diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c |
15 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/intc/armv7m_nvic.c | 26 | --- a/hw/intc/arm_gicv3_cpuif.c |
17 | +++ b/hw/intc/armv7m_nvic.c | 27 | +++ b/hw/intc/arm_gicv3_cpuif.c |
18 | @@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, | 28 | @@ -XXX,XX +XXX,XX @@ static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) |
19 | s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0; | 29 | /* NS access and Group 0 is inaccessible to NS: return the |
20 | s->sec_vectors[ARMV7M_EXCP_USAGE].enabled = | 30 | * NS view of the current priority |
21 | (value & (1 << 18)) != 0; | 31 | */ |
22 | + s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0; | 32 | - if (value & 0x80) { |
23 | /* SecureFault not banked, but RAZ/WI to NS */ | 33 | + if ((value & 0x80) == 0) { |
24 | s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0; | 34 | /* Secure priorities not visible to NS */ |
25 | s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0; | 35 | value = 0; |
36 | } else if (value != 0xff) { | ||
37 | @@ -XXX,XX +XXX,XX @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, | ||
38 | /* Current PMR in the secure range, don't allow NS to change it */ | ||
39 | return; | ||
40 | } | ||
41 | - value = (value >> 1) & 0x80; | ||
42 | + value = (value >> 1) | 0x80; | ||
43 | } | ||
44 | cs->icc_pmr_el1 = value; | ||
45 | gicv3_cpuif_update(cs); | ||
46 | @@ -XXX,XX +XXX,XX @@ static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) | ||
47 | if (arm_feature(env, ARM_FEATURE_EL3) && | ||
48 | !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) { | ||
49 | /* NS GIC access and Group 0 is inaccessible to NS */ | ||
50 | - if (prio & 0x80) { | ||
51 | + if ((prio & 0x80) == 0) { | ||
52 | /* NS mustn't see priorities in the Secure half of the range */ | ||
53 | prio = 0; | ||
54 | } else if (prio != 0xff) { | ||
26 | -- | 55 | -- |
27 | 2.7.4 | 56 | 2.16.2 |
28 | 57 | ||
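A short worked example of the non-secure priority view that the right-hand
GICv3 patch above corrects (values are illustrative, and the read-back
assumes the usual (value << 1) & 0xff transformation for priorities that are
visible to NS):

    /* NS guest writes ICC_PMR = 0x40: stored as (0x40 >> 1) | 0x80 = 0xA0    */
    /* NS guest reads it back:         (0xA0 << 1) & 0xff = 0x40, round trip  */
    /* Secure software sets PMR = 0x20: bit 7 clear, so the NS guest reads 0  */

The old write path used "& 0x80", which discarded the priority bits entirely,
and the old read tests hid the bit-7-set half of the range, i.e. exactly the
half that is supposed to be visible to the non-secure guest.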
1 | For the SG instruction and secure function return we are going | 1 | The BCM2836 uses a Cortex-A7, not a Cortex-A15. Update the device to |
---|---|---|---|
2 | to want to do memory accesses using the MMU index of the CPU | 2 | use the correct CPU. |
3 | in secure state, even though the CPU is currently in non-secure | 3 | https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2836/QA7_rev3.4.pdf |
4 | state. Write arm_v7m_mmu_idx_for_secstate() to do this job, | 4 | |
5 | and use it in cpu_mmu_index(). | 5 | When the BCM2836 was introduced (bad5623690b) the Cortex-A7 was not |
6 | available, so the very similar Cortex-A15 was used. Since dcf578ed8ce | ||
7 | we can model the correct core. | ||
6 | 8 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Reviewed-by: Alistair Francis <alistair@alistair23.me> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 11 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 12 | Message-id: 20180319110215.16755-1-peter.maydell@linaro.org |
10 | Message-id: 1506092407-26985-17-git-send-email-peter.maydell@linaro.org | ||
11 | --- | 13 | --- |
12 | target/arm/cpu.h | 32 +++++++++++++++++++++----------- | 14 | hw/arm/bcm2836.c | 2 +- |
13 | 1 file changed, 21 insertions(+), 11 deletions(-) | 15 | hw/arm/raspi.c | 2 +- |
16 | 2 files changed, 2 insertions(+), 2 deletions(-) | ||
14 | 17 | ||
15 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 18 | diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/cpu.h | 20 | --- a/hw/arm/bcm2836.c |
18 | +++ b/target/arm/cpu.h | 21 | +++ b/hw/arm/bcm2836.c |
19 | @@ -XXX,XX +XXX,XX @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) | 22 | @@ -XXX,XX +XXX,XX @@ struct BCM283XInfo { |
20 | } | 23 | static const BCM283XInfo bcm283x_socs[] = { |
21 | } | 24 | { |
22 | 25 | .name = TYPE_BCM2836, | |
23 | +/* Return the MMU index for a v7M CPU in the specified security state */ | 26 | - .cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"), |
24 | +static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, | 27 | + .cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"), |
25 | + bool secstate) | 28 | .clusterid = 0xf, |
26 | +{ | 29 | }, |
27 | + int el = arm_current_el(env); | 30 | #ifdef TARGET_AARCH64 |
28 | + ARMMMUIdx mmu_idx; | 31 | diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c |
29 | + | 32 | index XXXXXXX..XXXXXXX 100644 |
30 | + if (el == 0) { | 33 | --- a/hw/arm/raspi.c |
31 | + mmu_idx = secstate ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser; | 34 | +++ b/hw/arm/raspi.c |
32 | + } else { | 35 | @@ -XXX,XX +XXX,XX @@ static void raspi2_machine_init(MachineClass *mc) |
33 | + mmu_idx = secstate ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv; | 36 | mc->no_parallel = 1; |
34 | + } | 37 | mc->no_floppy = 1; |
35 | + | 38 | mc->no_cdrom = 1; |
36 | + if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) { | 39 | - mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); |
37 | + mmu_idx = secstate ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri; | 40 | + mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); |
38 | + } | 41 | mc->max_cpus = BCM283X_NCPUS; |
39 | + | 42 | mc->min_cpus = BCM283X_NCPUS; |
40 | + return mmu_idx; | 43 | mc->default_cpus = BCM283X_NCPUS; |
41 | +} | ||
42 | + | ||
43 | /* Determine the current mmu_idx to use for normal loads/stores */ | ||
44 | static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) | ||
45 | { | ||
46 | int el = arm_current_el(env); | ||
47 | |||
48 | if (arm_feature(env, ARM_FEATURE_M)) { | ||
49 | - ARMMMUIdx mmu_idx; | ||
50 | - | ||
51 | - if (el == 0) { | ||
52 | - mmu_idx = env->v7m.secure ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser; | ||
53 | - } else { | ||
54 | - mmu_idx = env->v7m.secure ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv; | ||
55 | - } | ||
56 | - | ||
57 | - if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) { | ||
58 | - mmu_idx = env->v7m.secure ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri; | ||
59 | - } | ||
60 | + ARMMMUIdx mmu_idx = arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); | ||
61 | |||
62 | return arm_to_core_mmu_idx(mmu_idx); | ||
63 | } | ||
64 | -- | 44 | -- |
65 | 2.7.4 | 45 | 2.16.2 |
66 | 46 | ||
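A minimal usage sketch of the new helper in the left-hand patch above (not
part of the patch itself): the point is that future SG and secure function
return code can ask for the Secure bank's MMU index even while the CPU is
currently Non-secure:

    /* Look up memory with the Secure state's MMU index, whatever the
     * current security state happens to be. */
    ARMMMUIdx mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
    int core_idx = arm_to_core_mmu_idx(mmu_idx);  /* as cpu_mmu_index() does */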
1 | On exception return for v8M, the SPSEL bit in the EXC_RETURN magic | 1 | From: Trent Piepho <tpiepho@impinj.com> |
---|---|---|---|
2 | value should be restored to the SPSEL bit in the CONTROL register | ||
3 | banked specified by the EXC_RETURN.ES bit. | ||
4 | 2 | ||
5 | Add write_v7m_control_spsel_for_secstate() which behaves like | 3 | Linux does not detect a break from this IMX serial driver as a magic |
6 | write_v7m_control_spsel() but allows the caller to specify which | 4 | sysrq. Nor does it note a break in the port error counts. |
7 | CONTROL bank to use, reimplement write_v7m_control_spsel() in | ||
8 | terms of it, and use it in exception return. | ||
9 | 5 | ||
6 | The former is because the Linux driver uses the BRCD bit in the USR2 | ||
7 | register to trigger the RS-232 break handler in the kernel, which is | ||
8 | where sysrq hooks in. The emulated UART was not setting this status | ||
9 | bit. | ||
10 | |||
11 | The latter is because the Linux driver expects, in addition to the BRK | ||
12 | bit, that the ERR bit is set when a break is read in the FIFO. A break | ||
13 | should also count as a frame error, so add that bit too. | ||
14 | |||
15 | Cc: Andrey Smirnov <andrew.smirnov@gmail.com> | ||
16 | Signed-off-by: Trent Piepho <tpiepho@impinj.com> | ||
17 | Message-id: 20180320013657.25038-1-tpiepho@impinj.com | ||
18 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 19 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 1506092407-26985-6-git-send-email-peter.maydell@linaro.org | ||
13 | --- | 20 | --- |
14 | target/arm/helper.c | 40 +++++++++++++++++++++++++++------------- | 21 | include/hw/char/imx_serial.h | 1 + |
15 | 1 file changed, 27 insertions(+), 13 deletions(-) | 22 | hw/char/imx_serial.c | 5 ++++- |
23 | 2 files changed, 5 insertions(+), 1 deletion(-) | ||
16 | 24 | ||
17 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 25 | diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h |
18 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/helper.c | 27 | --- a/include/hw/char/imx_serial.h |
20 | +++ b/target/arm/helper.c | 28 | +++ b/include/hw/char/imx_serial.h |
21 | @@ -XXX,XX +XXX,XX @@ static bool v7m_using_psp(CPUARMState *env) | 29 | @@ -XXX,XX +XXX,XX @@ |
22 | env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; | 30 | |
31 | #define URXD_CHARRDY (1<<15) /* character read is valid */ | ||
32 | #define URXD_ERR (1<<14) /* Character has error */ | ||
33 | +#define URXD_FRMERR (1<<12) /* Character has frame error */ | ||
34 | #define URXD_BRK (1<<11) /* Break received */ | ||
35 | |||
36 | #define USR1_PARTYER (1<<15) /* Parity Error */ | ||
37 | diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/hw/char/imx_serial.c | ||
40 | +++ b/hw/char/imx_serial.c | ||
41 | @@ -XXX,XX +XXX,XX @@ static void imx_put_data(void *opaque, uint32_t value) | ||
42 | s->usr2 |= USR2_RDR; | ||
43 | s->uts1 &= ~UTS1_RXEMPTY; | ||
44 | s->readbuff = value; | ||
45 | + if (value & URXD_BRK) { | ||
46 | + s->usr2 |= USR2_BRCD; | ||
47 | + } | ||
48 | imx_update(s); | ||
23 | } | 49 | } |
24 | 50 | ||
25 | -/* Write to v7M CONTROL.SPSEL bit. This may change the current | 51 | @@ -XXX,XX +XXX,XX @@ static void imx_receive(void *opaque, const uint8_t *buf, int size) |
26 | - * stack pointer between Main and Process stack pointers. | 52 | static void imx_event(void *opaque, int event) |
27 | +/* Write to v7M CONTROL.SPSEL bit for the specified security bank. | ||
28 | + * This may change the current stack pointer between Main and Process | ||
29 | + * stack pointers if it is done for the CONTROL register for the current | ||
30 | + * security state. | ||
31 | */ | ||
32 | -static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) | ||
33 | +static void write_v7m_control_spsel_for_secstate(CPUARMState *env, | ||
34 | + bool new_spsel, | ||
35 | + bool secstate) | ||
36 | { | 53 | { |
37 | - uint32_t tmp; | 54 | if (event == CHR_EVENT_BREAK) { |
38 | - bool new_is_psp, old_is_psp = v7m_using_psp(env); | 55 | - imx_put_data(opaque, URXD_BRK); |
39 | + bool old_is_psp = v7m_using_psp(env); | 56 | + imx_put_data(opaque, URXD_BRK | URXD_FRMERR | URXD_ERR); |
40 | |||
41 | - env->v7m.control[env->v7m.secure] = | ||
42 | - deposit32(env->v7m.control[env->v7m.secure], | ||
43 | + env->v7m.control[secstate] = | ||
44 | + deposit32(env->v7m.control[secstate], | ||
45 | R_V7M_CONTROL_SPSEL_SHIFT, | ||
46 | R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); | ||
47 | |||
48 | - new_is_psp = v7m_using_psp(env); | ||
49 | + if (secstate == env->v7m.secure) { | ||
50 | + bool new_is_psp = v7m_using_psp(env); | ||
51 | + uint32_t tmp; | ||
52 | |||
53 | - if (old_is_psp != new_is_psp) { | ||
54 | - tmp = env->v7m.other_sp; | ||
55 | - env->v7m.other_sp = env->regs[13]; | ||
56 | - env->regs[13] = tmp; | ||
57 | + if (old_is_psp != new_is_psp) { | ||
58 | + tmp = env->v7m.other_sp; | ||
59 | + env->v7m.other_sp = env->regs[13]; | ||
60 | + env->regs[13] = tmp; | ||
61 | + } | ||
62 | } | 57 | } |
63 | } | 58 | } |
64 | 59 | ||
65 | +/* Write to v7M CONTROL.SPSEL bit. This may change the current | ||
66 | + * stack pointer between Main and Process stack pointers. | ||
67 | + */ | ||
68 | +static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) | ||
69 | +{ | ||
70 | + write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); | ||
71 | +} | ||
72 | + | ||
73 | void write_v7m_exception(CPUARMState *env, uint32_t new_exc) | ||
74 | { | ||
75 | /* Write a new value to v7m.exception, thus transitioning into or out | ||
76 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
77 | * Handler mode (and will be until we write the new XPSR.Interrupt | ||
78 | * field) this does not switch around the current stack pointer. | ||
79 | */ | ||
80 | - write_v7m_control_spsel(env, return_to_sp_process); | ||
81 | + write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); | ||
82 | |||
83 | switch_v7m_security_state(env, return_to_secure); | ||
84 | |||
85 | -- | 60 | -- |
86 | 2.7.4 | 61 | 2.16.2 |
87 | 62 | ||
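As a brief illustration of the left-hand patch above: on v8M exception return
the CONTROL bank that receives the restored SPSEL bit is chosen by
EXC_RETURN.ES, which need not be the current security state. A sketch using
the EXC_RETURN field layout (variable names are illustrative):

    bool exc_secure = excret & R_V7M_EXCRET_ES_MASK;     /* bank to update  */
    bool to_psp     = excret & R_V7M_EXCRET_SPSEL_MASK;  /* new SPSEL value */
    write_v7m_control_spsel_for_secstate(env, to_psp, exc_secure);
    /* Only when exc_secure matches env->v7m.secure does this also swap
     * regs[13] with v7m.other_sp. */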
1 | From: Michael Olbrich <m.olbrich@pengutronix.de> | 1 | From: Wei Huang <wei@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The current code checks if the next block exceeds the size of the card. | 3 | Instead of using "1.0" as the system version of SMBIOS, we should use |
4 | This generates an error while reading the last block of the card. | 4 | mc->name for the mach-virt machine type to be consistent with other architectures. |
5 | Do the out-of-bounds check when starting to read a new block to fix this. | 5 | With this patch, "dmidecode -t 1" (e.g., "-M virt-2.12,accel=kvm") will |
6 | show: | ||
6 | 7 | ||
7 | This issue became visible with increased error checking in Linux 4.13. | 8 | Handle 0x0100, DMI type 1, 27 bytes |
9 | System Information | ||
10 | Manufacturer: QEMU | ||
11 | Product Name: KVM Virtual Machine | ||
12 | Version: virt-2.12 | ||
13 | Serial Number: Not Specified | ||
14 | ... | ||
8 | 15 | ||
9 | Cc: qemu-stable@nongnu.org | 16 | instead of: |
10 | Signed-off-by: Michael Olbrich <m.olbrich@pengutronix.de> | 17 | |
11 | Reviewed-by: Alistair Francis <alistair.francis@xilinx.com> | 18 | Handle 0x0100, DMI type 1, 27 bytes |
12 | Message-id: 20170916091611.10241-1-m.olbrich@pengutronix.de | 19 | System Information |
20 | Manufacturer: QEMU | ||
21 | Product Name: KVM Virtual Machine | ||
22 | Version: 1.0 | ||
23 | Serial Number: Not Specified | ||
24 | ... | ||
25 | |||
26 | For backward compatibility, we allow older machine types to keep "1.0" | ||
27 | as the default system version. | ||
28 | |||
29 | Signed-off-by: Wei Huang <wei@redhat.com> | ||
30 | Reviewed-by: Andrew Jones <drjones@redhat.com> | ||
31 | Message-id: 20180322212318.7182-1-wei@redhat.com | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 32 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | --- | 33 | --- |
15 | hw/sd/sd.c | 12 ++++++------ | 34 | include/hw/arm/virt.h | 1 + |
16 | 1 file changed, 6 insertions(+), 6 deletions(-) | 35 | hw/arm/virt.c | 8 +++++++- |
36 | 2 files changed, 8 insertions(+), 1 deletion(-) | ||
17 | 37 | ||
18 | diff --git a/hw/sd/sd.c b/hw/sd/sd.c | 38 | diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h |
19 | index XXXXXXX..XXXXXXX 100644 | 39 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/hw/sd/sd.c | 40 | --- a/include/hw/arm/virt.h |
21 | +++ b/hw/sd/sd.c | 41 | +++ b/include/hw/arm/virt.h |
22 | @@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd) | 42 | @@ -XXX,XX +XXX,XX @@ typedef struct { |
23 | break; | 43 | bool no_its; |
24 | 44 | bool no_pmu; | |
25 | case 18: /* CMD18: READ_MULTIPLE_BLOCK */ | 45 | bool claim_edge_triggered_timers; |
26 | - if (sd->data_offset == 0) | 46 | + bool smbios_old_sys_ver; |
27 | + if (sd->data_offset == 0) { | 47 | } VirtMachineClass; |
28 | + if (sd->data_start + io_len > sd->size) { | 48 | |
29 | + sd->card_status |= ADDRESS_ERROR; | 49 | typedef struct { |
30 | + return 0x00; | 50 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c |
31 | + } | 51 | index XXXXXXX..XXXXXXX 100644 |
32 | BLK_READ_BLOCK(sd->data_start, io_len); | 52 | --- a/hw/arm/virt.c |
33 | + } | 53 | +++ b/hw/arm/virt.c |
34 | ret = sd->data[sd->data_offset ++]; | 54 | @@ -XXX,XX +XXX,XX @@ static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size) |
35 | 55 | ||
36 | if (sd->data_offset >= io_len) { | 56 | static void virt_build_smbios(VirtMachineState *vms) |
37 | @@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd) | 57 | { |
38 | break; | 58 | + MachineClass *mc = MACHINE_GET_CLASS(vms); |
39 | } | 59 | + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); |
40 | } | 60 | uint8_t *smbios_tables, *smbios_anchor; |
41 | - | 61 | size_t smbios_tables_len, smbios_anchor_len; |
42 | - if (sd->data_start + io_len > sd->size) { | 62 | const char *product = "QEMU Virtual Machine"; |
43 | - sd->card_status |= ADDRESS_ERROR; | 63 | @@ -XXX,XX +XXX,XX @@ static void virt_build_smbios(VirtMachineState *vms) |
44 | - break; | 64 | } |
45 | - } | 65 | |
46 | } | 66 | smbios_set_defaults("QEMU", product, |
47 | break; | 67 | - "1.0", false, true, SMBIOS_ENTRY_POINT_30); |
68 | + vmc->smbios_old_sys_ver ? "1.0" : mc->name, false, | ||
69 | + true, SMBIOS_ENTRY_POINT_30); | ||
70 | |||
71 | smbios_get_tables(NULL, 0, &smbios_tables, &smbios_tables_len, | ||
72 | &smbios_anchor, &smbios_anchor_len); | ||
73 | @@ -XXX,XX +XXX,XX @@ static void virt_2_11_instance_init(Object *obj) | ||
74 | |||
75 | static void virt_machine_2_11_options(MachineClass *mc) | ||
76 | { | ||
77 | + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); | ||
78 | + | ||
79 | virt_machine_2_12_options(mc); | ||
80 | SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_11); | ||
81 | + vmc->smbios_old_sys_ver = true; | ||
82 | } | ||
83 | DEFINE_VIRT_MACHINE(2, 11) | ||
48 | 84 | ||
49 | -- | 85 | -- |
50 | 2.7.4 | 86 | 2.16.2 |
51 | 87 | ||
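A worked example of the off-by-one that the left-hand sd.c patch above fixes
(sizes made up for illustration): with a 4096-byte card and 512-byte blocks,
a CMD18 multi-block read of the final block starts at data_start = 3584.
After that block had been streamed out, the old code advanced data_start to
4096 and only then evaluated 4096 + 512 > 4096, raising ADDRESS_ERROR even
though nothing out of range was ever requested. With the check moved to the
point where a new block is about to be read, the error is raised only if the
host really goes on to ask for the block at offset 4096.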
Deleted patch | |||
---|---|---|---|
1 | Reset for devices does not include an automatic clear of the | ||
2 | device state (unlike CPU state, where most of the state | ||
3 | structure is cleared to zero). Add some missing initialization | ||
4 | of NVIC state that meant that the device was left in the wrong | ||
5 | state if the guest did a warm reset. | ||
6 | 1 | ||
7 | (In particular, since we were resetting the computed state like | ||
8 | s->exception_prio but not all the state it was computed | ||
9 | from like s->vectors[x].active, the NVIC wound up in an | ||
10 | inconsistent state that could later trigger assertion failures.) | ||
11 | |||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
15 | Message-id: 1506092407-26985-2-git-send-email-peter.maydell@linaro.org | ||
16 | --- | ||
17 | hw/intc/armv7m_nvic.c | 5 +++++ | ||
18 | 1 file changed, 5 insertions(+) | ||
19 | |||
20 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/hw/intc/armv7m_nvic.c | ||
23 | +++ b/hw/intc/armv7m_nvic.c | ||
24 | @@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev) | ||
25 | int resetprio; | ||
26 | NVICState *s = NVIC(dev); | ||
27 | |||
28 | + memset(s->vectors, 0, sizeof(s->vectors)); | ||
29 | + memset(s->sec_vectors, 0, sizeof(s->sec_vectors)); | ||
30 | + s->prigroup[M_REG_NS] = 0; | ||
31 | + s->prigroup[M_REG_S] = 0; | ||
32 | + | ||
33 | s->vectors[ARMV7M_EXCP_NMI].enabled = 1; | ||
34 | /* MEM, BUS, and USAGE are enabled through | ||
35 | * the System Handler Control register | ||
36 | -- | ||
37 | 2.7.4 | ||
38 | |||
1 | In the v7M architecture, there is an invariant that if the CPU is | 1 | The MDCR_EL2.TDE bit allows the exception level targeted by debug |
---|---|---|---|
2 | in Handler mode then the CONTROL.SPSEL bit cannot be nonzero. | 2 | exceptions to be set to EL2 for code executing at EL0. We handle |
3 | This in turn means that the current stack pointer is always | 3 | this in the arm_debug_target_el() function, but this is only used for |
4 | indicated by CONTROL.SPSEL, even though Handler mode always uses | 4 | hardware breakpoint and watchpoint exceptions, not for the exception |
5 | the Main stack pointer. | 5 | generated when the guest executes an AArch32 BKPT or AArch64 BRK |
6 | 6 | instruction. We don't have enough information for a translate-time | |
7 | In v8M, this invariant is removed, and CONTROL.SPSEL may now | 7 | equivalent of arm_debug_target_el(), so instead make BKPT and BRK |
8 | be nonzero in Handler mode (though Handler mode still always | 8 | call a special purpose helper which can do the routing, rather than |
9 | uses the Main stack pointer). In preparation for this change, | 9 | the generic exception_with_syndrome helper. |
10 | change how we handle this bit: rename switch_v7m_sp() to | ||
11 | the now more accurate write_v7m_control_spsel(), and make it | ||
12 | check both the handler mode state and the SPSEL bit. | ||
13 | |||
14 | Note that this implicitly changes the point at which we switch | ||
15 | active SP on exception exit from before we pop the exception | ||
16 | frame to after it. | ||
17 | 10 | ||
18 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
19 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 12 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
20 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 13 | Message-id: 20180320134114.30418-2-peter.maydell@linaro.org |
21 | Message-id: 1506092407-26985-4-git-send-email-peter.maydell@linaro.org | ||
22 | --- | 14 | --- |
23 | target/arm/cpu.h | 8 ++++++- | 15 | target/arm/helper.h | 1 + |
24 | hw/intc/armv7m_nvic.c | 2 +- | 16 | target/arm/op_helper.c | 8 ++++++++ |
25 | target/arm/helper.c | 65 ++++++++++++++++++++++++++++++++++----------------- | 17 | target/arm/translate-a64.c | 15 +++++++++++++-- |
26 | 3 files changed, 51 insertions(+), 24 deletions(-) | 18 | target/arm/translate.c | 19 ++++++++++++++----- |
19 | 4 files changed, 36 insertions(+), 7 deletions(-) | ||
27 | 20 | ||
28 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 21 | diff --git a/target/arm/helper.h b/target/arm/helper.h |
29 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/cpu.h | 23 | --- a/target/arm/helper.h |
31 | +++ b/target/arm/cpu.h | 24 | +++ b/target/arm/helper.h |
32 | @@ -XXX,XX +XXX,XX @@ void pmccntr_sync(CPUARMState *env); | 25 | @@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, |
33 | #define PSTATE_MODE_EL1t 4 | 26 | i32, i32, i32, i32) |
34 | #define PSTATE_MODE_EL0t 0 | 27 | DEF_HELPER_2(exception_internal, void, env, i32) |
35 | 28 | DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32) | |
36 | +/* Write a new value to v7m.exception, thus transitioning into or out | 29 | +DEF_HELPER_2(exception_bkpt_insn, void, env, i32) |
37 | + * of Handler mode; this may result in a change of active stack pointer. | 30 | DEF_HELPER_1(setend, void, env) |
31 | DEF_HELPER_2(wfi, void, env, i32) | ||
32 | DEF_HELPER_1(wfe, void, env) | ||
33 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/target/arm/op_helper.c | ||
36 | +++ b/target/arm/op_helper.c | ||
37 | @@ -XXX,XX +XXX,XX @@ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, | ||
38 | raise_exception(env, excp, syndrome, target_el); | ||
39 | } | ||
40 | |||
41 | +/* Raise an EXCP_BKPT with the specified syndrome register value, | ||
42 | + * targeting the correct exception level for debug exceptions. | ||
38 | + */ | 43 | + */ |
39 | +void write_v7m_exception(CPUARMState *env, uint32_t new_exc); | 44 | +void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) |
40 | + | 45 | +{ |
41 | /* Map EL and handler into a PSTATE_MODE. */ | 46 | + raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env)); |
42 | static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) | ||
43 | { | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) | ||
45 | env->condexec_bits |= (val >> 8) & 0xfc; | ||
46 | } | ||
47 | if (mask & XPSR_EXCP) { | ||
48 | - env->v7m.exception = val & XPSR_EXCP; | ||
49 | + /* Note that this only happens on exception exit */ | ||
50 | + write_v7m_exception(env, val & XPSR_EXCP); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/hw/intc/armv7m_nvic.c | ||
57 | +++ b/hw/intc/armv7m_nvic.c | ||
58 | @@ -XXX,XX +XXX,XX @@ bool armv7m_nvic_acknowledge_irq(void *opaque) | ||
59 | vec->active = 1; | ||
60 | vec->pending = 0; | ||
61 | |||
62 | - env->v7m.exception = s->vectpending; | ||
63 | + write_v7m_exception(env, s->vectpending); | ||
64 | |||
65 | nvic_irq_update(s); | ||
66 | |||
67 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
68 | index XXXXXXX..XXXXXXX 100644 | ||
69 | --- a/target/arm/helper.c | ||
70 | +++ b/target/arm/helper.c | ||
71 | @@ -XXX,XX +XXX,XX @@ static bool v7m_using_psp(CPUARMState *env) | ||
72 | env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; | ||
73 | } | ||
74 | |||
75 | -/* Switch to V7M main or process stack pointer. */ | ||
76 | -static void switch_v7m_sp(CPUARMState *env, bool new_spsel) | ||
77 | +/* Write to v7M CONTROL.SPSEL bit. This may change the current | ||
78 | + * stack pointer between Main and Process stack pointers. | ||
79 | + */ | ||
80 | +static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) | ||
81 | { | ||
82 | uint32_t tmp; | ||
83 | - uint32_t old_control = env->v7m.control[env->v7m.secure]; | ||
84 | - bool old_spsel = old_control & R_V7M_CONTROL_SPSEL_MASK; | ||
85 | + bool new_is_psp, old_is_psp = v7m_using_psp(env); | ||
86 | + | ||
87 | + env->v7m.control[env->v7m.secure] = | ||
88 | + deposit32(env->v7m.control[env->v7m.secure], | ||
89 | + R_V7M_CONTROL_SPSEL_SHIFT, | ||
90 | + R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); | ||
91 | + | ||
92 | + new_is_psp = v7m_using_psp(env); | ||
93 | |||
94 | - if (old_spsel != new_spsel) { | ||
95 | + if (old_is_psp != new_is_psp) { | ||
96 | tmp = env->v7m.other_sp; | ||
97 | env->v7m.other_sp = env->regs[13]; | ||
98 | env->regs[13] = tmp; | ||
99 | + } | ||
100 | +} | 47 | +} |
101 | + | 48 | + |
102 | +void write_v7m_exception(CPUARMState *env, uint32_t new_exc) | 49 | uint32_t HELPER(cpsr_read)(CPUARMState *env) |
50 | { | ||
51 | return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED); | ||
52 | diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/target/arm/translate-a64.c | ||
55 | +++ b/target/arm/translate-a64.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp, | ||
57 | s->base.is_jmp = DISAS_NORETURN; | ||
58 | } | ||
59 | |||
60 | +static void gen_exception_bkpt_insn(DisasContext *s, int offset, | ||
61 | + uint32_t syndrome) | ||
103 | +{ | 62 | +{ |
104 | + /* Write a new value to v7m.exception, thus transitioning into or out | 63 | + TCGv_i32 tcg_syn; |
105 | + * of Handler mode; this may result in a change of active stack pointer. | ||
106 | + */ | ||
107 | + bool new_is_psp, old_is_psp = v7m_using_psp(env); | ||
108 | + uint32_t tmp; | ||
109 | |||
110 | - env->v7m.control[env->v7m.secure] = deposit32(old_control, | ||
111 | - R_V7M_CONTROL_SPSEL_SHIFT, | ||
112 | - R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); | ||
113 | + env->v7m.exception = new_exc; | ||
114 | + | 64 | + |
115 | + new_is_psp = v7m_using_psp(env); | 65 | + gen_a64_set_pc_im(s->pc - offset); |
66 | + tcg_syn = tcg_const_i32(syndrome); | ||
67 | + gen_helper_exception_bkpt_insn(cpu_env, tcg_syn); | ||
68 | + tcg_temp_free_i32(tcg_syn); | ||
69 | + s->base.is_jmp = DISAS_NORETURN; | ||
70 | +} | ||
116 | + | 71 | + |
117 | + if (old_is_psp != new_is_psp) { | 72 | static void gen_ss_advance(DisasContext *s) |
118 | + tmp = env->v7m.other_sp; | 73 | { |
119 | + env->v7m.other_sp = env->regs[13]; | 74 | /* If the singlestep state is Active-not-pending, advance to |
120 | + env->regs[13] = tmp; | 75 | @@ -XXX,XX +XXX,XX @@ static void disas_exc(DisasContext *s, uint32_t insn) |
121 | } | 76 | break; |
77 | } | ||
78 | /* BRK */ | ||
79 | - gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16), | ||
80 | - default_exception_el(s)); | ||
81 | + gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16)); | ||
82 | break; | ||
83 | case 2: | ||
84 | if (op2_ll != 0) { | ||
85 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
86 | index XXXXXXX..XXXXXXX 100644 | ||
87 | --- a/target/arm/translate.c | ||
88 | +++ b/target/arm/translate.c | ||
89 | @@ -XXX,XX +XXX,XX @@ static void gen_exception_insn(DisasContext *s, int offset, int excp, | ||
90 | s->base.is_jmp = DISAS_NORETURN; | ||
122 | } | 91 | } |
123 | 92 | ||
124 | @@ -XXX,XX +XXX,XX @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, | 93 | +static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn) |
125 | bool want_psp = threadmode && spsel; | 94 | +{ |
126 | 95 | + TCGv_i32 tcg_syn; | |
127 | if (secure == env->v7m.secure) { | 96 | + |
128 | - /* Currently switch_v7m_sp switches SP as it updates SPSEL, | 97 | + gen_set_condexec(s); |
129 | - * so the SP we want is always in regs[13]. | 98 | + gen_set_pc_im(s, s->pc - offset); |
130 | - * When we decouple SPSEL from the actually selected SP | 99 | + tcg_syn = tcg_const_i32(syn); |
131 | - * we need to check want_psp against v7m_using_psp() | 100 | + gen_helper_exception_bkpt_insn(cpu_env, tcg_syn); |
132 | - * to see whether we need regs[13] or v7m.other_sp. | 101 | + tcg_temp_free_i32(tcg_syn); |
133 | - */ | 102 | + s->base.is_jmp = DISAS_NORETURN; |
134 | - return &env->regs[13]; | 103 | +} |
135 | + if (want_psp == v7m_using_psp(env)) { | 104 | + |
136 | + return &env->regs[13]; | 105 | /* Force a TB lookup after an instruction that changes the CPU state. */ |
137 | + } else { | 106 | static inline void gen_lookup_tb(DisasContext *s) |
138 | + return &env->v7m.other_sp; | 107 | { |
139 | + } | 108 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) |
140 | } else { | 109 | case 1: |
141 | if (want_psp) { | 110 | /* bkpt */ |
142 | return &env->v7m.other_ss_psp; | 111 | ARCH(5); |
143 | @@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | 112 | - gen_exception_insn(s, 4, EXCP_BKPT, |
144 | uint32_t addr; | 113 | - syn_aa32_bkpt(imm16, false), |
145 | 114 | - default_exception_el(s)); | |
146 | armv7m_nvic_acknowledge_irq(env->nvic); | 115 | + gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false)); |
147 | - switch_v7m_sp(env, 0); | 116 | break; |
148 | + write_v7m_control_spsel(env, 0); | 117 | case 2: |
149 | arm_clear_exclusive(env); | 118 | /* Hypervisor call (v7) */ |
150 | /* Clear IT bits */ | 119 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn) |
151 | env->condexec_bits = 0; | 120 | { |
152 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | 121 | int imm8 = extract32(insn, 0, 8); |
153 | return; | 122 | ARCH(5); |
154 | } | 123 | - gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true), |
155 | 124 | - default_exception_el(s)); | |
156 | - /* Set CONTROL.SPSEL from excret.SPSEL. For QEMU this currently | 125 | + gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true)); |
157 | - * causes us to switch the active SP, but we will change this | 126 | break; |
158 | - * later to not do that so we can support v8M. | ||
159 | + /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in | ||
160 | + * Handler mode (and will be until we write the new XPSR.Interrupt | ||
161 | + * field) this does not switch around the current stack pointer. | ||
162 | */ | ||
163 | - switch_v7m_sp(env, return_to_sp_process); | ||
164 | + write_v7m_control_spsel(env, return_to_sp_process); | ||
165 | |||
166 | { | ||
167 | /* The stack pointer we should be reading the exception frame from | ||
168 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) | ||
169 | case 20: /* CONTROL */ | ||
170 | /* Writing to the SPSEL bit only has an effect if we are in | ||
171 | * thread mode; other bits can be updated by any privileged code. | ||
172 | - * switch_v7m_sp() deals with updating the SPSEL bit in | ||
173 | + * write_v7m_control_spsel() deals with updating the SPSEL bit in | ||
174 | * env->v7m.control, so we only need update the others. | ||
175 | */ | ||
176 | if (!arm_v7m_is_handler_mode(env)) { | ||
177 | - switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); | ||
178 | + write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); | ||
179 | } | 127 | } |
180 | env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; | 128 | |
181 | env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; | ||
182 | -- | 129 | -- |
183 | 2.7.4 | 130 | 2.16.2 |
184 | 131 | ||
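The right-hand BKPT/BRK patch above defers the routing decision to run time
because it depends on MDCR_EL2.TDE (and HCR_EL2.TGE). A simplified paraphrase
of what arm_debug_target_el() decides, omitting the Secure/EL3 case:

    /* Simplified paraphrase, not the verbatim QEMU helper. */
    static int debug_target_el(CPUARMState *env)
    {
        if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure(env) &&
            ((env->cp15.mdcr_el2 & MDCR_TDE) ||
             (env->cp15.hcr_el2 & HCR_TGE))) {
            return 2;   /* debug exceptions from NS EL0/EL1 routed to EL2 */
        }
        return 1;       /* otherwise the usual EL1 target (EL3 case omitted) */
    }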
1 | Implement the security attribute lookups for memory accesses | 1 | When a debug exception is taken to AArch32, it appears as a Prefetch |
---|---|---|---|
2 | in the get_phys_addr() functions, causing these to generate | 2 | Abort, and the Instruction Fault Status Register (IFSR) must be set. |
3 | various kinds of SecureFault for bad accesses. | 3 | The IFSR has two possible formats, depending on whether LPAE is in |
4 | use. Factor out the code in arm_debug_excp_handler() which picks | ||
5 | an FSR value into its own utility function, update it to use | ||
6 | arm_fi_to_lfsc() and arm_fi_to_sfsc() rather than hard-coded constants, | ||
7 | and use the correct condition to select long or short format. | ||
4 | 8 | ||
5 | The major subtlety in this code relates to handling of the | 9 | In particular this fixes a bug where we could select the short |
6 | case when the security attributes the SAU assigns to the | 10 | format because we're at EL0 and the EL1 translation regime is |
7 | address don't match the current security state of the CPU. | 11 | not using LPAE, but then route the debug exception to EL2 because |
8 | 12 | of MDCR_EL2.TDE and hand EL2 the wrong format FSR. | |
9 | In the ARM ARM pseudocode for validating instruction | ||
10 | accesses, the security attributes of the address determine | ||
11 | whether the Secure or NonSecure MPU state is used. At face | ||
12 | value, handling this would require us to encode the relevant | ||
13 | bits of state into mmu_idx for both S and NS at once, which | ||
14 | would result in our needing 16 mmu indexes. Fortunately we | ||
15 | don't actually need to do this because a mismatch between | ||
16 | address attributes and CPU state means either: | ||
17 | * some kind of fault (usually a SecureFault, but in theory | ||
18 | perhaps a UserFault for unaligned access to Device memory) | ||
19 | * execution of the SG instruction in NS state from a | ||
20 | Secure & NonSecure code region | ||
21 | |||
22 | The purpose of SG is simply to flip the CPU into Secure | ||
23 | state, so we can handle it by emulating execution of that | ||
24 | instruction directly in arm_v7m_cpu_do_interrupt(), which | ||
25 | means we can treat all the mismatch cases as "throw an | ||
26 | exception" and we don't need to encode the state of the | ||
27 | other MPU bank into our mmu_idx values. | ||
28 | |||
29 | This commit doesn't include the actual emulation of SG; | ||
30 | it also doesn't include implementation of the IDAU, which | ||
31 | is a per-board way to specify hard-coded memory attributes | ||
32 | for addresses, which override the CPU-internal SAU if they | ||
33 | specify a more secure setting than the SAU is programmed to. | ||
34 | 13 | ||
35 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
36 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 15 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
37 | Message-id: 1506092407-26985-15-git-send-email-peter.maydell@linaro.org | 16 | Message-id: 20180320134114.30418-3-peter.maydell@linaro.org |
38 | --- | 17 | --- |
39 | target/arm/internals.h | 15 ++++ | 18 | target/arm/internals.h | 25 +++++++++++++++++++++++++ |
40 | target/arm/helper.c | 182 ++++++++++++++++++++++++++++++++++++++++++++++++- | 19 | target/arm/op_helper.c | 12 ++---------- |
41 | 2 files changed, 195 insertions(+), 2 deletions(-) | 20 | 2 files changed, 27 insertions(+), 10 deletions(-) |
42 | 21 | ||
43 | diff --git a/target/arm/internals.h b/target/arm/internals.h | 22 | diff --git a/target/arm/internals.h b/target/arm/internals.h |
44 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/target/arm/internals.h | 24 | --- a/target/arm/internals.h |
46 | +++ b/target/arm/internals.h | 25 | +++ b/target/arm/internals.h |
47 | @@ -XXX,XX +XXX,XX @@ FIELD(V7M_EXCRET, DCRS, 5, 1) | 26 | @@ -XXX,XX +XXX,XX @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx) |
48 | FIELD(V7M_EXCRET, S, 6, 1) | 27 | } |
49 | FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */ | 28 | } |
50 | 29 | ||
51 | +/* We use a few fake FSR values for internal purposes in M profile. | 30 | +/* Return the FSR value for a debug exception (watchpoint, hardware |
52 | + * M profile cores don't have A/R format FSRs, but currently our | 31 | + * breakpoint or BKPT insn) targeting the specified exception level. |
53 | + * get_phys_addr() code assumes A/R profile and reports failures via | ||
54 | + * an A/R format FSR value. We then translate that into the proper | ||
55 | + * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt(). | ||
56 | + * Mostly the FSR values we use for this are those defined for v7PMSA, | ||
57 | + * since we share some of that codepath. A few kinds of fault are | ||
58 | + * only for M profile and have no A/R equivalent, though, so we have | ||
59 | + * to pick a value from the reserved range (which we never otherwise | ||
60 | + * generate) to use for these. | ||
61 | + * These values will never be visible to the guest. | ||
62 | + */ | 32 | + */ |
63 | +#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */ | 33 | +static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) |
64 | +#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ | 34 | +{ |
35 | + ARMMMUFaultInfo fi = { .type = ARMFault_Debug }; | ||
36 | + int target_el = arm_debug_target_el(env); | ||
37 | + bool using_lpae = false; | ||
65 | + | 38 | + |
66 | /* | 39 | + if (target_el == 2 || arm_el_is_aa64(env, target_el)) { |
67 | * For AArch64, map a given EL to an index in the banked_spsr array. | 40 | + using_lpae = true; |
68 | * Note that this mapping and the AArch32 mapping defined in bank_number() | 41 | + } else { |
69 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 42 | + if (arm_feature(env, ARM_FEATURE_LPAE) && |
70 | index XXXXXXX..XXXXXXX 100644 | 43 | + (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) { |
71 | --- a/target/arm/helper.c | 44 | + using_lpae = true; |
72 | +++ b/target/arm/helper.c | 45 | + } |
73 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, | ||
74 | target_ulong *page_size_ptr, uint32_t *fsr, | ||
75 | ARMMMUFaultInfo *fi); | ||
76 | |||
77 | +/* Security attributes for an address, as returned by v8m_security_lookup. */ | ||
78 | +typedef struct V8M_SAttributes { | ||
79 | + bool ns; | ||
80 | + bool nsc; | ||
81 | + uint8_t sregion; | ||
82 | + bool srvalid; | ||
83 | + uint8_t iregion; | ||
84 | + bool irvalid; | ||
85 | +} V8M_SAttributes; | ||
86 | + | ||
87 | /* Definitions for the PMCCNTR and PMCR registers */ | ||
88 | #define PMCRD 0x8 | ||
89 | #define PMCRC 0x4 | ||
90 | @@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) | ||
91 | * raises the fault, in the A profile short-descriptor format. | ||
92 | */ | ||
93 | switch (env->exception.fsr & 0xf) { | ||
94 | + case M_FAKE_FSR_NSC_EXEC: | ||
95 | + /* Exception generated when we try to execute code at an address | ||
96 | + * which is marked as Secure & Non-Secure Callable and the CPU | ||
97 | + * is in the Non-Secure state. The only instruction which can | ||
98 | + * be executed like this is SG (and that only if both halves of | ||
99 | + * the SG instruction have the same security attributes.) | ||
100 | + * Everything else must generate an INVEP SecureFault, so we | ||
101 | + * emulate the SG instruction here. | ||
102 | + * TODO: actually emulate SG. | ||
103 | + */ | ||
104 | + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; | ||
105 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
106 | + qemu_log_mask(CPU_LOG_INT, | ||
107 | + "...really SecureFault with SFSR.INVEP\n"); | ||
108 | + break; | ||
109 | + case M_FAKE_FSR_SFAULT: | ||
110 | + /* Various flavours of SecureFault for attempts to execute or | ||
111 | + * access data in the wrong security state. | ||
112 | + */ | ||
113 | + switch (cs->exception_index) { | ||
114 | + case EXCP_PREFETCH_ABORT: | ||
115 | + if (env->v7m.secure) { | ||
116 | + env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; | ||
117 | + qemu_log_mask(CPU_LOG_INT, | ||
118 | + "...really SecureFault with SFSR.INVTRAN\n"); | ||
119 | + } else { | ||
120 | + env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; | ||
121 | + qemu_log_mask(CPU_LOG_INT, | ||
122 | + "...really SecureFault with SFSR.INVEP\n"); | ||
123 | + } | ||
124 | + break; | ||
125 | + case EXCP_DATA_ABORT: | ||
126 | + /* This must be an NS access to S memory */ | ||
127 | + env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; | ||
128 | + qemu_log_mask(CPU_LOG_INT, | ||
129 | + "...really SecureFault with SFSR.AUVIOL\n"); | ||
130 | + break; | ||
131 | + } | ||
132 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
133 | + break; | ||
134 | case 0x8: /* External Abort */ | ||
135 | switch (cs->exception_index) { | ||
136 | case EXCP_PREFETCH_ABORT: | ||
137 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, | ||
138 | return !(*prot & (1 << access_type)); | ||
139 | } | ||
140 | |||
141 | +static bool v8m_is_sau_exempt(CPUARMState *env, | ||
142 | + uint32_t address, MMUAccessType access_type) | ||
143 | +{ | ||
144 | + /* The architecture specifies that certain address ranges are | ||
145 | + * exempt from v8M SAU/IDAU checks. | ||
146 | + */ | ||
147 | + return | ||
148 | + (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || | ||
149 | + (address >= 0xe0000000 && address <= 0xe0002fff) || | ||
150 | + (address >= 0xe000e000 && address <= 0xe000efff) || | ||
151 | + (address >= 0xe002e000 && address <= 0xe002efff) || | ||
152 | + (address >= 0xe0040000 && address <= 0xe0041fff) || | ||
153 | + (address >= 0xe00ff000 && address <= 0xe00fffff); | ||
154 | +} | ||
155 | + | ||
156 | +static void v8m_security_lookup(CPUARMState *env, uint32_t address, | ||
157 | + MMUAccessType access_type, ARMMMUIdx mmu_idx, | ||
158 | + V8M_SAttributes *sattrs) | ||
159 | +{ | ||
160 | + /* Look up the security attributes for this address. Compare the | ||
161 | + * pseudocode SecurityCheck() function. | ||
162 | + * We assume the caller has zero-initialized *sattrs. | ||
163 | + */ | ||
164 | + ARMCPU *cpu = arm_env_get_cpu(env); | ||
165 | + int r; | ||
166 | + | ||
167 | + /* TODO: implement IDAU */ | ||
168 | + | ||
169 | + if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { | ||
170 | + /* 0xf0000000..0xffffffff is always S for insn fetches */ | ||
171 | + return; | ||
172 | + } | 46 | + } |
173 | + | 47 | + |
174 | + if (v8m_is_sau_exempt(env, address, access_type)) { | 48 | + if (using_lpae) { |
175 | + sattrs->ns = !regime_is_secure(env, mmu_idx); | 49 | + return arm_fi_to_lfsc(&fi); |
176 | + return; | 50 | + } else { |
177 | + } | 51 | + return arm_fi_to_sfsc(&fi); |
178 | + | ||
179 | + switch (env->sau.ctrl & 3) { | ||
180 | + case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ | ||
181 | + break; | ||
182 | + case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ | ||
183 | + sattrs->ns = true; | ||
184 | + break; | ||
185 | + default: /* SAU.ENABLE == 1 */ | ||
186 | + for (r = 0; r < cpu->sau_sregion; r++) { | ||
187 | + if (env->sau.rlar[r] & 1) { | ||
188 | + uint32_t base = env->sau.rbar[r] & ~0x1f; | ||
189 | + uint32_t limit = env->sau.rlar[r] | 0x1f; | ||
190 | + | ||
191 | + if (base <= address && limit >= address) { | ||
192 | + if (sattrs->srvalid) { | ||
193 | + /* If we hit in more than one region then we must report | ||
194 | + * as Secure, not NS-Callable, with no valid region | ||
195 | + * number info. | ||
196 | + */ | ||
197 | + sattrs->ns = false; | ||
198 | + sattrs->nsc = false; | ||
199 | + sattrs->sregion = 0; | ||
200 | + sattrs->srvalid = false; | ||
201 | + break; | ||
202 | + } else { | ||
203 | + if (env->sau.rlar[r] & 2) { | ||
204 | + sattrs->nsc = true; | ||
205 | + } else { | ||
206 | + sattrs->ns = true; | ||
207 | + } | ||
208 | + sattrs->srvalid = true; | ||
209 | + sattrs->sregion = r; | ||
210 | + } | ||
211 | + } | ||
212 | + } | ||
213 | + } | ||
214 | + | ||
215 | + /* TODO when we support the IDAU then it may override the result here */ | ||
216 | + break; | ||
217 | + } | 52 | + } |
218 | +} | 53 | +} |
219 | + | 54 | + |
220 | static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, | 55 | #endif |
221 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | 56 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c |
222 | - hwaddr *phys_ptr, int *prot, uint32_t *fsr) | 57 | index XXXXXXX..XXXXXXX 100644 |
223 | + hwaddr *phys_ptr, MemTxAttrs *txattrs, | 58 | --- a/target/arm/op_helper.c |
224 | + int *prot, uint32_t *fsr) | 59 | +++ b/target/arm/op_helper.c |
225 | { | 60 | @@ -XXX,XX +XXX,XX @@ void arm_debug_excp_handler(CPUState *cs) |
226 | ARMCPU *cpu = arm_env_get_cpu(env); | 61 | |
227 | bool is_user = regime_is_user(env, mmu_idx); | 62 | cs->watchpoint_hit = NULL; |
228 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, | 63 | |
229 | int n; | 64 | - if (extended_addresses_enabled(env)) { |
230 | int matchregion = -1; | 65 | - env->exception.fsr = (1 << 9) | 0x22; |
231 | bool hit = false; | 66 | - } else { |
232 | + V8M_SAttributes sattrs = {}; | 67 | - env->exception.fsr = 0x2; |
233 | 68 | - } | |
234 | *phys_ptr = address; | 69 | + env->exception.fsr = arm_debug_exception_fsr(env); |
235 | *prot = 0; | 70 | env->exception.vaddress = wp_hit->hitaddr; |
236 | 71 | raise_exception(env, EXCP_DATA_ABORT, | |
237 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | 72 | syn_watchpoint(same_el, 0, wnr), |
238 | + v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); | 73 | @@ -XXX,XX +XXX,XX @@ void arm_debug_excp_handler(CPUState *cs) |
239 | + if (access_type == MMU_INST_FETCH) { | 74 | return; |
240 | + /* Instruction fetches always use the MMU bank and the | 75 | } |
241 | + * transaction attribute determined by the fetch address, | 76 | |
242 | + * regardless of CPU state. This is painful for QEMU | 77 | - if (extended_addresses_enabled(env)) { |
243 | + * to handle, because it would mean we need to encode | 78 | - env->exception.fsr = (1 << 9) | 0x22; |
244 | + * into the mmu_idx not just the (user, negpri) information | 79 | - } else { |
245 | + * for the current security state but also that for the | 80 | - env->exception.fsr = 0x2; |
246 | + * other security state, which would balloon the number | 81 | - } |
247 | + * of mmu_idx values needed alarmingly. | 82 | + env->exception.fsr = arm_debug_exception_fsr(env); |
248 | + * Fortunately we can avoid this because it's not actually | 83 | /* FAR is UNKNOWN, so doesn't need setting */ |
249 | + * possible to arbitrarily execute code from memory with | 84 | raise_exception(env, EXCP_PREFETCH_ABORT, |
250 | + * the wrong security attribute: it will always generate | 85 | syn_breakpoint(same_el), |
251 | + * an exception of some kind or another, apart from the | ||
252 | + * special case of an NS CPU executing an SG instruction | ||
253 | + * in S&NSC memory. So we always just fail the translation | ||
254 | + * here and sort things out in the exception handler | ||
255 | + * (including possibly emulating an SG instruction). | ||
256 | + */ | ||
257 | + if (sattrs.ns != !secure) { | ||
258 | + *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT; | ||
259 | + return true; | ||
260 | + } | ||
261 | + } else { | ||
262 | + /* For data accesses we always use the MMU bank indicated | ||
263 | + * by the current CPU state, but the security attributes | ||
264 | + * might downgrade a secure access to nonsecure. | ||
265 | + */ | ||
266 | + if (sattrs.ns) { | ||
267 | + txattrs->secure = false; | ||
268 | + } else if (!secure) { | ||
269 | + /* NS access to S memory must fault. | ||
270 | + * Architecturally we should first check whether the | ||
271 | + * MPU information for this address indicates that we | ||
272 | + * are doing an unaligned access to Device memory, which | ||
273 | + * should generate a UsageFault instead. QEMU does not | ||
274 | + * currently check for that kind of unaligned access though. | ||
275 | + * If we added it we would need to do so as a special case | ||
276 | + * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). | ||
277 | + */ | ||
278 | + *fsr = M_FAKE_FSR_SFAULT; | ||
279 | + return true; | ||
280 | + } | ||
281 | + } | ||
282 | + } | ||
283 | + | ||
284 | /* Unlike the ARM ARM pseudocode, we don't need to check whether this | ||
285 | * was an exception vector read from the vector table (which is always | ||
286 | * done using the default system address map), because those accesses | ||
287 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, | ||
288 | if (arm_feature(env, ARM_FEATURE_V8)) { | ||
289 | /* PMSAv8 */ | ||
290 | ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, | ||
291 | - phys_ptr, prot, fsr); | ||
292 | + phys_ptr, attrs, prot, fsr); | ||
293 | } else if (arm_feature(env, ARM_FEATURE_V7)) { | ||
294 | /* PMSAv7 */ | ||
295 | ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, | ||
296 | -- | 86 | -- |
297 | 2.7.4 | 87 | 2.16.2 |
298 | 88 | ||
1 | Currently our M profile exception return code switches to the | 1 | Now that we have a helper function specifically for the BRK and |
---|---|---|---|
2 | target stack pointer relatively early in the process, before | 2 | BKPT instructions, we can set the exception.fsr there rather |
3 | it tries to pop the exception frame off the stack. This is | 3 | than in arm_cpu_do_interrupt_aarch32(). This allows us to |
4 | awkward for v8M for two reasons: | 4 | use our new arm_debug_exception_fsr() helper. |
5 | * in v8M the process vs main stack pointer is not selected | ||
6 | purely by the value of CONTROL.SPSEL, so updating SPSEL | ||
7 | and relying on that to switch to the right stack pointer | ||
8 | won't work | ||
9 | * the stack we should be reading the stack frame from and | ||
10 | the stack we will eventually switch to might not be the | ||
11 | same if the guest is doing strange things | ||
12 | 5 | ||
13 | Change our exception return code to use a 'frame pointer' | 6 | In particular this fixes a bug where we were hardcoding the |
14 | to read the exception frame rather than assuming that we | 7 | short-form IFSR value, which is wrong if the target exception |
15 | can switch the live stack pointer this early. | 8 | level has LPAE enabled. |
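For the frame-pointer rework, a quick reference sketch of the basic
exception frame that the new frame pointer walks; the offsets match the
ldl_phys() calls added in the diff below:

    /* Basic v7M/v8M exception frame, relative to the frame pointer:
     *
     *   0x00  r0        0x10  r12
     *   0x04  r1        0x14  lr (r14)
     *   0x08  r2        0x18  return address (pc)
     *   0x0c  r3        0x1c  xPSR (bit 9 records the SP-realign padding)
     *
     * After the pop the stack pointer is advanced by 0x20, and if the
     * stacked xPSR.SPREALIGN bit was set it is ORed with 4 to undo the
     * extra alignment padding word.
     */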
16 | 9 | ||
10 | Fixes: https://bugs.launchpad.net/qemu/+bug/1756927 | ||
17 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
18 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 12 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
19 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 13 | Message-id: 20180320134114.30418-4-peter.maydell@linaro.org |
20 | Message-id: 1506092407-26985-3-git-send-email-peter.maydell@linaro.org | ||
21 | --- | 14 | --- |
22 | target/arm/helper.c | 130 +++++++++++++++++++++++++++++++++++++++------------- | 15 | target/arm/helper.c | 1 - |
23 | 1 file changed, 98 insertions(+), 32 deletions(-) | 16 | target/arm/op_helper.c | 2 ++ |
17 | 2 files changed, 2 insertions(+), 1 deletion(-) | ||
24 | 18 | ||
25 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 19 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
26 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/target/arm/helper.c | 21 | --- a/target/arm/helper.c |
28 | +++ b/target/arm/helper.c | 22 | +++ b/target/arm/helper.c |
29 | @@ -XXX,XX +XXX,XX @@ static void v7m_push(CPUARMState *env, uint32_t val) | 23 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) |
30 | stl_phys(cs->as, env->regs[13], val); | 24 | offset = 0; |
25 | break; | ||
26 | case EXCP_BKPT: | ||
27 | - env->exception.fsr = 2; | ||
28 | /* Fall through to prefetch abort. */ | ||
29 | case EXCP_PREFETCH_ABORT: | ||
30 | A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); | ||
31 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/target/arm/op_helper.c | ||
34 | +++ b/target/arm/op_helper.c | ||
35 | @@ -XXX,XX +XXX,XX @@ void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, | ||
36 | */ | ||
37 | void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) | ||
38 | { | ||
39 | + /* FSR will only be used if the debug target EL is AArch32. */ | ||
40 | + env->exception.fsr = arm_debug_exception_fsr(env); | ||
41 | raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env)); | ||
31 | } | 42 | } |
32 | 43 | ||
33 | -static uint32_t v7m_pop(CPUARMState *env) | ||
34 | -{ | ||
35 | - CPUState *cs = CPU(arm_env_get_cpu(env)); | ||
36 | - uint32_t val; | ||
37 | - | ||
38 | - val = ldl_phys(cs->as, env->regs[13]); | ||
39 | - env->regs[13] += 4; | ||
40 | - return val; | ||
41 | -} | ||
42 | - | ||
43 | /* Return true if we're using the process stack pointer (not the MSP) */ | ||
44 | static bool v7m_using_psp(CPUARMState *env) | ||
45 | { | ||
46 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) | ||
47 | env->regs[15] = dest & ~1; | ||
48 | } | ||
49 | |||
50 | +static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, | ||
51 | + bool spsel) | ||
52 | +{ | ||
53 | + /* Return a pointer to the location where we currently store the | ||
54 | + * stack pointer for the requested security state and thread mode. | ||
55 | + * This pointer will become invalid if the CPU state is updated | ||
56 | + * such that the stack pointers are switched around (eg changing | ||
57 | + * the SPSEL control bit). | ||
58 | + * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). | ||
59 | + * Unlike that pseudocode, we require the caller to pass us in the | ||
60 | + * SPSEL control bit value; this is because we also use this | ||
61 | + * function in handling of pushing of the callee-saves registers | ||
62 | + * part of the v8M stack frame (pseudocode PushCalleeStack()), | ||
63 | + * and in the tailchain codepath the SPSEL bit comes from the exception | ||
64 | + * return magic LR value from the previous exception. The pseudocode | ||
65 | + * opencodes the stack-selection in PushCalleeStack(), but we prefer | ||
66 | + * to make this utility function generic enough to do the job. | ||
67 | + */ | ||
68 | + bool want_psp = threadmode && spsel; | ||
69 | + | ||
70 | + if (secure == env->v7m.secure) { | ||
71 | + /* Currently switch_v7m_sp switches SP as it updates SPSEL, | ||
72 | + * so the SP we want is always in regs[13]. | ||
73 | + * When we decouple SPSEL from the actually selected SP | ||
74 | + * we need to check want_psp against v7m_using_psp() | ||
75 | + * to see whether we need regs[13] or v7m.other_sp. | ||
76 | + */ | ||
77 | + return &env->regs[13]; | ||
78 | + } else { | ||
79 | + if (want_psp) { | ||
80 | + return &env->v7m.other_ss_psp; | ||
81 | + } else { | ||
82 | + return &env->v7m.other_ss_msp; | ||
83 | + } | ||
84 | + } | ||
85 | +} | ||
86 | + | ||
87 | static uint32_t arm_v7m_load_vector(ARMCPU *cpu) | ||
88 | { | ||
89 | CPUState *cs = CPU(cpu); | ||
90 | @@ -XXX,XX +XXX,XX @@ static void v7m_push_stack(ARMCPU *cpu) | ||
91 | static void do_v7m_exception_exit(ARMCPU *cpu) | ||
92 | { | ||
93 | CPUARMState *env = &cpu->env; | ||
94 | + CPUState *cs = CPU(cpu); | ||
95 | uint32_t excret; | ||
96 | uint32_t xpsr; | ||
97 | bool ufault = false; | ||
98 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
99 | bool return_to_handler = false; | ||
100 | bool rettobase = false; | ||
101 | bool exc_secure = false; | ||
102 | + bool return_to_secure; | ||
103 | |||
104 | /* We can only get here from an EXCP_EXCEPTION_EXIT, and | ||
105 | * gen_bx_excret() enforces the architectural rule | ||
106 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
107 | g_assert_not_reached(); | ||
108 | } | ||
109 | |||
110 | + return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
111 | + (excret & R_V7M_EXCRET_S_MASK); | ||
112 | + | ||
113 | switch (excret & 0xf) { | ||
114 | case 1: /* Return to Handler */ | ||
115 | return_to_handler = true; | ||
116 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
117 | return; | ||
118 | } | ||
119 | |||
120 | - /* Switch to the target stack. */ | ||
121 | + /* Set CONTROL.SPSEL from excret.SPSEL. For QEMU this currently | ||
122 | + * causes us to switch the active SP, but we will change this | ||
123 | + * later to not do that so we can support v8M. | ||
124 | + */ | ||
125 | switch_v7m_sp(env, return_to_sp_process); | ||
126 | - /* Pop registers. */ | ||
127 | - env->regs[0] = v7m_pop(env); | ||
128 | - env->regs[1] = v7m_pop(env); | ||
129 | - env->regs[2] = v7m_pop(env); | ||
130 | - env->regs[3] = v7m_pop(env); | ||
131 | - env->regs[12] = v7m_pop(env); | ||
132 | - env->regs[14] = v7m_pop(env); | ||
133 | - env->regs[15] = v7m_pop(env); | ||
134 | - if (env->regs[15] & 1) { | ||
135 | - qemu_log_mask(LOG_GUEST_ERROR, | ||
136 | - "M profile return from interrupt with misaligned " | ||
137 | - "PC is UNPREDICTABLE\n"); | ||
138 | - /* Actual hardware seems to ignore the lsbit, and there are several | ||
139 | - * RTOSes out there which incorrectly assume the r15 in the stack | ||
140 | - * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value. | ||
141 | + | ||
142 | + { | ||
143 | + /* The stack pointer we should be reading the exception frame from | ||
144 | + * depends on bits in the magic exception return type value (and | ||
145 | + * for v8M isn't necessarily the stack pointer we will eventually | ||
146 | + * end up resuming execution with). Get a pointer to the location | ||
147 | + * in the CPU state struct where the SP we need is currently being | ||
148 | + * stored; we will use and modify it in place. | ||
149 | + * We use this limited C variable scope so we don't accidentally | ||
150 | + * use 'frame_sp_p' after we do something that makes it invalid. | ||
151 | + */ | ||
152 | + uint32_t *frame_sp_p = get_v7m_sp_ptr(env, | ||
153 | + return_to_secure, | ||
154 | + !return_to_handler, | ||
155 | + return_to_sp_process); | ||
156 | + uint32_t frameptr = *frame_sp_p; | ||
157 | + | ||
158 | + /* Pop registers. TODO: make these accesses use the correct | ||
159 | + * attributes and address space (S/NS, priv/unpriv) and handle | ||
160 | + * memory transaction failures. | ||
161 | */ | ||
162 | - env->regs[15] &= ~1U; | ||
163 | + env->regs[0] = ldl_phys(cs->as, frameptr); | ||
164 | + env->regs[1] = ldl_phys(cs->as, frameptr + 0x4); | ||
165 | + env->regs[2] = ldl_phys(cs->as, frameptr + 0x8); | ||
166 | + env->regs[3] = ldl_phys(cs->as, frameptr + 0xc); | ||
167 | + env->regs[12] = ldl_phys(cs->as, frameptr + 0x10); | ||
168 | + env->regs[14] = ldl_phys(cs->as, frameptr + 0x14); | ||
169 | + env->regs[15] = ldl_phys(cs->as, frameptr + 0x18); | ||
170 | + if (env->regs[15] & 1) { | ||
171 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
172 | + "M profile return from interrupt with misaligned " | ||
173 | + "PC is UNPREDICTABLE\n"); | ||
174 | + /* Actual hardware seems to ignore the lsbit, and there are several | ||
175 | + * RTOSes out there which incorrectly assume the r15 in the stack | ||
176 | + * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value. | ||
177 | + */ | ||
178 | + env->regs[15] &= ~1U; | ||
179 | + } | ||
180 | + xpsr = ldl_phys(cs->as, frameptr + 0x1c); | ||
181 | + | ||
182 | + /* Commit to consuming the stack frame */ | ||
183 | + frameptr += 0x20; | ||
184 | + /* Undo stack alignment (the SPREALIGN bit indicates that the original | ||
185 | + * pre-exception SP was not 8-aligned and we added a padding word to | ||
186 | + * align it, so we undo this by ORing in the bit that increases it | ||
187 | + * from the current 8-aligned value to the 8-unaligned value. (Adding 4 | ||
188 | + * would work too but a logical OR is how the pseudocode specifies it.) | ||
189 | + */ | ||
190 | + if (xpsr & XPSR_SPREALIGN) { | ||
191 | + frameptr |= 4; | ||
192 | + } | ||
193 | + *frame_sp_p = frameptr; | ||
194 | } | ||
195 | - xpsr = v7m_pop(env); | ||
196 | + /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ | ||
197 | xpsr_write(env, xpsr, ~XPSR_SPREALIGN); | ||
198 | - /* Undo stack alignment. */ | ||
199 | - if (xpsr & XPSR_SPREALIGN) { | ||
200 | - env->regs[13] |= 4; | ||
201 | - } | ||
202 | |||
203 | /* The restored xPSR exception field will be zero if we're | ||
204 | * resuming in Thread mode. If that doesn't match what the | ||
205 | -- | 44 | -- |
206 | 2.7.4 | 45 | 2.16.2 |
207 | 46 | ||
Deleted patch | |||
---|---|---|---|
1 | Now that we can handle the CONTROL.SPSEL bit not necessarily being | ||
2 | in sync with the current stack pointer, we can restore the correct | ||
3 | security state on exception return. This happens before we start | ||
4 | to read registers off the stack frame, but after we have taken | ||
5 | possible usage faults for bad exception return magic values and | ||
6 | updated CONTROL.SPSEL. | ||
7 | 1 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-id: 1506092407-26985-5-git-send-email-peter.maydell@linaro.org | ||
11 | --- | ||
12 | target/arm/helper.c | 2 ++ | ||
13 | 1 file changed, 2 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/helper.c | ||
18 | +++ b/target/arm/helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
20 | */ | ||
21 | write_v7m_control_spsel(env, return_to_sp_process); | ||
22 | |||
23 | + switch_v7m_security_state(env, return_to_secure); | ||
24 | + | ||
25 | { | ||
26 | /* The stack pointer we should be reading the exception frame from | ||
27 | * depends on bits in the magic exception return type value (and | ||
28 | -- | ||
29 | 2.7.4 | ||
30 | |||
Deleted patch | |||
---|---|---|---|
1 | ARM v8M specifies that the INVPC usage fault for mismatched | ||
2 | xPSR exception field and handler mode bit should be checked | ||
3 | before updating the PSR and SP, so that the fault is taken | ||
4 | with the existing stack frame rather than by pushing a new one. | ||
5 | Perform this check in the right place for v8M. | ||
6 | 1 | ||
7 | Since v7M specifies in its pseudocode that this usage fault | ||
8 | check should happen later, we have to retain the original | ||
9 | code for that check rather than being able to merge the two. | ||
10 | (The distinction is architecturally visible but only in | ||
11 | very obscure corner cases like attempting an invalid exception | ||
12 | return with an exception frame in read-only memory.) | ||
13 | |||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Message-id: 1506092407-26985-7-git-send-email-peter.maydell@linaro.org | ||
17 | --- | ||
18 | target/arm/helper.c | 30 +++++++++++++++++++++++++++--- | ||
19 | 1 file changed, 27 insertions(+), 3 deletions(-) | ||
20 | |||
21 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/target/arm/helper.c | ||
24 | +++ b/target/arm/helper.c | ||
25 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
26 | } | ||
27 | xpsr = ldl_phys(cs->as, frameptr + 0x1c); | ||
28 | |||
29 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
30 | + /* For v8M we have to check whether the xPSR exception field | ||
31 | + * matches the EXCRET value for return to handler/thread | ||
32 | + * before we commit to changing the SP and xPSR. | ||
33 | + */ | ||
34 | + bool will_be_handler = (xpsr & XPSR_EXCP) != 0; | ||
35 | + if (return_to_handler != will_be_handler) { | ||
36 | + /* Take an INVPC UsageFault on the current stack. | ||
37 | + * By this point we will have switched to the security state | ||
38 | + * for the background state, so this UsageFault will target | ||
39 | + * that state. | ||
40 | + */ | ||
41 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | ||
42 | + env->v7m.secure); | ||
43 | + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
44 | + v7m_exception_taken(cpu, excret); | ||
45 | + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
46 | + "stackframe: failed exception return integrity " | ||
47 | + "check\n"); | ||
48 | + return; | ||
49 | + } | ||
50 | + } | ||
51 | + | ||
52 | /* Commit to consuming the stack frame */ | ||
53 | frameptr += 0x20; | ||
54 | /* Undo stack alignment (the SPREALIGN bit indicates that the original | ||
55 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
56 | /* The restored xPSR exception field will be zero if we're | ||
57 | * resuming in Thread mode. If that doesn't match what the | ||
58 | * exception return excret specified then this is a UsageFault. | ||
59 | + * v7M requires we make this check here; v8M did it earlier. | ||
60 | */ | ||
61 | if (return_to_handler != arm_v7m_is_handler_mode(env)) { | ||
62 | - /* Take an INVPC UsageFault by pushing the stack again. | ||
63 | - * TODO: the v8M version of this code should target the | ||
64 | - * background state for this exception. | ||
65 | + /* Take an INVPC UsageFault by pushing the stack again; | ||
66 | + * we know we're v7M so this is never a Secure UsageFault. | ||
67 | */ | ||
68 | + assert(!arm_feature(env, ARM_FEATURE_V8)); | ||
69 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); | ||
70 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
71 | v7m_push_stack(cpu); | ||
72 | -- | ||
73 | 2.7.4 | ||
74 | |||
Deleted patch | |||
---|---|---|---|
1 | Attempting to do an exception return with an exception frame that | ||
2 | is not 8-aligned is UNPREDICTABLE in v8M; warn about this. | ||
3 | (It is not UNPREDICTABLE in v7M, and our implementation can | ||
4 | handle the merely-4-aligned case fine, so we don't need to | ||
5 | do anything except warn.) | ||
6 | 1 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-id: 1506092407-26985-8-git-send-email-peter.maydell@linaro.org | ||
11 | --- | ||
12 | target/arm/helper.c | 7 +++++++ | ||
13 | 1 file changed, 7 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/helper.c | ||
18 | +++ b/target/arm/helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
20 | return_to_sp_process); | ||
21 | uint32_t frameptr = *frame_sp_p; | ||
22 | |||
23 | + if (!QEMU_IS_ALIGNED(frameptr, 8) && | ||
24 | + arm_feature(env, ARM_FEATURE_V8)) { | ||
25 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
26 | + "M profile exception return with non-8-aligned SP " | ||
27 | + "for destination state is UNPREDICTABLE\n"); | ||
28 | + } | ||
29 | + | ||
30 | /* Pop registers. TODO: make these accesses use the correct | ||
31 | * attributes and address space (S/NS, priv/unpriv) and handle | ||
32 | * memory transaction failures. | ||
33 | -- | ||
34 | 2.7.4 | ||
35 | |||
Deleted patch | |||
---|---|---|---|
1 | In the v8M architecture, return from an exception to a PC which | ||
2 | has bit 0 set is not UNPREDICTABLE; it is defined that bit 0 | ||
3 | is discarded [R_HRJH]. Restrict our complaint about this to v7M. | ||
4 | 1 | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 1506092407-26985-9-git-send-email-peter.maydell@linaro.org | ||
9 | --- | ||
10 | target/arm/helper.c | 22 +++++++++++++++------- | ||
11 | 1 file changed, 15 insertions(+), 7 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/helper.c | ||
16 | +++ b/target/arm/helper.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
18 | env->regs[12] = ldl_phys(cs->as, frameptr + 0x10); | ||
19 | env->regs[14] = ldl_phys(cs->as, frameptr + 0x14); | ||
20 | env->regs[15] = ldl_phys(cs->as, frameptr + 0x18); | ||
21 | + | ||
22 | + /* Returning from an exception with a PC with bit 0 set is defined | ||
23 | + * behaviour on v8M (bit 0 is ignored), but for v7M it was specified | ||
24 | + * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore | ||
25 | + * the lsbit, and there are several RTOSes out there which incorrectly | ||
26 | + * assume the r15 in the stack frame should be a Thumb-style "lsbit | ||
27 | + * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but | ||
28 | + * complain about the badly behaved guest. | ||
29 | + */ | ||
30 | if (env->regs[15] & 1) { | ||
31 | - qemu_log_mask(LOG_GUEST_ERROR, | ||
32 | - "M profile return from interrupt with misaligned " | ||
33 | - "PC is UNPREDICTABLE\n"); | ||
34 | - /* Actual hardware seems to ignore the lsbit, and there are several | ||
35 | - * RTOSes out there which incorrectly assume the r15 in the stack | ||
36 | - * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value. | ||
37 | - */ | ||
38 | env->regs[15] &= ~1U; | ||
39 | + if (!arm_feature(env, ARM_FEATURE_V8)) { | ||
40 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
41 | + "M profile return from interrupt with misaligned " | ||
42 | + "PC is UNPREDICTABLE on v7M\n"); | ||
43 | + } | ||
44 | } | ||
45 | + | ||
46 | xpsr = ldl_phys(cs->as, frameptr + 0x1c); | ||
47 | |||
48 | if (arm_feature(env, ARM_FEATURE_V8)) { | ||
49 | -- | ||
50 | 2.7.4 | ||
51 | |||
Deleted patch | |||
---|---|---|---|
1 | Add the new M profile Secure Fault Status Register | ||
2 | and Secure Fault Address Register. | ||
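As a guest-side usage sketch (not part of this patch: the 0xe000ede4 and
0xe000ede8 addresses assume the standard System Control Space base that
the 0xde4/0xde8 offsets below map to), a Secure fault handler would read
SFAR while SFSR.SFARVALID is set and then write-one-to-clear the status:

    #include <stdint.h>

    #define SFSR           (*(volatile uint32_t *)0xe000ede4)
    #define SFAR           (*(volatile uint32_t *)0xe000ede8)
    #define SFSR_SFARVALID (1u << 6)

    void SecureFault_Handler(void)
    {
        uint32_t status = SFSR;

        if (status & SFSR_SFARVALID) {
            uint32_t fault_addr = SFAR; /* only meaningful while SFARVALID */
            (void)fault_addr;           /* ...log or handle it... */
        }
        SFSR = status;                  /* W1C: clear the bits we handled */
    }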
3 | 1 | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 1506092407-26985-10-git-send-email-peter.maydell@linaro.org | ||
7 | --- | ||
8 | target/arm/cpu.h | 12 ++++++++++++ | ||
9 | hw/intc/armv7m_nvic.c | 34 ++++++++++++++++++++++++++++++++++ | ||
10 | target/arm/machine.c | 2 ++ | ||
11 | 3 files changed, 48 insertions(+) | ||
12 | |||
13 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/cpu.h | ||
16 | +++ b/target/arm/cpu.h | ||
17 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | ||
18 | uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */ | ||
19 | uint32_t hfsr; /* HardFault Status */ | ||
20 | uint32_t dfsr; /* Debug Fault Status Register */ | ||
21 | + uint32_t sfsr; /* Secure Fault Status Register */ | ||
22 | uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */ | ||
23 | uint32_t bfar; /* BusFault Address */ | ||
24 | + uint32_t sfar; /* Secure Fault Address Register */ | ||
25 | unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */ | ||
26 | int exception; | ||
27 | uint32_t primask[M_REG_NUM_BANKS]; | ||
28 | @@ -XXX,XX +XXX,XX @@ FIELD(V7M_DFSR, DWTTRAP, 2, 1) | ||
29 | FIELD(V7M_DFSR, VCATCH, 3, 1) | ||
30 | FIELD(V7M_DFSR, EXTERNAL, 4, 1) | ||
31 | |||
32 | +/* V7M SFSR bits */ | ||
33 | +FIELD(V7M_SFSR, INVEP, 0, 1) | ||
34 | +FIELD(V7M_SFSR, INVIS, 1, 1) | ||
35 | +FIELD(V7M_SFSR, INVER, 2, 1) | ||
36 | +FIELD(V7M_SFSR, AUVIOL, 3, 1) | ||
37 | +FIELD(V7M_SFSR, INVTRAN, 4, 1) | ||
38 | +FIELD(V7M_SFSR, LSPERR, 5, 1) | ||
39 | +FIELD(V7M_SFSR, SFARVALID, 6, 1) | ||
40 | +FIELD(V7M_SFSR, LSERR, 7, 1) | ||
41 | + | ||
42 | /* v7M MPU_CTRL bits */ | ||
43 | FIELD(V7M_MPU_CTRL, ENABLE, 0, 1) | ||
44 | FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1) | ||
45 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/hw/intc/armv7m_nvic.c | ||
48 | +++ b/hw/intc/armv7m_nvic.c | ||
49 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | ||
50 | goto bad_offset; | ||
51 | } | ||
52 | return cpu->env.pmsav8.mair1[attrs.secure]; | ||
53 | + case 0xde4: /* SFSR */ | ||
54 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
55 | + goto bad_offset; | ||
56 | + } | ||
57 | + if (!attrs.secure) { | ||
58 | + return 0; | ||
59 | + } | ||
60 | + return cpu->env.v7m.sfsr; | ||
61 | + case 0xde8: /* SFAR */ | ||
62 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
63 | + goto bad_offset; | ||
64 | + } | ||
65 | + if (!attrs.secure) { | ||
66 | + return 0; | ||
67 | + } | ||
68 | + return cpu->env.v7m.sfar; | ||
69 | default: | ||
70 | bad_offset: | ||
71 | qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); | ||
72 | @@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, | ||
73 | * only affect cacheability, and we don't implement caching. | ||
74 | */ | ||
75 | break; | ||
76 | + case 0xde4: /* SFSR */ | ||
77 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
78 | + goto bad_offset; | ||
79 | + } | ||
80 | + if (!attrs.secure) { | ||
81 | + return; | ||
82 | + } | ||
83 | + cpu->env.v7m.sfsr &= ~value; /* W1C */ | ||
84 | + break; | ||
85 | + case 0xde8: /* SFAR */ | ||
86 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
87 | + goto bad_offset; | ||
88 | + } | ||
89 | + if (!attrs.secure) { | ||
90 | + return; | ||
91 | + } | ||
92 | + cpu->env.v7m.sfsr = value; | ||
93 | + break; | ||
94 | case 0xf00: /* Software Triggered Interrupt Register */ | ||
95 | { | ||
96 | int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; | ||
97 | diff --git a/target/arm/machine.c b/target/arm/machine.c | ||
98 | index XXXXXXX..XXXXXXX 100644 | ||
99 | --- a/target/arm/machine.c | ||
100 | +++ b/target/arm/machine.c | ||
101 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_security = { | ||
102 | VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU), | ||
103 | VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU), | ||
104 | VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU), | ||
105 | + VMSTATE_UINT32(env.v7m.sfsr, ARMCPU), | ||
106 | + VMSTATE_UINT32(env.v7m.sfar, ARMCPU), | ||
107 | VMSTATE_END_OF_LIST() | ||
108 | } | ||
109 | }; | ||
110 | -- | ||
111 | 2.7.4 | ||
112 | |||
Deleted patch | |||
---|---|---|---|
1 | In v8M, more bits are defined in the exception-return magic | ||
2 | values; update the code that checks these so we accept | ||
3 | the v8M values when the CPU permits them. | ||
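For reference while reading the new checks, the v8M EXC_RETURN layout is
sketched below (the DCRS, S and RES1 fields match the V7M_EXCRET FIELD()
definitions visible in the internals.h hunk earlier in this series; the
remaining bits follow the architecture):

    /* v8M EXC_RETURN ("exception-return magic") bits:
     *
     *   bit 0       ES     exception taken to Secure state
     *   bit 1       res0
     *   bit 2       SPSEL  return stack is the process stack
     *   bit 3       MODE   return to Thread mode (0 = Handler mode)
     *   bit 4       FTYPE  standard (no FP state) stack frame
     *   bit 5       DCRS   default rules for callee-register stacking
     *   bit 6       S      the stacked registers belong to Secure state
     *   bits 31:7   RES1   the must-be-one 0xffffff80 prefix
     *
     * v7M only defines the low four bits, which is why the old code
     * matched specific values of (excret & 0xf).
     */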
4 | 1 | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 1506092407-26985-11-git-send-email-peter.maydell@linaro.org | ||
8 | --- | ||
9 | target/arm/helper.c | 73 ++++++++++++++++++++++++++++++++++++++++++----------- | ||
10 | 1 file changed, 58 insertions(+), 15 deletions(-) | ||
11 | |||
12 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/target/arm/helper.c | ||
15 | +++ b/target/arm/helper.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
17 | uint32_t excret; | ||
18 | uint32_t xpsr; | ||
19 | bool ufault = false; | ||
20 | - bool return_to_sp_process = false; | ||
21 | - bool return_to_handler = false; | ||
22 | + bool sfault = false; | ||
23 | + bool return_to_sp_process; | ||
24 | + bool return_to_handler; | ||
25 | bool rettobase = false; | ||
26 | bool exc_secure = false; | ||
27 | bool return_to_secure; | ||
28 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
29 | excret); | ||
30 | } | ||
31 | |||
32 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
33 | + /* EXC_RETURN.ES validation check (R_SMFL). We must do this before | ||
34 | + * we pick which FAULTMASK to clear. | ||
35 | + */ | ||
36 | + if (!env->v7m.secure && | ||
37 | + ((excret & R_V7M_EXCRET_ES_MASK) || | ||
38 | + !(excret & R_V7M_EXCRET_DCRS_MASK))) { | ||
39 | + sfault = 1; | ||
40 | + /* For all other purposes, treat ES as 0 (R_HXSR) */ | ||
41 | + excret &= ~R_V7M_EXCRET_ES_MASK; | ||
42 | + } | ||
43 | + } | ||
44 | + | ||
45 | if (env->v7m.exception != ARMV7M_EXCP_NMI) { | ||
46 | /* Auto-clear FAULTMASK on return from other than NMI. | ||
47 | * If the security extension is implemented then this only | ||
48 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
49 | g_assert_not_reached(); | ||
50 | } | ||
51 | |||
52 | + return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); | ||
53 | + return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; | ||
54 | return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
55 | (excret & R_V7M_EXCRET_S_MASK); | ||
56 | |||
57 | - switch (excret & 0xf) { | ||
58 | - case 1: /* Return to Handler */ | ||
59 | - return_to_handler = true; | ||
60 | - break; | ||
61 | - case 13: /* Return to Thread using Process stack */ | ||
62 | - return_to_sp_process = true; | ||
63 | - /* fall through */ | ||
64 | - case 9: /* Return to Thread using Main stack */ | ||
65 | - if (!rettobase && | ||
66 | - !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) { | ||
67 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
68 | + if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
69 | + /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); | ||
70 | + * we choose to take the UsageFault. | ||
71 | + */ | ||
72 | + if ((excret & R_V7M_EXCRET_S_MASK) || | ||
73 | + (excret & R_V7M_EXCRET_ES_MASK) || | ||
74 | + !(excret & R_V7M_EXCRET_DCRS_MASK)) { | ||
75 | + ufault = true; | ||
76 | + } | ||
77 | + } | ||
78 | + if (excret & R_V7M_EXCRET_RES0_MASK) { | ||
79 | ufault = true; | ||
80 | } | ||
81 | - break; | ||
82 | - default: | ||
83 | - ufault = true; | ||
84 | + } else { | ||
85 | + /* For v7M we only recognize certain combinations of the low bits */ | ||
86 | + switch (excret & 0xf) { | ||
87 | + case 1: /* Return to Handler */ | ||
88 | + break; | ||
89 | + case 13: /* Return to Thread using Process stack */ | ||
90 | + case 9: /* Return to Thread using Main stack */ | ||
91 | + /* We only need to check NONBASETHRDENA for v7M, because in | ||
92 | + * v8M this bit does not exist (it is RES1). | ||
93 | + */ | ||
94 | + if (!rettobase && | ||
95 | + !(env->v7m.ccr[env->v7m.secure] & | ||
96 | + R_V7M_CCR_NONBASETHRDENA_MASK)) { | ||
97 | + ufault = true; | ||
98 | + } | ||
99 | + break; | ||
100 | + default: | ||
101 | + ufault = true; | ||
102 | + } | ||
103 | + } | ||
104 | + | ||
105 | + if (sfault) { | ||
106 | + env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; | ||
107 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
108 | + v7m_exception_taken(cpu, excret); | ||
109 | + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
110 | + "stackframe: failed EXC_RETURN.ES validity check\n"); | ||
111 | + return; | ||
112 | } | ||
113 | |||
114 | if (ufault) { | ||
115 | -- | ||
116 | 2.7.4 | ||
117 | |||
Deleted patch | |||
---|---|---|---|
1 | For v8M, exceptions from Secure to Non-Secure state will save | ||
2 | callee-saved registers to the exception frame as well as the | ||
3 | caller-saved registers. Add support for unstacking these | ||
4 | registers in exception exit when necessary. | ||
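A sketch of the extended frame being unstacked here (the offsets match the
loads in the diff below):

    /* v8M extended exception frame, pushed when the Secure background
     * state has to stack its callee-saved registers:
     *
     *   0x00  integrity signature (0xfefa125b)
     *   0x04  reserved
     *   0x08  r4        0x18  r8
     *   0x0c  r5        0x1c  r9
     *   0x10  r6        0x20  r10
     *   0x14  r7        0x24  r11
     *   0x28  the usual basic frame: r0-r3, r12, lr, pc, xPSR
     *
     * A mismatched signature means the frame was not written by a
     * matching exception entry and is reported as a SecureFault with
     * SFSR.INVIS set.
     */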
5 | 1 | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 1506092407-26985-12-git-send-email-peter.maydell@linaro.org | ||
9 | --- | ||
10 | target/arm/helper.c | 30 ++++++++++++++++++++++++++++++ | ||
11 | 1 file changed, 30 insertions(+) | ||
12 | |||
13 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/helper.c | ||
16 | +++ b/target/arm/helper.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
18 | "for destination state is UNPREDICTABLE\n"); | ||
19 | } | ||
20 | |||
21 | + /* Do we need to pop callee-saved registers? */ | ||
22 | + if (return_to_secure && | ||
23 | + ((excret & R_V7M_EXCRET_ES_MASK) == 0 || | ||
24 | + (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { | ||
25 | + uint32_t expected_sig = 0xfefa125b; | ||
26 | + uint32_t actual_sig = ldl_phys(cs->as, frameptr); | ||
27 | + | ||
28 | + if (expected_sig != actual_sig) { | ||
29 | + /* Take a SecureFault on the current stack */ | ||
30 | + env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; | ||
31 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
32 | + v7m_exception_taken(cpu, excret); | ||
33 | + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
34 | + "stackframe: failed exception return integrity " | ||
35 | + "signature check\n"); | ||
36 | + return; | ||
37 | + } | ||
38 | + | ||
39 | + env->regs[4] = ldl_phys(cs->as, frameptr + 0x8); | ||
40 | + env->regs[5] = ldl_phys(cs->as, frameptr + 0xc); | ||
41 | + env->regs[6] = ldl_phys(cs->as, frameptr + 0x10); | ||
42 | + env->regs[7] = ldl_phys(cs->as, frameptr + 0x14); | ||
43 | + env->regs[8] = ldl_phys(cs->as, frameptr + 0x18); | ||
44 | + env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c); | ||
45 | + env->regs[10] = ldl_phys(cs->as, frameptr + 0x20); | ||
46 | + env->regs[11] = ldl_phys(cs->as, frameptr + 0x24); | ||
47 | + | ||
48 | + frameptr += 0x28; | ||
49 | + } | ||
50 | + | ||
51 | /* Pop registers. TODO: make these accesses use the correct | ||
52 | * attributes and address space (S/NS, priv/unpriv) and handle | ||
53 | * memory transaction failures. | ||
54 | -- | ||
55 | 2.7.4 | ||
56 | |||
Deleted patch | |||
---|---|---|---|
1 | Add support for v8M and in particular the security extension | ||
2 | to the exception entry code. As sketched after this list, it requires us to: | ||
3 | * calculate the exception-return magic LR value differently | ||
4 | * push the callee-saves registers in certain cases | ||
5 | * clear registers when taking non-secure exceptions to avoid | ||
6 | leaking information from the interrupted secure code | ||
7 | * switch to the correct security state on entry | ||
8 | * use the vector table for the security state we're targeting | ||
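The condensed entry sequence referred to above (shorthand only; the named
helpers are the ones added or used by the code below, and the tailchaining
special cases are omitted):

    /* Condensed view of v8M exception entry, as implemented below:
     *
     *  1. targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
     *  2. if the background code was Secure (EXCRET.S) and the exception
     *     targets Non-secure, push r4-r11 plus the 0xfefa125b integrity
     *     signature (v7m_push_callee_stack()) and adjust EXCRET.DCRS;
     *  3. fix up the ES and SPSEL bits of the exception-return magic LR
     *     to describe the target state;
     *  4. if the target is Non-secure, clear r0-r3, r12 and the APSR
     *     flags, and clear r4-r11 too when the background was Secure
     *     (those values now live only on the Secure stack);
     *  5. switch_v7m_security_state(env, targets_secure), clear
     *     CONTROL.SPSEL, and fetch the vector from the target state's
     *     table via arm_v7m_load_vector(cpu, targets_secure).
     */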
9 | 1 | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 1506092407-26985-13-git-send-email-peter.maydell@linaro.org | ||
13 | --- | ||
14 | target/arm/helper.c | 165 +++++++++++++++++++++++++++++++++++++++++++++------- | ||
15 | 1 file changed, 145 insertions(+), 20 deletions(-) | ||
16 | |||
17 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/helper.c | ||
20 | +++ b/target/arm/helper.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, | ||
22 | } | ||
23 | } | ||
24 | |||
25 | -static uint32_t arm_v7m_load_vector(ARMCPU *cpu) | ||
26 | +static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure) | ||
27 | { | ||
28 | CPUState *cs = CPU(cpu); | ||
29 | CPUARMState *env = &cpu->env; | ||
30 | MemTxResult result; | ||
31 | - hwaddr vec = env->v7m.vecbase[env->v7m.secure] + env->v7m.exception * 4; | ||
32 | + hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4; | ||
33 | uint32_t addr; | ||
34 | |||
35 | addr = address_space_ldl(cs->as, vec, | ||
36 | @@ -XXX,XX +XXX,XX @@ static uint32_t arm_v7m_load_vector(ARMCPU *cpu) | ||
37 | * Since we don't model Lockup, we just report this guest error | ||
38 | * via cpu_abort(). | ||
39 | */ | ||
40 | - cpu_abort(cs, "Failed to read from exception vector table " | ||
41 | - "entry %08x\n", (unsigned)vec); | ||
42 | + cpu_abort(cs, "Failed to read from %s exception vector table " | ||
43 | + "entry %08x\n", targets_secure ? "secure" : "nonsecure", | ||
44 | + (unsigned)vec); | ||
45 | } | ||
46 | return addr; | ||
47 | } | ||
48 | |||
49 | -static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | ||
50 | +static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain) | ||
51 | +{ | ||
52 | + /* For v8M, push the callee-saves register part of the stack frame. | ||
53 | + * Compare the v8M pseudocode PushCalleeStack(). | ||
54 | + * In the tailchaining case this may not be the current stack. | ||
55 | + */ | ||
56 | + CPUARMState *env = &cpu->env; | ||
57 | + CPUState *cs = CPU(cpu); | ||
58 | + uint32_t *frame_sp_p; | ||
59 | + uint32_t frameptr; | ||
60 | + | ||
61 | + if (dotailchain) { | ||
62 | + frame_sp_p = get_v7m_sp_ptr(env, true, | ||
63 | + lr & R_V7M_EXCRET_MODE_MASK, | ||
64 | + lr & R_V7M_EXCRET_SPSEL_MASK); | ||
65 | + } else { | ||
66 | + frame_sp_p = &env->regs[13]; | ||
67 | + } | ||
68 | + | ||
69 | + frameptr = *frame_sp_p - 0x28; | ||
70 | + | ||
71 | + stl_phys(cs->as, frameptr, 0xfefa125b); | ||
72 | + stl_phys(cs->as, frameptr + 0x8, env->regs[4]); | ||
73 | + stl_phys(cs->as, frameptr + 0xc, env->regs[5]); | ||
74 | + stl_phys(cs->as, frameptr + 0x10, env->regs[6]); | ||
75 | + stl_phys(cs->as, frameptr + 0x14, env->regs[7]); | ||
76 | + stl_phys(cs->as, frameptr + 0x18, env->regs[8]); | ||
77 | + stl_phys(cs->as, frameptr + 0x1c, env->regs[9]); | ||
78 | + stl_phys(cs->as, frameptr + 0x20, env->regs[10]); | ||
79 | + stl_phys(cs->as, frameptr + 0x24, env->regs[11]); | ||
80 | + | ||
81 | + *frame_sp_p = frameptr; | ||
82 | +} | ||
83 | + | ||
84 | +static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain) | ||
85 | { | ||
86 | /* Do the "take the exception" parts of exception entry, | ||
87 | * but not the pushing of state to the stack. This is | ||
88 | @@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | ||
89 | */ | ||
90 | CPUARMState *env = &cpu->env; | ||
91 | uint32_t addr; | ||
92 | + bool targets_secure; | ||
93 | + | ||
94 | + targets_secure = armv7m_nvic_acknowledge_irq(env->nvic); | ||
95 | |||
96 | - armv7m_nvic_acknowledge_irq(env->nvic); | ||
97 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
98 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
99 | + (lr & R_V7M_EXCRET_S_MASK)) { | ||
100 | + /* The background code (the owner of the registers in the | ||
101 | + * exception frame) is Secure. This means it may either already | ||
102 | + * have or now needs to push callee-saves registers. | ||
103 | + */ | ||
104 | + if (targets_secure) { | ||
105 | + if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { | ||
106 | + /* We took an exception from Secure to NonSecure | ||
107 | + * (which means the callee-saved registers got stacked) | ||
108 | + * and are now tailchaining to a Secure exception. | ||
109 | + * Clear DCRS so eventual return from this Secure | ||
110 | + * exception unstacks the callee-saved registers. | ||
111 | + */ | ||
112 | + lr &= ~R_V7M_EXCRET_DCRS_MASK; | ||
113 | + } | ||
114 | + } else { | ||
115 | + /* We're going to a non-secure exception; push the | ||
116 | + * callee-saves registers to the stack now, if they're | ||
117 | + * not already saved. | ||
118 | + */ | ||
119 | + if (lr & R_V7M_EXCRET_DCRS_MASK && | ||
120 | + !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { | ||
121 | + v7m_push_callee_stack(cpu, lr, dotailchain); | ||
122 | + } | ||
123 | + lr |= R_V7M_EXCRET_DCRS_MASK; | ||
124 | + } | ||
125 | + } | ||
126 | + | ||
127 | + lr &= ~R_V7M_EXCRET_ES_MASK; | ||
128 | + if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
129 | + lr |= R_V7M_EXCRET_ES_MASK; | ||
130 | + } | ||
131 | + lr &= ~R_V7M_EXCRET_SPSEL_MASK; | ||
132 | + if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { | ||
133 | + lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
134 | + } | ||
135 | + | ||
136 | + /* Clear registers if necessary to prevent non-secure exception | ||
137 | + * code being able to see register values from secure code. | ||
138 | + * Where register values become architecturally UNKNOWN we leave | ||
139 | + * them with their previous values. | ||
140 | + */ | ||
141 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
142 | + if (!targets_secure) { | ||
143 | + /* Always clear the caller-saved registers (they have been | ||
144 | + * pushed to the stack earlier in v7m_push_stack()). | ||
145 | + * Clear callee-saved registers if the background code is | ||
146 | + * Secure (in which case these regs were saved in | ||
147 | + * v7m_push_callee_stack()). | ||
148 | + */ | ||
149 | + int i; | ||
150 | + | ||
151 | + for (i = 0; i < 13; i++) { | ||
152 | + /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ | ||
153 | + if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { | ||
154 | + env->regs[i] = 0; | ||
155 | + } | ||
156 | + } | ||
157 | + /* Clear EAPSR */ | ||
158 | + xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); | ||
159 | + } | ||
160 | + } | ||
161 | + } | ||
162 | + | ||
163 | + /* Switch to target security state -- must do this before writing SPSEL */ | ||
164 | + switch_v7m_security_state(env, targets_secure); | ||
165 | write_v7m_control_spsel(env, 0); | ||
166 | arm_clear_exclusive(env); | ||
167 | /* Clear IT bits */ | ||
168 | env->condexec_bits = 0; | ||
169 | env->regs[14] = lr; | ||
170 | - addr = arm_v7m_load_vector(cpu); | ||
171 | + addr = arm_v7m_load_vector(cpu, targets_secure); | ||
172 | env->regs[15] = addr & 0xfffffffe; | ||
173 | env->thumb = addr & 1; | ||
174 | } | ||
175 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
176 | if (sfault) { | ||
177 | env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; | ||
178 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
179 | - v7m_exception_taken(cpu, excret); | ||
180 | + v7m_exception_taken(cpu, excret, true); | ||
181 | qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
182 | "stackframe: failed EXC_RETURN.ES validity check\n"); | ||
183 | return; | ||
184 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
185 | */ | ||
186 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
187 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); | ||
188 | - v7m_exception_taken(cpu, excret); | ||
189 | + v7m_exception_taken(cpu, excret, true); | ||
190 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
191 | "stackframe: failed exception return integrity check\n"); | ||
192 | return; | ||
193 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
194 | /* Take a SecureFault on the current stack */ | ||
195 | env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; | ||
196 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
197 | - v7m_exception_taken(cpu, excret); | ||
198 | + v7m_exception_taken(cpu, excret, true); | ||
199 | qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
200 | "stackframe: failed exception return integrity " | ||
201 | "signature check\n"); | ||
202 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
203 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | ||
204 | env->v7m.secure); | ||
205 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
206 | - v7m_exception_taken(cpu, excret); | ||
207 | + v7m_exception_taken(cpu, excret, true); | ||
208 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
209 | "stackframe: failed exception return integrity " | ||
210 | "check\n"); | ||
211 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
212 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); | ||
213 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
214 | v7m_push_stack(cpu); | ||
215 | - v7m_exception_taken(cpu, excret); | ||
216 | + v7m_exception_taken(cpu, excret, false); | ||
217 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " | ||
218 | "failed exception return integrity check\n"); | ||
219 | return; | ||
220 | @@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) | ||
221 | return; /* Never happens. Keep compiler happy. */ | ||
222 | } | ||
223 | |||
224 | - lr = R_V7M_EXCRET_RES1_MASK | | ||
225 | - R_V7M_EXCRET_S_MASK | | ||
226 | - R_V7M_EXCRET_DCRS_MASK | | ||
227 | - R_V7M_EXCRET_FTYPE_MASK | | ||
228 | - R_V7M_EXCRET_ES_MASK; | ||
229 | - if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) { | ||
230 | - lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
231 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
232 | + lr = R_V7M_EXCRET_RES1_MASK | | ||
233 | + R_V7M_EXCRET_DCRS_MASK | | ||
234 | + R_V7M_EXCRET_FTYPE_MASK; | ||
235 | + /* The S bit indicates whether we should return to Secure | ||
236 | + * or NonSecure (ie our current state). | ||
237 | + * The ES bit indicates whether we're taking this exception | ||
238 | + * to Secure or NonSecure (ie our target state). We set it | ||
239 | + * later, in v7m_exception_taken(). | ||
240 | + * The SPSEL bit is also set in v7m_exception_taken() for v8M. | ||
241 | + * This corresponds to the ARM ARM pseudocode for v8M setting | ||
242 | + * some LR bits in PushStack() and some in ExceptionTaken(); | ||
243 | + * the distinction matters for the tailchain cases where we | ||
244 | + * can take an exception without pushing the stack. | ||
245 | + */ | ||
246 | + if (env->v7m.secure) { | ||
247 | + lr |= R_V7M_EXCRET_S_MASK; | ||
248 | + } | ||
249 | + } else { | ||
250 | + lr = R_V7M_EXCRET_RES1_MASK | | ||
251 | + R_V7M_EXCRET_S_MASK | | ||
252 | + R_V7M_EXCRET_DCRS_MASK | | ||
253 | + R_V7M_EXCRET_FTYPE_MASK | | ||
254 | + R_V7M_EXCRET_ES_MASK; | ||
255 | + if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { | ||
256 | + lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
257 | + } | ||
258 | } | ||
259 | if (!arm_v7m_is_handler_mode(env)) { | ||
260 | lr |= R_V7M_EXCRET_MODE_MASK; | ||
261 | } | ||
262 | |||
263 | v7m_push_stack(cpu); | ||
264 | - v7m_exception_taken(cpu, lr); | ||
265 | + v7m_exception_taken(cpu, lr, false); | ||
266 | qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception); | ||
267 | } | ||
268 | |||
269 | -- | ||
270 | 2.7.4 | ||
271 | |||
Deleted patch | |||
---|---|---|---|
1 | Implement the register interface for the SAU: SAU_CTRL, | ||
2 | SAU_TYPE, SAU_RNR, SAU_RBAR and SAU_RLAR. None of the | ||
3 | actual behaviour is implemented here; registers just | ||
4 | read back as written. | ||
5 | 1 | ||
6 | When the CPU definition for Cortex-M33 is eventually | ||
7 | added, its initfn will set cpu->sau_sregion, in the same | ||
8 | way that we currently set cpu->pmsav7_dregion for the | ||
9 | M3 and M4. | ||
10 | |||
11 | The number of SAU regions is typically a configurable | ||
12 | CPU parameter, but this patch doesn't provide a QEMU CPU | ||
13 | property for it; we can easily add one when a board | ||
14 | requires it. (A brief usage sketch follows this patch.) | ||
15 | |||
16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
18 | Message-id: 1506092407-26985-14-git-send-email-peter.maydell@linaro.org | ||
19 | --- | ||
20 | target/arm/cpu.h | 10 +++++ | ||
21 | hw/intc/armv7m_nvic.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++ | ||
22 | target/arm/cpu.c | 27 ++++++++++++ | ||
23 | target/arm/machine.c | 14 ++++++ | ||
24 | 4 files changed, 167 insertions(+) | ||
25 | |||
26 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/target/arm/cpu.h | ||
29 | +++ b/target/arm/cpu.h | ||
30 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | ||
31 | uint32_t mair1[M_REG_NUM_BANKS]; | ||
32 | } pmsav8; | ||
33 | |||
34 | + /* v8M SAU */ | ||
35 | + struct { | ||
36 | + uint32_t *rbar; | ||
37 | + uint32_t *rlar; | ||
38 | + uint32_t rnr; | ||
39 | + uint32_t ctrl; | ||
40 | + } sau; | ||
41 | + | ||
42 | void *nvic; | ||
43 | const struct arm_boot_info *boot_info; | ||
44 | /* Store GICv3CPUState to access from this struct */ | ||
45 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | ||
46 | bool has_mpu; | ||
47 | /* PMSAv7 MPU number of supported regions */ | ||
48 | uint32_t pmsav7_dregion; | ||
49 | + /* v8M SAU number of supported regions */ | ||
50 | + uint32_t sau_sregion; | ||
51 | |||
52 | /* PSCI conduit used to invoke PSCI methods | ||
53 | * 0 - disabled, 1 - smc, 2 - hvc | ||
54 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/hw/intc/armv7m_nvic.c | ||
57 | +++ b/hw/intc/armv7m_nvic.c | ||
58 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | ||
59 | goto bad_offset; | ||
60 | } | ||
61 | return cpu->env.pmsav8.mair1[attrs.secure]; | ||
62 | + case 0xdd0: /* SAU_CTRL */ | ||
63 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
64 | + goto bad_offset; | ||
65 | + } | ||
66 | + if (!attrs.secure) { | ||
67 | + return 0; | ||
68 | + } | ||
69 | + return cpu->env.sau.ctrl; | ||
70 | + case 0xdd4: /* SAU_TYPE */ | ||
71 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
72 | + goto bad_offset; | ||
73 | + } | ||
74 | + if (!attrs.secure) { | ||
75 | + return 0; | ||
76 | + } | ||
77 | + return cpu->sau_sregion; | ||
78 | + case 0xdd8: /* SAU_RNR */ | ||
79 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
80 | + goto bad_offset; | ||
81 | + } | ||
82 | + if (!attrs.secure) { | ||
83 | + return 0; | ||
84 | + } | ||
85 | + return cpu->env.sau.rnr; | ||
86 | + case 0xddc: /* SAU_RBAR */ | ||
87 | + { | ||
88 | + int region = cpu->env.sau.rnr; | ||
89 | + | ||
90 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
91 | + goto bad_offset; | ||
92 | + } | ||
93 | + if (!attrs.secure) { | ||
94 | + return 0; | ||
95 | + } | ||
96 | + if (region >= cpu->sau_sregion) { | ||
97 | + return 0; | ||
98 | + } | ||
99 | + return cpu->env.sau.rbar[region]; | ||
100 | + } | ||
101 | + case 0xde0: /* SAU_RLAR */ | ||
102 | + { | ||
103 | + int region = cpu->env.sau.rnr; | ||
104 | + | ||
105 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
106 | + goto bad_offset; | ||
107 | + } | ||
108 | + if (!attrs.secure) { | ||
109 | + return 0; | ||
110 | + } | ||
111 | + if (region >= cpu->sau_sregion) { | ||
112 | + return 0; | ||
113 | + } | ||
114 | + return cpu->env.sau.rlar[region]; | ||
115 | + } | ||
116 | case 0xde4: /* SFSR */ | ||
117 | if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
118 | goto bad_offset; | ||
119 | @@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, | ||
120 | * only affect cacheability, and we don't implement caching. | ||
121 | */ | ||
122 | break; | ||
123 | + case 0xdd0: /* SAU_CTRL */ | ||
124 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
125 | + goto bad_offset; | ||
126 | + } | ||
127 | + if (!attrs.secure) { | ||
128 | + return; | ||
129 | + } | ||
130 | + cpu->env.sau.ctrl = value & 3; | ||
131 | + break; | ||
131 | + case 0xdd4: /* SAU_TYPE */ | ||
132 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
133 | + goto bad_offset; | ||
134 | + } | ||
135 | + break; | ||
136 | + case 0xdd8: /* SAU_RNR */ | ||
137 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
138 | + goto bad_offset; | ||
139 | + } | ||
140 | + if (!attrs.secure) { | ||
141 | + return; | ||
142 | + } | ||
143 | + if (value >= cpu->sau_sregion) { | ||
144 | + qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %" | ||
145 | + PRIu32 "/%" PRIu32 "\n", | ||
146 | + value, cpu->sau_sregion); | ||
147 | + } else { | ||
148 | + cpu->env.sau.rnr = value; | ||
149 | + } | ||
150 | + break; | ||
151 | + case 0xddc: /* SAU_RBAR */ | ||
152 | + { | ||
153 | + int region = cpu->env.sau.rnr; | ||
154 | + | ||
155 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
156 | + goto bad_offset; | ||
157 | + } | ||
158 | + if (!attrs.secure) { | ||
159 | + return; | ||
160 | + } | ||
161 | + if (region >= cpu->sau_sregion) { | ||
162 | + return; | ||
163 | + } | ||
164 | + cpu->env.sau.rbar[region] = value & ~0x1f; | ||
165 | + tlb_flush(CPU(cpu)); | ||
166 | + break; | ||
167 | + } | ||
168 | + case 0xde0: /* SAU_RLAR */ | ||
169 | + { | ||
170 | + int region = cpu->env.sau.rnr; | ||
171 | + | ||
172 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
173 | + goto bad_offset; | ||
174 | + } | ||
175 | + if (!attrs.secure) { | ||
176 | + return; | ||
177 | + } | ||
178 | + if (region >= cpu->sau_sregion) { | ||
179 | + return; | ||
180 | + } | ||
181 | + cpu->env.sau.rlar[region] = value & ~0x1c; | ||
182 | + tlb_flush(CPU(cpu)); | ||
183 | + break; | ||
184 | + } | ||
185 | case 0xde4: /* SFSR */ | ||
186 | if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
187 | goto bad_offset; | ||
188 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
189 | index XXXXXXX..XXXXXXX 100644 | ||
190 | --- a/target/arm/cpu.c | ||
191 | +++ b/target/arm/cpu.c | ||
192 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s) | ||
193 | env->pmsav8.mair1[M_REG_S] = 0; | ||
194 | } | ||
195 | |||
196 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
197 | + if (cpu->sau_sregion > 0) { | ||
198 | + memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion); | ||
199 | + memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion); | ||
200 | + } | ||
201 | + env->sau.rnr = 0; | ||
202 | + /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what | ||
203 | + * the Cortex-M33 does. | ||
204 | + */ | ||
205 | + env->sau.ctrl = 0; | ||
206 | + } | ||
207 | + | ||
208 | set_flush_to_zero(1, &env->vfp.standard_fp_status); | ||
209 | set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); | ||
210 | set_default_nan_mode(1, &env->vfp.standard_fp_status); | ||
211 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
212 | } | ||
213 | } | ||
214 | |||
215 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
216 | + uint32_t nr = cpu->sau_sregion; | ||
217 | + | ||
218 | + if (nr > 0xff) { | ||
219 | + error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr); | ||
220 | + return; | ||
221 | + } | ||
222 | + | ||
223 | + if (nr) { | ||
224 | + env->sau.rbar = g_new0(uint32_t, nr); | ||
225 | + env->sau.rlar = g_new0(uint32_t, nr); | ||
226 | + } | ||
227 | + } | ||
228 | + | ||
229 | if (arm_feature(env, ARM_FEATURE_EL3)) { | ||
230 | set_feature(env, ARM_FEATURE_VBAR); | ||
231 | } | ||
232 | @@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj) | ||
233 | cpu->midr = 0x410fc240; /* r0p0 */ | ||
234 | cpu->pmsav7_dregion = 8; | ||
235 | } | ||
236 | + | ||
237 | static void arm_v7m_class_init(ObjectClass *oc, void *data) | ||
238 | { | ||
239 | CPUClass *cc = CPU_CLASS(oc); | ||
240 | diff --git a/target/arm/machine.c b/target/arm/machine.c | ||
241 | index XXXXXXX..XXXXXXX 100644 | ||
242 | --- a/target/arm/machine.c | ||
243 | +++ b/target/arm/machine.c | ||
244 | @@ -XXX,XX +XXX,XX @@ static bool s_rnr_vmstate_validate(void *opaque, int version_id) | ||
245 | return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion; | ||
246 | } | ||
247 | |||
248 | +static bool sau_rnr_vmstate_validate(void *opaque, int version_id) | ||
249 | +{ | ||
250 | + ARMCPU *cpu = opaque; | ||
251 | + | ||
252 | + return cpu->env.sau.rnr < cpu->sau_sregion; | ||
253 | +} | ||
254 | + | ||
255 | static bool m_security_needed(void *opaque) | ||
256 | { | ||
257 | ARMCPU *cpu = opaque; | ||
258 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_security = { | ||
259 | VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU), | ||
260 | VMSTATE_UINT32(env.v7m.sfsr, ARMCPU), | ||
261 | VMSTATE_UINT32(env.v7m.sfar, ARMCPU), | ||
262 | + VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0, | ||
263 | + vmstate_info_uint32, uint32_t), | ||
264 | + VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0, | ||
265 | + vmstate_info_uint32, uint32_t), | ||
266 | + VMSTATE_UINT32(env.sau.rnr, ARMCPU), | ||
267 | + VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate), | ||
268 | + VMSTATE_UINT32(env.sau.ctrl, ARMCPU), | ||
269 | VMSTATE_END_OF_LIST() | ||
270 | } | ||
271 | }; | ||
272 | -- | ||
273 | 2.7.4 | ||
274 | |||
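The SAU registers modelled by the deleted patch above live in the System Control Space, so the 0xdd0..0xde0 offsets handled in nvic_readl()/nvic_writel() correspond to addresses 0xE000EDD0..0xE000EDE0. As a usage sketch only (bare-metal guest code, not part of the patch; the addresses and field layouts follow the ARMv8-M architecture, and the helper names are invented here), Secure firmware would program a region like this:

#include <stdint.h>

#define SAU_CTRL  (*(volatile uint32_t *)0xE000EDD0)  /* bit 0 ENABLE, bit 1 ALLNS */
#define SAU_TYPE  (*(volatile uint32_t *)0xE000EDD4)  /* bits [7:0] SREGION count */
#define SAU_RNR   (*(volatile uint32_t *)0xE000EDD8)  /* region number to select */
#define SAU_RBAR  (*(volatile uint32_t *)0xE000EDDC)  /* region base, bits [31:5] */
#define SAU_RLAR  (*(volatile uint32_t *)0xE000EDE0)  /* region limit, NSC (bit 1), ENABLE (bit 0) */

/* Configure SAU region n to cover [base, limit]; nsc != 0 marks it
 * Non-secure-callable rather than plain Non-secure.
 */
static void sau_set_region(uint32_t n, uint32_t base, uint32_t limit, int nsc)
{
    if (n >= (SAU_TYPE & 0xff)) {
        return;                             /* region not implemented */
    }
    SAU_RNR  = n;
    SAU_RBAR = base & ~0x1fu;               /* low 5 bits are reserved */
    SAU_RLAR = (limit & ~0x1fu) | (nsc ? 2u : 0u) | 1u;
}

void sau_setup(void)
{
    /* e.g. make 0x00200000..0x003fffff Non-secure, then enable the SAU */
    sau_set_region(0, 0x00200000, 0x003fffff, 0);
    SAU_CTRL = 1;                           /* ENABLE = 1, ALLNS = 0 */
}

With this patch the writes above are only latched and read back; the security attribution that the regions describe is implemented separately.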
1 | In cpu_mmu_index() we try to do this: | 1 | For debug exceptions due to breakpoints or the BKPT instruction which |
---|---|---|---|
2 | if (env->v7m.secure) { | 2 | are taken to AArch32, the Fault Address Register is architecturally |
3 | mmu_idx += ARMMMUIdx_MSUser; | 3 | UNKNOWN. We were using that as license to simply not set |
4 | } | 4 | env->exception.vaddress, but this isn't correct, because it will |
5 | but it will give the wrong answer, because ARMMMUIdx_MSUser | 5 | expose to the guest whatever old value was in that field when |
6 | includes the 0x40 ARM_MMU_IDX_M field, and so does the | 6 | arm_cpu_do_interrupt_aarch32() writes it to the guest IFAR. That old |
7 | mmu_idx we're adding to, and we'll end up with 0x8n rather | 7 | value might be a FAR for a previous guest EL2 or secure exception, in |
8 | than 0x4n. This error is then nullified by the call to | 8 | which case we shouldn't show it to an EL1 or non-secure exception |
9 | arm_to_core_mmu_idx() which masks out the high part, but | 9 | handler. It might also be a non-deterministic value, which is bad |
10 | we're about to factor out the code that calculates the | 10 | for record-and-replay. |
11 | ARMMMUIdx values so it can be used without passing it through | 11 | |
12 | arm_to_core_mmu_idx(), so fix this bug first. | 12 | Clear env->exception.vaddress before taking breakpoint debug |
13 | exceptions, to avoid this minor information leak. | ||
13 | 14 | ||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 15 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
15 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 16 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 17 | Message-id: 20180320134114.30418-5-peter.maydell@linaro.org |
17 | Message-id: 1506092407-26985-16-git-send-email-peter.maydell@linaro.org | ||
18 | --- | 18 | --- |
19 | target/arm/cpu.h | 12 +++++++----- | 19 | target/arm/op_helper.c | 11 ++++++++++- |
20 | 1 file changed, 7 insertions(+), 5 deletions(-) | 20 | 1 file changed, 10 insertions(+), 1 deletion(-) |
21 | 21 | ||
22 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 22 | diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c |
23 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/target/arm/cpu.h | 24 | --- a/target/arm/op_helper.c |
25 | +++ b/target/arm/cpu.h | 25 | +++ b/target/arm/op_helper.c |
26 | @@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) | 26 | @@ -XXX,XX +XXX,XX @@ void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome) |
27 | int el = arm_current_el(env); | 27 | { |
28 | 28 | /* FSR will only be used if the debug target EL is AArch32. */ | |
29 | if (arm_feature(env, ARM_FEATURE_M)) { | 29 | env->exception.fsr = arm_debug_exception_fsr(env); |
30 | - ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv; | 30 | + /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing |
31 | + ARMMMUIdx mmu_idx; | 31 | + * values to the guest that it shouldn't be able to see at its |
32 | 32 | + * exception/security level. | |
33 | - if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) { | 33 | + */ |
34 | - mmu_idx = ARMMMUIdx_MNegPri; | 34 | + env->exception.vaddress = 0; |
35 | + if (el == 0) { | 35 | raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env)); |
36 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser; | 36 | } |
37 | + } else { | 37 | |
38 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv; | 38 | @@ -XXX,XX +XXX,XX @@ void arm_debug_excp_handler(CPUState *cs) |
39 | } | 39 | } |
40 | 40 | ||
41 | - if (env->v7m.secure) { | 41 | env->exception.fsr = arm_debug_exception_fsr(env); |
42 | - mmu_idx += ARMMMUIdx_MSUser; | 42 | - /* FAR is UNKNOWN, so doesn't need setting */ |
43 | + if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) { | 43 | + /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing |
44 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri; | 44 | + * values to the guest that it shouldn't be able to see at its |
45 | } | 45 | + * exception/security level. |
46 | 46 | + */ | |
47 | return arm_to_core_mmu_idx(mmu_idx); | 47 | + env->exception.vaddress = 0; |
48 | raise_exception(env, EXCP_PREFETCH_ABORT, | ||
49 | syn_breakpoint(same_el), | ||
50 | arm_debug_target_el(env)); | ||
48 | -- | 51 | -- |
49 | 2.7.4 | 52 | 2.16.2 |
50 | 53 | ||
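To make the arithmetic in the cpu_mmu_index() commit message above (left-hand column) concrete, here is a worked example. The numeric values are an assumption for illustration only, based on the 0x40 ARM_MMU_IDX_M flag the message quotes; they are not taken from the patch itself:

/* Assume ARM_MMU_IDX_M == 0x40 and the M-profile indexes are
 * (0..n | ARM_MMU_IDX_M), e.g. ARMMMUIdx_MUser = 0x40 and
 * ARMMMUIdx_MSUser = 0x43.
 *
 * Old code, Secure unprivileged case:
 *   mmu_idx  = ARMMMUIdx_MUser;    // 0x40
 *   mmu_idx += ARMMMUIdx_MSUser;   // 0x40 + 0x43 = 0x83 -- the 0x40
 *                                  // flag is counted twice, 0x8n not 0x4n
 *   arm_to_core_mmu_idx(mmu_idx);  // masks the high part away, leaving 3,
 *                                  // so the core index is right by luck
 *
 * New code picks the banked index directly, so the intermediate
 * ARMMMUIdx value is correct even before any masking:
 *   mmu_idx = env->v7m.secure ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser;
 */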