The following changes since commit 6eeea6725a70e6fcb5abba0764496bdab07ddfb3:

  Merge remote-tracking branch 'remotes/huth-gitlab/tags/pull-request-2020-10-06' into staging (2020-10-06 21:13:34 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20201008

for you to fetch changes up to ba118c26e16a97e6ff6de8184057d3420ce16a23:

  target/arm: Make '-cpu max' have a 48-bit PA (2020-10-08 15:24:32 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/ssi/npcm7xx_fiu: Fix handling of unsigned integer
 * hw/arm/fsl-imx25: Fix a typo
 * hw/arm/sbsa-ref : Fix SMMUv3 Initialisation
 * hw/arm/sbsa-ref : allocate IRQs for SMMUv3
 * hw/char/bcm2835_aux: Allow less than 32-bit accesses
 * hw/arm/virt: Implement kvm-steal-time
 * target/arm: Make '-cpu max' have a 48-bit PA

----------------------------------------------------------------
Andrew Jones (6):
      linux headers: sync to 5.9-rc7
      target/arm/kvm: Make uncalled stubs explicitly unreachable
      hw/arm/virt: Move post cpu realize check into its own function
      hw/arm/virt: Move kvm pmu setup to virt_cpu_post_init
      tests/qtest: Restore aarch64 arm-cpu-features test
      hw/arm/virt: Implement kvm-steal-time

Graeme Gregory (2):
      hw/arm/sbsa-ref : Fix SMMUv3 Initialisation
      hw/arm/sbsa-ref : allocate IRQs for SMMUv3

Peter Maydell (1):
      target/arm: Make '-cpu max' have a 48-bit PA

Philippe Mathieu-Daudé (3):
      hw/ssi/npcm7xx_fiu: Fix handling of unsigned integer
      hw/arm/fsl-imx25: Fix a typo
      hw/char/bcm2835_aux: Allow less than 32-bit accesses

 docs/system/arm/cpu-features.rst |  11 ++++
 include/hw/arm/fsl-imx25.h       |   2 +-
 include/hw/arm/virt.h            |   5 ++
 linux-headers/linux/kvm.h        |   6 ++-
 target/arm/cpu.h                 |   4 ++
 target/arm/kvm_arm.h             |  94 ++++++++++++++++++++++++-------
 hw/arm/sbsa-ref.c                |   3 +-
 hw/arm/virt.c                    | 110 ++++++++++++++++++++++-----------
 hw/char/bcm2835_aux.c            |   4 +-
 hw/ssi/npcm7xx_fiu.c             |  12 ++---
 target/arm/cpu.c                 |   8 +++
 target/arm/cpu64.c               |   4 ++
 target/arm/kvm.c                 |  16 ++++++
 target/arm/kvm64.c               |  64 +++++++++++++++++++++--
 target/arm/monitor.c             |   2 +-
 tests/qtest/arm-cpu-features.c   |  25 +++++++--
 hw/ssi/trace-events              |   2 +-
 tests/qtest/meson.build          |   3 +-
 18 files changed, 303 insertions(+), 72 deletions(-)

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Fix integer handling issues reported by Coverity:

hw/ssi/npcm7xx_fiu.c: 162 in npcm7xx_fiu_flash_read()
>>>     CID 1432730:  Integer handling issues  (NEGATIVE_RETURNS)
>>>     "npcm7xx_fiu_cs_index(fiu, f)" is passed to a parameter that cannot be negative.
162             npcm7xx_fiu_select(fiu, npcm7xx_fiu_cs_index(fiu, f));

hw/ssi/npcm7xx_fiu.c: 221 in npcm7xx_fiu_flash_write()
218             cs_id = npcm7xx_fiu_cs_index(fiu, f);
219             trace_npcm7xx_fiu_flash_write(DEVICE(fiu)->canonical_path, cs_id, addr,
220                                           size, v);
>>>     CID 1432729:  Integer handling issues  (NEGATIVE_RETURNS)
>>>     "cs_id" is passed to a parameter that cannot be negative.
221             npcm7xx_fiu_select(fiu, cs_id);

Since the index of the flash cannot be negative, return an
unsigned type.

Reported-by: Coverity (CID 1432729 & 1432730: NEGATIVE_RETURNS)
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Havard Skinnemoen <hskinnemoen@google.com>
Message-id: 20200919132435.310527-1-f4bug@amsat.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
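
As a brief aside on the checker: NEGATIVE_RETURNS fires because pointer
subtraction yields a signed value, so as far as the analyser can tell the
index function could hand a negative number to a parameter used as an
array index. A minimal sketch of the pattern and its fix, using
hypothetical names rather than the QEMU code in the diff below:

    #include <assert.h>
    #include <stddef.h>

    #define NUM_CHIP_SELECTS 4

    struct flash_chip { int dummy; };
    static struct flash_chip chips[NUM_CHIP_SELECTS];

    /*
     * "f - chips" is a signed ptrdiff_t. Asserting the invariant and
     * returning unsigned encodes "this is always a valid index" in the
     * type, which is what satisfies the checker in this patch.
     */
    static unsigned chip_index(const struct flash_chip *f)
    {
        ptrdiff_t index = f - chips;

        assert(index >= 0 && index < NUM_CHIP_SELECTS);
        return (unsigned)index;
    }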
---
 hw/ssi/npcm7xx_fiu.c | 12 ++++++------
 hw/ssi/trace-events  |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/ssi/npcm7xx_fiu.c
+++ b/hw/ssi/npcm7xx_fiu.c
@@ -XXX,XX +XXX,XX @@ enum NPCM7xxFIURegister {
  * Returns the index of flash in the fiu->flash array. This corresponds to the
  * chip select ID of the flash.
  */
-static int npcm7xx_fiu_cs_index(NPCM7xxFIUState *fiu, NPCM7xxFIUFlash *flash)
+static unsigned npcm7xx_fiu_cs_index(NPCM7xxFIUState *fiu,
+                                     NPCM7xxFIUFlash *flash)
 {
     int index = flash - fiu->flash;
 
@@ -XXX,XX +XXX,XX @@ static int npcm7xx_fiu_cs_index(NPCM7xxFIUState *fiu, NPCM7xxFIUFlash *flash)
 }
 
 /* Assert the chip select specified in the UMA Control/Status Register. */
-static void npcm7xx_fiu_select(NPCM7xxFIUState *s, int cs_id)
+static void npcm7xx_fiu_select(NPCM7xxFIUState *s, unsigned cs_id)
 {
     trace_npcm7xx_fiu_select(DEVICE(s)->canonical_path, cs_id);
 
     if (cs_id < s->cs_count) {
         qemu_irq_lower(s->cs_lines[cs_id]);
+        s->active_cs = cs_id;
     } else {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "%s: UMA to CS%d; this module has only %d chip selects",
                       DEVICE(s)->canonical_path, cs_id, s->cs_count);
-        cs_id = -1;
+        s->active_cs = -1;
     }
-
-    s->active_cs = cs_id;
 }
 
 /* Deassert the currently active chip select. */
@@ -XXX,XX +XXX,XX @@ static void npcm7xx_fiu_flash_write(void *opaque, hwaddr addr, uint64_t v,
     NPCM7xxFIUFlash *f = opaque;
     NPCM7xxFIUState *fiu = f->fiu;
     uint32_t dwr_cfg;
-    int cs_id;
+    unsigned cs_id;
     int i;
 
     if (fiu->active_cs != -1) {
diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/ssi/trace-events
+++ b/hw/ssi/trace-events
@@ -XXX,XX +XXX,XX @@ npcm7xx_fiu_deselect(const char *id, int cs) "%s deselect CS%d"
 npcm7xx_fiu_ctrl_read(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
 npcm7xx_fiu_ctrl_write(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
 npcm7xx_fiu_flash_read(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
-npcm7xx_fiu_flash_write(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
+npcm7xx_fiu_flash_write(const char *id, unsigned cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
--
2.20.1

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201002080935.1660005-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 include/hw/arm/fsl-imx25.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/hw/arm/fsl-imx25.h b/include/hw/arm/fsl-imx25.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/arm/fsl-imx25.h
+++ b/include/hw/arm/fsl-imx25.h
@@ -XXX,XX +XXX,XX @@ struct FslIMX25State {
  * 0xBB00_0000 0xBB00_0FFF 4 Kbytes   NAND flash main area buffer
  * 0xBB00_1000 0xBB00_11FF 512 B      NAND flash spare area buffer
  * 0xBB00_1200 0xBB00_1DFF 3 Kbytes   Reserved
- * 0xBB00_1E00 0xBB00_1FFF 512 B      NAND flash control regisers
+ * 0xBB00_1E00 0xBB00_1FFF 512 B      NAND flash control registers
  * 0xBB01_2000 0xBFFF_FFFF 96 Mbytes (minus 8 Kbytes) Reserved
  * 0xC000_0000 0xFFFF_FFFF 1024 Mbytes Reserved
  */
--
2.20.1

From: Graeme Gregory <graeme@nuviainc.com>

SMMUv3 has an error in a previous patch where an 'i' was transposed to
a '1', meaning interrupts would not have been correctly assigned to the
SMMUv3 instance.

Fixes: 48ba18e6d3f3 ("hw/arm/sbsa-ref: Simplify by moving the gic in the machine state")
Signed-off-by: Graeme Gregory <graeme@nuviainc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20201007100732.4103790-2-graeme@nuviainc.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static void create_smmu(const SBSAMachineState *sms, PCIBus *bus)
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
     for (i = 0; i < NUM_SMMU_IRQS; i++) {
         sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
-                           qdev_get_gpio_in(sms->gic, irq + 1));
+                           qdev_get_gpio_in(sms->gic, irq + i));
     }
 }
 
--
2.20.1

From: Graeme Gregory <graeme@nuviainc.com>

The original commit did not allocate IRQs for the SMMUv3 in the irqmap,
effectively using IRQs 0->3 (shared with other devices). Assuming the
original intent was to allocate unique IRQs, add an allocation to the
irqmap.

Fixes: e9fdf453240 ("hw/arm: Add arm SBSA reference machine, devices part")
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Graeme Gregory <graeme@nuviainc.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20201007100732.4103790-3-graeme@nuviainc.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
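
To spell out the resulting mapping: with NUM_SMMU_IRQS interrupt lines
(four, given the "... to 15" comment in the diff below) and a base of
12, the wiring loop fixed in the previous patch connects the SMMUv3's
pins to GIC inputs 12 through 15. A self-contained sketch of that
pattern, with hypothetical stand-ins for the QEMU wiring calls:

    #include <stdio.h>

    enum { SBSA_SMMU_BASE_IRQ = 12, NUM_SMMU_IRQS = 4 };

    /* Stand-in for sysbus_connect_irq()/qdev_get_gpio_in(). */
    static void connect_irq(int device_pin, int gic_input)
    {
        printf("SMMU pin %d -> GIC input %d\n", device_pin, gic_input);
    }

    int main(void)
    {
        /* Device pin i goes to base + i, i.e. GIC inputs 12..15. */
        for (int i = 0; i < NUM_SMMU_IRQS; i++) {
            connect_irq(i, SBSA_SMMU_BASE_IRQ + i);
        }
        return 0;
    }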
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static const int sbsa_ref_irqmap[] = {
     [SBSA_SECURE_UART_MM] = 9,
     [SBSA_AHCI] = 10,
     [SBSA_EHCI] = 11,
+    [SBSA_SMMU] = 12, /* ... to 15 */
 };
 
 static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx)
--
2.20.1

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

The "BCM2835 ARM Peripherals" datasheet [*] chapter 2
("Auxiliaries: UART1 & SPI1, SPI2") lists the register
sizes as 3/8/16/32 bits. We assume this means this
peripheral allows 8-bit accesses.

This was not an issue until commit 5d971f9e67 which reverted
("memory: accept mismatching sizes in memory_region_access_valid").

The model is implemented as 32-bit accesses (see commit 97398d900c,
all registers are 32-bit), so move the 32-bit restriction from
MemoryRegionOps.valid to MemoryRegionOps.impl and re-introduce
MemoryRegionOps.valid with an 8/32-bit range.

[*] https://www.raspberrypi.org/app/uploads/2012/02/BCM2835-ARM-Peripherals.pdf

Fixes: 97398d900c ("bcm2835_aux: add emulation of BCM2835 AUX (aka UART1) block")
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20201002181032.1899463-1-f4bug@amsat.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
18 | Fixes: 97398d900c ("bcm2835_aux: add emulation of BCM2835 AUX (aka UART1) block") | ||
19 | Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
20 | Message-id: 20201002181032.1899463-1-f4bug@amsat.org | ||
21 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 22 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
12 | Message-id: 1506092407-26985-13-git-send-email-peter.maydell@linaro.org | ||
13 | --- | 23 | --- |
14 | target/arm/helper.c | 165 +++++++++++++++++++++++++++++++++++++++++++++------- | 24 | hw/char/bcm2835_aux.c | 4 +++- |
15 | 1 file changed, 145 insertions(+), 20 deletions(-) | 25 | 1 file changed, 3 insertions(+), 1 deletion(-) |
16 | 26 | ||
17 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 27 | diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c |
18 | index XXXXXXX..XXXXXXX 100644 | 28 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/target/arm/helper.c | 29 | --- a/hw/char/bcm2835_aux.c |
20 | +++ b/target/arm/helper.c | 30 | +++ b/hw/char/bcm2835_aux.c |
21 | @@ -XXX,XX +XXX,XX @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, | 31 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps bcm2835_aux_ops = { |
22 | } | 32 | .read = bcm2835_aux_read, |
23 | } | 33 | .write = bcm2835_aux_write, |
24 | 34 | .endianness = DEVICE_NATIVE_ENDIAN, | |
25 | -static uint32_t arm_v7m_load_vector(ARMCPU *cpu) | 35 | - .valid.min_access_size = 4, |
26 | +static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure) | 36 | + .impl.min_access_size = 4, |
27 | { | 37 | + .impl.max_access_size = 4, |
28 | CPUState *cs = CPU(cpu); | 38 | + .valid.min_access_size = 1, |
29 | CPUARMState *env = &cpu->env; | 39 | .valid.max_access_size = 4, |
30 | MemTxResult result; | 40 | }; |
31 | - hwaddr vec = env->v7m.vecbase[env->v7m.secure] + env->v7m.exception * 4; | ||
32 | + hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4; | ||
33 | uint32_t addr; | ||
34 | |||
35 | addr = address_space_ldl(cs->as, vec, | ||
36 | @@ -XXX,XX +XXX,XX @@ static uint32_t arm_v7m_load_vector(ARMCPU *cpu) | ||
37 | * Since we don't model Lockup, we just report this guest error | ||
38 | * via cpu_abort(). | ||
39 | */ | ||
40 | - cpu_abort(cs, "Failed to read from exception vector table " | ||
41 | - "entry %08x\n", (unsigned)vec); | ||
42 | + cpu_abort(cs, "Failed to read from %s exception vector table " | ||
43 | + "entry %08x\n", targets_secure ? "secure" : "nonsecure", | ||
44 | + (unsigned)vec); | ||
45 | } | ||
46 | return addr; | ||
47 | } | ||
48 | |||
49 | -static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | ||
50 | +static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain) | ||
51 | +{ | ||
52 | + /* For v8M, push the callee-saves register part of the stack frame. | ||
53 | + * Compare the v8M pseudocode PushCalleeStack(). | ||
54 | + * In the tailchaining case this may not be the current stack. | ||
55 | + */ | ||
56 | + CPUARMState *env = &cpu->env; | ||
57 | + CPUState *cs = CPU(cpu); | ||
58 | + uint32_t *frame_sp_p; | ||
59 | + uint32_t frameptr; | ||
60 | + | ||
61 | + if (dotailchain) { | ||
62 | + frame_sp_p = get_v7m_sp_ptr(env, true, | ||
63 | + lr & R_V7M_EXCRET_MODE_MASK, | ||
64 | + lr & R_V7M_EXCRET_SPSEL_MASK); | ||
65 | + } else { | ||
66 | + frame_sp_p = &env->regs[13]; | ||
67 | + } | ||
68 | + | ||
69 | + frameptr = *frame_sp_p - 0x28; | ||
70 | + | ||
71 | + stl_phys(cs->as, frameptr, 0xfefa125b); | ||
72 | + stl_phys(cs->as, frameptr + 0x8, env->regs[4]); | ||
73 | + stl_phys(cs->as, frameptr + 0xc, env->regs[5]); | ||
74 | + stl_phys(cs->as, frameptr + 0x10, env->regs[6]); | ||
75 | + stl_phys(cs->as, frameptr + 0x14, env->regs[7]); | ||
76 | + stl_phys(cs->as, frameptr + 0x18, env->regs[8]); | ||
77 | + stl_phys(cs->as, frameptr + 0x1c, env->regs[9]); | ||
78 | + stl_phys(cs->as, frameptr + 0x20, env->regs[10]); | ||
79 | + stl_phys(cs->as, frameptr + 0x24, env->regs[11]); | ||
80 | + | ||
81 | + *frame_sp_p = frameptr; | ||
82 | +} | ||
83 | + | ||
84 | +static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain) | ||
85 | { | ||
86 | /* Do the "take the exception" parts of exception entry, | ||
87 | * but not the pushing of state to the stack. This is | ||
88 | @@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | ||
89 | */ | ||
90 | CPUARMState *env = &cpu->env; | ||
91 | uint32_t addr; | ||
92 | + bool targets_secure; | ||
93 | + | ||
94 | + targets_secure = armv7m_nvic_acknowledge_irq(env->nvic); | ||
95 | |||
96 | - armv7m_nvic_acknowledge_irq(env->nvic); | ||
97 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
98 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY) && | ||
99 | + (lr & R_V7M_EXCRET_S_MASK)) { | ||
100 | + /* The background code (the owner of the registers in the | ||
101 | + * exception frame) is Secure. This means it may either already | ||
102 | + * have or now needs to push callee-saves registers. | ||
103 | + */ | ||
104 | + if (targets_secure) { | ||
105 | + if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { | ||
106 | + /* We took an exception from Secure to NonSecure | ||
107 | + * (which means the callee-saved registers got stacked) | ||
108 | + * and are now tailchaining to a Secure exception. | ||
109 | + * Clear DCRS so eventual return from this Secure | ||
110 | + * exception unstacks the callee-saved registers. | ||
111 | + */ | ||
112 | + lr &= ~R_V7M_EXCRET_DCRS_MASK; | ||
113 | + } | ||
114 | + } else { | ||
115 | + /* We're going to a non-secure exception; push the | ||
116 | + * callee-saves registers to the stack now, if they're | ||
117 | + * not already saved. | ||
118 | + */ | ||
119 | + if (lr & R_V7M_EXCRET_DCRS_MASK && | ||
120 | + !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { | ||
121 | + v7m_push_callee_stack(cpu, lr, dotailchain); | ||
122 | + } | ||
123 | + lr |= R_V7M_EXCRET_DCRS_MASK; | ||
124 | + } | ||
125 | + } | ||
126 | + | ||
127 | + lr &= ~R_V7M_EXCRET_ES_MASK; | ||
128 | + if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
129 | + lr |= R_V7M_EXCRET_ES_MASK; | ||
130 | + } | ||
131 | + lr &= ~R_V7M_EXCRET_SPSEL_MASK; | ||
132 | + if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { | ||
133 | + lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
134 | + } | ||
135 | + | ||
136 | + /* Clear registers if necessary to prevent non-secure exception | ||
137 | + * code being able to see register values from secure code. | ||
138 | + * Where register values become architecturally UNKNOWN we leave | ||
139 | + * them with their previous values. | ||
140 | + */ | ||
141 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
142 | + if (!targets_secure) { | ||
143 | + /* Always clear the caller-saved registers (they have been | ||
144 | + * pushed to the stack earlier in v7m_push_stack()). | ||
145 | + * Clear callee-saved registers if the background code is | ||
146 | + * Secure (in which case these regs were saved in | ||
147 | + * v7m_push_callee_stack()). | ||
148 | + */ | ||
149 | + int i; | ||
150 | + | ||
151 | + for (i = 0; i < 13; i++) { | ||
152 | + /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ | ||
153 | + if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { | ||
154 | + env->regs[i] = 0; | ||
155 | + } | ||
156 | + } | ||
157 | + /* Clear EAPSR */ | ||
158 | + xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); | ||
159 | + } | ||
160 | + } | ||
161 | + } | ||
162 | + | ||
163 | + /* Switch to target security state -- must do this before writing SPSEL */ | ||
164 | + switch_v7m_security_state(env, targets_secure); | ||
165 | write_v7m_control_spsel(env, 0); | ||
166 | arm_clear_exclusive(env); | ||
167 | /* Clear IT bits */ | ||
168 | env->condexec_bits = 0; | ||
169 | env->regs[14] = lr; | ||
170 | - addr = arm_v7m_load_vector(cpu); | ||
171 | + addr = arm_v7m_load_vector(cpu, targets_secure); | ||
172 | env->regs[15] = addr & 0xfffffffe; | ||
173 | env->thumb = addr & 1; | ||
174 | } | ||
175 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
176 | if (sfault) { | ||
177 | env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; | ||
178 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
179 | - v7m_exception_taken(cpu, excret); | ||
180 | + v7m_exception_taken(cpu, excret, true); | ||
181 | qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
182 | "stackframe: failed EXC_RETURN.ES validity check\n"); | ||
183 | return; | ||
184 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
185 | */ | ||
186 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
187 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); | ||
188 | - v7m_exception_taken(cpu, excret); | ||
189 | + v7m_exception_taken(cpu, excret, true); | ||
190 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
191 | "stackframe: failed exception return integrity check\n"); | ||
192 | return; | ||
193 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
194 | /* Take a SecureFault on the current stack */ | ||
195 | env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; | ||
196 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
197 | - v7m_exception_taken(cpu, excret); | ||
198 | + v7m_exception_taken(cpu, excret, true); | ||
199 | qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
200 | "stackframe: failed exception return integrity " | ||
201 | "signature check\n"); | ||
202 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
203 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | ||
204 | env->v7m.secure); | ||
205 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
206 | - v7m_exception_taken(cpu, excret); | ||
207 | + v7m_exception_taken(cpu, excret, true); | ||
208 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
209 | "stackframe: failed exception return integrity " | ||
210 | "check\n"); | ||
211 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
212 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); | ||
213 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
214 | v7m_push_stack(cpu); | ||
215 | - v7m_exception_taken(cpu, excret); | ||
216 | + v7m_exception_taken(cpu, excret, false); | ||
217 | qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " | ||
218 | "failed exception return integrity check\n"); | ||
219 | return; | ||
220 | @@ -XXX,XX +XXX,XX @@ void arm_v7m_cpu_do_interrupt(CPUState *cs) | ||
221 | return; /* Never happens. Keep compiler happy. */ | ||
222 | } | ||
223 | |||
224 | - lr = R_V7M_EXCRET_RES1_MASK | | ||
225 | - R_V7M_EXCRET_S_MASK | | ||
226 | - R_V7M_EXCRET_DCRS_MASK | | ||
227 | - R_V7M_EXCRET_FTYPE_MASK | | ||
228 | - R_V7M_EXCRET_ES_MASK; | ||
229 | - if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) { | ||
230 | - lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
231 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
232 | + lr = R_V7M_EXCRET_RES1_MASK | | ||
233 | + R_V7M_EXCRET_DCRS_MASK | | ||
234 | + R_V7M_EXCRET_FTYPE_MASK; | ||
235 | + /* The S bit indicates whether we should return to Secure | ||
236 | + * or NonSecure (ie our current state). | ||
237 | + * The ES bit indicates whether we're taking this exception | ||
238 | + * to Secure or NonSecure (ie our target state). We set it | ||
239 | + * later, in v7m_exception_taken(). | ||
240 | + * The SPSEL bit is also set in v7m_exception_taken() for v8M. | ||
241 | + * This corresponds to the ARM ARM pseudocode for v8M setting | ||
242 | + * some LR bits in PushStack() and some in ExceptionTaken(); | ||
243 | + * the distinction matters for the tailchain cases where we | ||
244 | + * can take an exception without pushing the stack. | ||
245 | + */ | ||
246 | + if (env->v7m.secure) { | ||
247 | + lr |= R_V7M_EXCRET_S_MASK; | ||
248 | + } | ||
249 | + } else { | ||
250 | + lr = R_V7M_EXCRET_RES1_MASK | | ||
251 | + R_V7M_EXCRET_S_MASK | | ||
252 | + R_V7M_EXCRET_DCRS_MASK | | ||
253 | + R_V7M_EXCRET_FTYPE_MASK | | ||
254 | + R_V7M_EXCRET_ES_MASK; | ||
255 | + if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { | ||
256 | + lr |= R_V7M_EXCRET_SPSEL_MASK; | ||
257 | + } | ||
258 | } | ||
259 | if (!arm_v7m_is_handler_mode(env)) { | ||
260 | lr |= R_V7M_EXCRET_MODE_MASK; | ||
261 | } | ||
262 | |||
263 | v7m_push_stack(cpu); | ||
264 | - v7m_exception_taken(cpu, lr); | ||
265 | + v7m_exception_taken(cpu, lr, false); | ||
266 | qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception); | ||
267 | } | ||
268 | 41 | ||
269 | -- | 42 | -- |
270 | 2.7.4 | 43 | 2.20.1 |
271 | 44 | ||
272 | 45 | diff view generated by jsdifflib |
From: Andrew Jones <drjones@redhat.com>

Update against Linux 5.9-rc7.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-id: 20201001061718.101915-2-drjones@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 linux-headers/linux/kvm.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -XXX,XX +XXX,XX @@ struct kvm_ppc_resize_hpt {
 #define KVM_VM_PPC_HV 1
 #define KVM_VM_PPC_PR 2
 
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
 #define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
 
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
@@ -XXX,XX +XXX,XX @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_LAST_CPU 184
 #define KVM_CAP_SMALLER_MAXPHYADDR 185
 #define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
 
 #ifdef KVM_CAP_IRQ_ROUTING
--
2.20.1

From: Andrew Jones <drjones@redhat.com>

When we compile without KVM support (!defined(CONFIG_KVM)) we generate
stubs for functions that the linker will still encounter. Sometimes
these stubs can be executed safely and are placed in paths where they
get executed with or without KVM. Other functions should never be
called without KVM. Those functions should be guarded by kvm_enabled(),
but should also be robust to refactoring mistakes. Putting a
g_assert_not_reached() in the function should help. Additionally,
the g_assert_not_reached() calls may actually help the linker remove
some code.

We remove the stubs for kvm_arm_get/put_virtual_time(), as they aren't
necessary at all - the only caller is in kvm.c.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-id: 20201001061718.101915-3-drjones@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/kvm_arm.h | 51 +++++++++++++++++++++-----------------
 1 file changed, 32 insertions(+), 19 deletions(-)

diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -XXX,XX +XXX,XX @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
 
 #else
 
-static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
-{
-    /*
-     * This should never actually be called in the "not KVM" case,
-     * but set up the fields to indicate an error anyway.
-     */
-    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
-    cpu->host_cpu_probe_failed = true;
-}
-
-static inline void kvm_arm_add_vcpu_properties(Object *obj) {}
-
+/*
+ * It's safe to call these functions without KVM support.
+ * They should either do nothing or return "not supported".
+ */
 static inline bool kvm_arm_aarch32_supported(void)
 {
     return false;
@@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_sve_supported(void)
     return false;
138 | return !(*prot & (1 << access_type)); | ||
139 | } | 53 | } |
140 | 54 | ||
141 | +static bool v8m_is_sau_exempt(CPUARMState *env, | 55 | +/* |
142 | + uint32_t address, MMUAccessType access_type) | 56 | + * These functions should never actually be called without KVM support. |
57 | + */ | ||
58 | +static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) | ||
143 | +{ | 59 | +{ |
144 | + /* The architecture specifies that certain address ranges are | 60 | + g_assert_not_reached(); |
145 | + * exempt from v8M SAU/IDAU checks. | ||
146 | + */ | ||
147 | + return | ||
148 | + (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || | ||
149 | + (address >= 0xe0000000 && address <= 0xe0002fff) || | ||
150 | + (address >= 0xe000e000 && address <= 0xe000efff) || | ||
151 | + (address >= 0xe002e000 && address <= 0xe002efff) || | ||
152 | + (address >= 0xe0040000 && address <= 0xe0041fff) || | ||
153 | + (address >= 0xe00ff000 && address <= 0xe00fffff); | ||
154 | +} | 61 | +} |
155 | + | 62 | + |
156 | +static void v8m_security_lookup(CPUARMState *env, uint32_t address, | 63 | +static inline void kvm_arm_add_vcpu_properties(Object *obj) |
157 | + MMUAccessType access_type, ARMMMUIdx mmu_idx, | ||
158 | + V8M_SAttributes *sattrs) | ||
159 | +{ | 64 | +{ |
160 | + /* Look up the security attributes for this address. Compare the | 65 | + g_assert_not_reached(); |
161 | + * pseudocode SecurityCheck() function. | ||
162 | + * We assume the caller has zero-initialized *sattrs. | ||
163 | + */ | ||
164 | + ARMCPU *cpu = arm_env_get_cpu(env); | ||
165 | + int r; | ||
166 | + | ||
167 | + /* TODO: implement IDAU */ | ||
168 | + | ||
169 | + if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { | ||
170 | + /* 0xf0000000..0xffffffff is always S for insn fetches */ | ||
171 | + return; | ||
172 | + } | ||
173 | + | ||
174 | + if (v8m_is_sau_exempt(env, address, access_type)) { | ||
175 | + sattrs->ns = !regime_is_secure(env, mmu_idx); | ||
176 | + return; | ||
177 | + } | ||
178 | + | ||
179 | + switch (env->sau.ctrl & 3) { | ||
180 | + case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ | ||
181 | + break; | ||
182 | + case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ | ||
183 | + sattrs->ns = true; | ||
184 | + break; | ||
185 | + default: /* SAU.ENABLE == 1 */ | ||
186 | + for (r = 0; r < cpu->sau_sregion; r++) { | ||
187 | + if (env->sau.rlar[r] & 1) { | ||
188 | + uint32_t base = env->sau.rbar[r] & ~0x1f; | ||
189 | + uint32_t limit = env->sau.rlar[r] | 0x1f; | ||
190 | + | ||
191 | + if (base <= address && limit >= address) { | ||
192 | + if (sattrs->srvalid) { | ||
193 | + /* If we hit in more than one region then we must report | ||
194 | + * as Secure, not NS-Callable, with no valid region | ||
195 | + * number info. | ||
196 | + */ | ||
197 | + sattrs->ns = false; | ||
198 | + sattrs->nsc = false; | ||
199 | + sattrs->sregion = 0; | ||
200 | + sattrs->srvalid = false; | ||
201 | + break; | ||
202 | + } else { | ||
203 | + if (env->sau.rlar[r] & 2) { | ||
204 | + sattrs->nsc = true; | ||
205 | + } else { | ||
206 | + sattrs->ns = true; | ||
207 | + } | ||
208 | + sattrs->srvalid = true; | ||
209 | + sattrs->sregion = r; | ||
210 | + } | ||
211 | + } | ||
212 | + } | ||
213 | + } | ||
214 | + | ||
215 | + /* TODO when we support the IDAU then it may override the result here */ | ||
216 | + break; | ||
217 | + } | ||
218 | +} | 66 | +} |
219 | + | 67 | + |
220 | static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, | 68 | static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms) |
221 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | ||
222 | - hwaddr *phys_ptr, int *prot, uint32_t *fsr) | ||
223 | + hwaddr *phys_ptr, MemTxAttrs *txattrs, | ||
224 | + int *prot, uint32_t *fsr) | ||
225 | { | 69 | { |
226 | ARMCPU *cpu = arm_env_get_cpu(env); | 70 | - return -ENOENT; |
227 | bool is_user = regime_is_user(env, mmu_idx); | 71 | + g_assert_not_reached(); |
228 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, | 72 | } |
229 | int n; | 73 | |
230 | int matchregion = -1; | 74 | static inline int kvm_arm_vgic_probe(void) |
231 | bool hit = false; | 75 | { |
232 | + V8M_SAttributes sattrs = {}; | 76 | - return 0; |
233 | 77 | + g_assert_not_reached(); | |
234 | *phys_ptr = address; | 78 | } |
235 | *prot = 0; | 79 | |
236 | 80 | -static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) {} | |
237 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | 81 | -static inline void kvm_arm_pmu_init(CPUState *cs) {} |
238 | + v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); | 82 | +static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) |
239 | + if (access_type == MMU_INST_FETCH) { | 83 | +{ |
240 | + /* Instruction fetches always use the MMU bank and the | 84 | + g_assert_not_reached(); |
241 | + * transaction attribute determined by the fetch address, | 85 | +} |
242 | + * regardless of CPU state. This is painful for QEMU | 86 | |
243 | + * to handle, because it would mean we need to encode | 87 | -static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) {} |
244 | + * into the mmu_idx not just the (user, negpri) information | 88 | +static inline void kvm_arm_pmu_init(CPUState *cs) |
245 | + * for the current security state but also that for the | 89 | +{ |
246 | + * other security state, which would balloon the number | 90 | + g_assert_not_reached(); |
247 | + * of mmu_idx values needed alarmingly. | 91 | +} |
248 | + * Fortunately we can avoid this because it's not actually | ||
249 | + * possible to arbitrarily execute code from memory with | ||
250 | + * the wrong security attribute: it will always generate | ||
251 | + * an exception of some kind or another, apart from the | ||
252 | + * special case of an NS CPU executing an SG instruction | ||
253 | + * in S&NSC memory. So we always just fail the translation | ||
254 | + * here and sort things out in the exception handler | ||
255 | + * (including possibly emulating an SG instruction). | ||
256 | + */ | ||
257 | + if (sattrs.ns != !secure) { | ||
258 | + *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT; | ||
259 | + return true; | ||
260 | + } | ||
261 | + } else { | ||
262 | + /* For data accesses we always use the MMU bank indicated | ||
263 | + * by the current CPU state, but the security attributes | ||
264 | + * might downgrade a secure access to nonsecure. | ||
265 | + */ | ||
266 | + if (sattrs.ns) { | ||
267 | + txattrs->secure = false; | ||
268 | + } else if (!secure) { | ||
269 | + /* NS access to S memory must fault. | ||
270 | + * Architecturally we should first check whether the | ||
271 | + * MPU information for this address indicates that we | ||
272 | + * are doing an unaligned access to Device memory, which | ||
273 | + * should generate a UsageFault instead. QEMU does not | ||
274 | + * currently check for that kind of unaligned access though. | ||
275 | + * If we added it we would need to do so as a special case | ||
276 | + * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). | ||
277 | + */ | ||
278 | + *fsr = M_FAKE_FSR_SFAULT; | ||
279 | + return true; | ||
280 | + } | ||
281 | + } | ||
282 | + } | ||
283 | + | 92 | + |
284 | /* Unlike the ARM ARM pseudocode, we don't need to check whether this | 93 | +static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) |
285 | * was an exception vector read from the vector table (which is always | 94 | +{ |
286 | * done using the default system address map), because those accesses | 95 | + g_assert_not_reached(); |
287 | @@ -XXX,XX +XXX,XX @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, | 96 | +} |
288 | if (arm_feature(env, ARM_FEATURE_V8)) { | 97 | |
289 | /* PMSAv8 */ | 98 | -static inline void kvm_arm_get_virtual_time(CPUState *cs) {} |
290 | ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, | 99 | -static inline void kvm_arm_put_virtual_time(CPUState *cs) {} |
291 | - phys_ptr, prot, fsr); | 100 | #endif |
292 | + phys_ptr, attrs, prot, fsr); | 101 | |
293 | } else if (arm_feature(env, ARM_FEATURE_V7)) { | 102 | static inline const char *gic_class_name(void) |
294 | /* PMSAv7 */ | ||
295 | ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, | ||
296 | -- | 103 | -- |
297 | 2.7.4 | 104 | 2.20.1 |
298 | 105 | ||
299 | 106 | diff view generated by jsdifflib |
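As an aside for readers following the SAU lookup above: the matching rules the patch implements are compact enough to show standalone. Below is a minimal sketch, with illustrative types (SAURegion and SAttrs are not QEMU's structures), of the 32-byte RBAR/RLAR granularity, the RLAR enable and NSC bits, and the rule that an address hitting more than one region must be reported as Secure.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative SAU region state: one RBAR/RLAR pair per region. */
    typedef struct {
        uint32_t rbar;  /* bits [31:5]: region base address */
        uint32_t rlar;  /* bits [31:5]: limit, bit 1: NSC, bit 0: enable */
    } SAURegion;

    typedef struct {
        bool ns;        /* address is Non-secure */
        bool nsc;       /* address is Secure and Non-secure callable */
    } SAttrs;

    static SAttrs sau_lookup(const SAURegion *r, int nregions, uint32_t addr)
    {
        SAttrs s = { false, false };
        bool hit = false;

        for (int i = 0; i < nregions; i++) {
            uint32_t base = r[i].rbar & ~0x1fu;   /* 32-byte aligned base */
            uint32_t limit = r[i].rlar | 0x1fu;   /* low 5 bits read as 1 */

            if (!(r[i].rlar & 1) || addr < base || addr > limit) {
                continue;  /* region disabled, or address outside it */
            }
            if (hit) {
                /* Second hit: report plain Secure, not NS-callable. */
                s.ns = false;
                s.nsc = false;
                break;
            }
            hit = true;
            if (r[i].rlar & 2) {
                s.nsc = true;   /* Secure, Non-secure callable */
            } else {
                s.ns = true;    /* Non-secure */
            }
        }
        return s;
    }

If neither flag ends up set the address is plain Secure, which matches the zero-initialized *sattrs convention the patch relies on.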
1 | In the v7M architecture, there is an invariant that if the CPU is | 1 | From: Andrew Jones <drjones@redhat.com> |
---|---|---|---|
2 | in Handler mode then the CONTROL.SPSEL bit cannot be nonzero. | ||
3 | This in turn means that the current stack pointer is always | ||
4 | indicated by CONTROL.SPSEL, even though Handler mode always uses | ||
5 | the Main stack pointer. | ||
6 | 2 | ||
7 | In v8M, this invariant is removed, and CONTROL.SPSEL may now | 3 | We'll add more to this new function in coming patches, so we also
8 | be nonzero in Handler mode (though Handler mode still always | 4 | state that the GIC must already be created and call it below create_gic().
9 | uses the Main stack pointer). In preparation for this change, | ||
10 | change how we handle this bit: rename switch_v7m_sp() to | ||
11 | the now more accurate write_v7m_control_spsel(), and make it | ||
12 | check both the handler mode state and the SPSEL bit. | ||
13 | 5 | ||
14 | Note that this implicitly changes the point at which we switch | 6 | No functional change intended. |
15 | active SP on exception exit from before we pop the exception | ||
16 | frame to after it. | ||
17 | 7 | ||
8 | Reviewed-by: Eric Auger <eric.auger@redhat.com> | ||
9 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
10 | Signed-off-by: Andrew Jones <drjones@redhat.com> | ||
11 | Message-id: 20201001061718.101915-4-drjones@redhat.com | ||
18 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
19 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
20 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
21 | Message-id: 1506092407-26985-4-git-send-email-peter.maydell@linaro.org | ||
22 | --- | 13 | --- |
23 | target/arm/cpu.h | 8 ++++++- | 14 | hw/arm/virt.c | 43 +++++++++++++++++++++++++++---------------- |
24 | hw/intc/armv7m_nvic.c | 2 +- | 15 | 1 file changed, 27 insertions(+), 16 deletions(-) |
25 | target/arm/helper.c | 65 ++++++++++++++++++++++++++++++++++----------------- | ||
26 | 3 files changed, 51 insertions(+), 24 deletions(-) | ||
27 | 16 | ||
28 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 17 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c |
29 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/target/arm/cpu.h | 19 | --- a/hw/arm/virt.c |
31 | +++ b/target/arm/cpu.h | 20 | +++ b/hw/arm/virt.c |
32 | @@ -XXX,XX +XXX,XX @@ void pmccntr_sync(CPUARMState *env); | 21 | @@ -XXX,XX +XXX,XX @@ static void finalize_gic_version(VirtMachineState *vms) |
33 | #define PSTATE_MODE_EL1t 4 | ||
34 | #define PSTATE_MODE_EL0t 0 | ||
35 | |||
36 | +/* Write a new value to v7m.exception, thus transitioning into or out | ||
37 | + * of Handler mode; this may result in a change of active stack pointer. | ||
38 | + */ | ||
39 | +void write_v7m_exception(CPUARMState *env, uint32_t new_exc); | ||
40 | + | ||
41 | /* Map EL and handler into a PSTATE_MODE. */ | ||
42 | static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) | ||
43 | { | ||
44 | @@ -XXX,XX +XXX,XX @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) | ||
45 | env->condexec_bits |= (val >> 8) & 0xfc; | ||
46 | } | ||
47 | if (mask & XPSR_EXCP) { | ||
48 | - env->v7m.exception = val & XPSR_EXCP; | ||
49 | + /* Note that this only happens on exception exit */ | ||
50 | + write_v7m_exception(env, val & XPSR_EXCP); | ||
51 | } | 22 | } |
52 | } | 23 | } |
53 | 24 | ||
54 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 25 | +/* |
55 | index XXXXXXX..XXXXXXX 100644 | 26 | + * virt_cpu_post_init() must be called after the CPUs have |
56 | --- a/hw/intc/armv7m_nvic.c | 27 | + * been realized and the GIC has been created. |
57 | +++ b/hw/intc/armv7m_nvic.c | ||
58 | @@ -XXX,XX +XXX,XX @@ bool armv7m_nvic_acknowledge_irq(void *opaque) | ||
59 | vec->active = 1; | ||
60 | vec->pending = 0; | ||
61 | |||
62 | - env->v7m.exception = s->vectpending; | ||
63 | + write_v7m_exception(env, s->vectpending); | ||
64 | |||
65 | nvic_irq_update(s); | ||
66 | |||
67 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
68 | index XXXXXXX..XXXXXXX 100644 | ||
69 | --- a/target/arm/helper.c | ||
70 | +++ b/target/arm/helper.c | ||
71 | @@ -XXX,XX +XXX,XX @@ static bool v7m_using_psp(CPUARMState *env) | ||
72 | env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; | ||
73 | } | ||
74 | |||
75 | -/* Switch to V7M main or process stack pointer. */ | ||
76 | -static void switch_v7m_sp(CPUARMState *env, bool new_spsel) | ||
77 | +/* Write to v7M CONTROL.SPSEL bit. This may change the current | ||
78 | + * stack pointer between Main and Process stack pointers. | ||
79 | + */ | 28 | + */ |
80 | +static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) | 29 | +static void virt_cpu_post_init(VirtMachineState *vms) |
81 | { | 30 | +{ |
82 | uint32_t tmp; | 31 | + bool aarch64; |
83 | - uint32_t old_control = env->v7m.control[env->v7m.secure]; | ||
84 | - bool old_spsel = old_control & R_V7M_CONTROL_SPSEL_MASK; | ||
85 | + bool new_is_psp, old_is_psp = v7m_using_psp(env); | ||
86 | + | 32 | + |
87 | + env->v7m.control[env->v7m.secure] = | 33 | + aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL); |
88 | + deposit32(env->v7m.control[env->v7m.secure], | ||
89 | + R_V7M_CONTROL_SPSEL_SHIFT, | ||
90 | + R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); | ||
91 | + | 34 | + |
92 | + new_is_psp = v7m_using_psp(env); | 35 | + if (!kvm_enabled()) { |
93 | 36 | + if (aarch64 && vms->highmem) { | |
94 | - if (old_spsel != new_spsel) { | 37 | + int requested_pa_size = 64 - clz64(vms->highest_gpa); |
95 | + if (old_is_psp != new_is_psp) { | 38 | + int pamax = arm_pamax(ARM_CPU(first_cpu)); |
96 | tmp = env->v7m.other_sp; | 39 | + |
97 | env->v7m.other_sp = env->regs[13]; | 40 | + if (pamax < requested_pa_size) { |
98 | env->regs[13] = tmp; | 41 | + error_report("VCPU supports less PA bits (%d) than " |
42 | + "requested by the memory map (%d)", | ||
43 | + pamax, requested_pa_size); | ||
44 | + exit(1); | ||
45 | + } | ||
46 | + } | ||
99 | + } | 47 | + } |
100 | +} | 48 | +} |
101 | + | 49 | + |
102 | +void write_v7m_exception(CPUARMState *env, uint32_t new_exc) | 50 | static void machvirt_init(MachineState *machine) |
103 | +{ | 51 | { |
104 | + /* Write a new value to v7m.exception, thus transitioning into or out | 52 | VirtMachineState *vms = VIRT_MACHINE(machine); |
105 | + * of Handler mode; this may result in a change of active stack pointer. | 53 | @@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine) |
106 | + */ | 54 | fdt_add_timer_nodes(vms); |
107 | + bool new_is_psp, old_is_psp = v7m_using_psp(env); | 55 | fdt_add_cpu_nodes(vms); |
108 | + uint32_t tmp; | 56 | |
109 | 57 | - if (!kvm_enabled()) { | |
110 | - env->v7m.control[env->v7m.secure] = deposit32(old_control, | 58 | - ARMCPU *cpu = ARM_CPU(first_cpu); |
111 | - R_V7M_CONTROL_SPSEL_SHIFT, | 59 | - bool aarch64 = object_property_get_bool(OBJECT(cpu), "aarch64", NULL); |
112 | - R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); | 60 | - |
113 | + env->v7m.exception = new_exc; | 61 | - if (aarch64 && vms->highmem) { |
62 | - int requested_pa_size, pamax = arm_pamax(cpu); | ||
63 | - | ||
64 | - requested_pa_size = 64 - clz64(vms->highest_gpa); | ||
65 | - if (pamax < requested_pa_size) { | ||
66 | - error_report("VCPU supports less PA bits (%d) than requested " | ||
67 | - "by the memory map (%d)", pamax, requested_pa_size); | ||
68 | - exit(1); | ||
69 | - } | ||
70 | - } | ||
71 | - } | ||
72 | - | ||
73 | memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, | ||
74 | machine->ram); | ||
75 | if (machine->device_memory) { | ||
76 | @@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine) | ||
77 | |||
78 | create_gic(vms); | ||
79 | |||
80 | + virt_cpu_post_init(vms); | ||
114 | + | 81 | + |
115 | + new_is_psp = v7m_using_psp(env); | 82 | fdt_add_pmu_nodes(vms); |
116 | + | 83 | |
117 | + if (old_is_psp != new_is_psp) { | 84 | create_uart(vms, VIRT_UART, sysmem, serial_hd(0)); |
118 | + tmp = env->v7m.other_sp; | ||
119 | + env->v7m.other_sp = env->regs[13]; | ||
120 | + env->regs[13] = tmp; | ||
121 | } | ||
122 | } | ||
123 | |||
124 | @@ -XXX,XX +XXX,XX @@ static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, | ||
125 | bool want_psp = threadmode && spsel; | ||
126 | |||
127 | if (secure == env->v7m.secure) { | ||
128 | - /* Currently switch_v7m_sp switches SP as it updates SPSEL, | ||
129 | - * so the SP we want is always in regs[13]. | ||
130 | - * When we decouple SPSEL from the actually selected SP | ||
131 | - * we need to check want_psp against v7m_using_psp() | ||
132 | - * to see whether we need regs[13] or v7m.other_sp. | ||
133 | - */ | ||
134 | - return &env->regs[13]; | ||
135 | + if (want_psp == v7m_using_psp(env)) { | ||
136 | + return &env->regs[13]; | ||
137 | + } else { | ||
138 | + return &env->v7m.other_sp; | ||
139 | + } | ||
140 | } else { | ||
141 | if (want_psp) { | ||
142 | return &env->v7m.other_ss_psp; | ||
143 | @@ -XXX,XX +XXX,XX @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr) | ||
144 | uint32_t addr; | ||
145 | |||
146 | armv7m_nvic_acknowledge_irq(env->nvic); | ||
147 | - switch_v7m_sp(env, 0); | ||
148 | + write_v7m_control_spsel(env, 0); | ||
149 | arm_clear_exclusive(env); | ||
150 | /* Clear IT bits */ | ||
151 | env->condexec_bits = 0; | ||
152 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
153 | return; | ||
154 | } | ||
155 | |||
156 | - /* Set CONTROL.SPSEL from excret.SPSEL. For QEMU this currently | ||
157 | - * causes us to switch the active SP, but we will change this | ||
158 | - * later to not do that so we can support v8M. | ||
159 | + /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in | ||
160 | + * Handler mode (and will be until we write the new XPSR.Interrupt | ||
161 | + * field) this does not switch around the current stack pointer. | ||
162 | */ | ||
163 | - switch_v7m_sp(env, return_to_sp_process); | ||
164 | + write_v7m_control_spsel(env, return_to_sp_process); | ||
165 | |||
166 | { | ||
167 | /* The stack pointer we should be reading the exception frame from | ||
168 | @@ -XXX,XX +XXX,XX @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) | ||
169 | case 20: /* CONTROL */ | ||
170 | /* Writing to the SPSEL bit only has an effect if we are in | ||
171 | * thread mode; other bits can be updated by any privileged code. | ||
172 | - * switch_v7m_sp() deals with updating the SPSEL bit in | ||
173 | + * write_v7m_control_spsel() deals with updating the SPSEL bit in | ||
174 | * env->v7m.control, so we only need to update the others. | ||
175 | */ | ||
176 | if (!arm_v7m_is_handler_mode(env)) { | ||
177 | - switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); | ||
178 | + write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); | ||
179 | } | ||
180 | env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; | ||
181 | env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; | ||
182 | -- | 85 | -- |
183 | 2.7.4 | 86 | 2.20.1 |
184 | 87 | ||
185 | 88 | diff view generated by jsdifflib |
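The invariant change is easier to see with the stack-selection rule written out. A minimal sketch, assuming illustrative types rather than QEMU's CPUARMState:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        bool handler_mode;   /* an exception is currently being handled */
        bool control_spsel;  /* CONTROL.SPSEL bit */
        uint32_t msp;        /* Main stack pointer */
        uint32_t psp;        /* Process stack pointer */
    } MiniV7M;

    /* Handler mode always runs on MSP; Thread mode follows CONTROL.SPSEL.
     * On v7M the two conditions cannot disagree (SPSEL is 0 in Handler
     * mode); on v8M they can, so both must be checked, as v7m_using_psp()
     * does in the patch.
     */
    static bool using_psp(const MiniV7M *s)
    {
        return !s->handler_mode && s->control_spsel;
    }

    static uint32_t current_sp(const MiniV7M *s)
    {
        return using_psp(s) ? s->psp : s->msp;
    }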
1 | In v8M, more bits are defined in the exception-return magic | 1 | From: Andrew Jones <drjones@redhat.com> |
---|---|---|---|
2 | values; update the code that checks these so we accept | ||
3 | the v8M values when the CPU permits them. | ||
4 | 2 | ||
3 | Move the KVM PMU setup part of fdt_add_pmu_nodes() to | ||
4 | virt_cpu_post_init(), which is a more appropriate location. Now | ||
5 | fdt_add_pmu_nodes() is also named more appropriately, because it | ||
6 | no longer does anything but fdt node creation. | ||
7 | |||
8 | No functional change intended. | ||
9 | |||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Reviewed-by: Eric Auger <eric.auger@redhat.com> | ||
12 | Signed-off-by: Andrew Jones <drjones@redhat.com> | ||
13 | Message-id: 20201001061718.101915-5-drjones@redhat.com | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 1506092407-26985-11-git-send-email-peter.maydell@linaro.org | ||
8 | --- | 15 | --- |
9 | target/arm/helper.c | 73 ++++++++++++++++++++++++++++++++++++++++++----------- | 16 | hw/arm/virt.c | 34 ++++++++++++++++++---------------- |
10 | 1 file changed, 58 insertions(+), 15 deletions(-) | 17 | 1 file changed, 18 insertions(+), 16 deletions(-) |
11 | 18 | ||
12 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 19 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c |
13 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/helper.c | 21 | --- a/hw/arm/virt.c |
15 | +++ b/target/arm/helper.c | 22 | +++ b/hw/arm/virt.c |
16 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | 23 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_gic_node(VirtMachineState *vms) |
17 | uint32_t excret; | 24 | |
18 | uint32_t xpsr; | 25 | static void fdt_add_pmu_nodes(const VirtMachineState *vms) |
19 | bool ufault = false; | 26 | { |
20 | - bool return_to_sp_process = false; | 27 | - CPUState *cpu; |
21 | - bool return_to_handler = false; | 28 | - ARMCPU *armcpu; |
22 | + bool sfault = false; | 29 | + ARMCPU *armcpu = ARM_CPU(first_cpu); |
23 | + bool return_to_sp_process; | 30 | uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; |
24 | + bool return_to_handler; | 31 | |
25 | bool rettobase = false; | 32 | - CPU_FOREACH(cpu) { |
26 | bool exc_secure = false; | 33 | - armcpu = ARM_CPU(cpu); |
27 | bool return_to_secure; | 34 | - if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { |
28 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | 35 | - return; |
29 | excret); | 36 | - } |
37 | - if (kvm_enabled()) { | ||
38 | - if (kvm_irqchip_in_kernel()) { | ||
39 | - kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ)); | ||
40 | - } | ||
41 | - kvm_arm_pmu_init(cpu); | ||
42 | - } | ||
43 | + if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { | ||
44 | + assert(!object_property_get_bool(OBJECT(armcpu), "pmu", NULL)); | ||
45 | + return; | ||
30 | } | 46 | } |
31 | 47 | ||
32 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | 48 | if (vms->gic_version == VIRT_GIC_VERSION_2) { |
33 | + /* EXC_RETURN.ES validation check (R_SMFL). We must do this before | 49 | @@ -XXX,XX +XXX,XX @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms) |
34 | + * we pick which FAULTMASK to clear. | 50 | (1 << vms->smp_cpus) - 1); |
35 | + */ | ||
36 | + if (!env->v7m.secure && | ||
37 | + ((excret & R_V7M_EXCRET_ES_MASK) || | ||
38 | + !(excret & R_V7M_EXCRET_DCRS_MASK))) { | ||
39 | + sfault = 1; | ||
40 | + /* For all other purposes, treat ES as 0 (R_HXSR) */ | ||
41 | + excret &= ~R_V7M_EXCRET_ES_MASK; | ||
42 | + } | ||
43 | + } | ||
44 | + | ||
45 | if (env->v7m.exception != ARMV7M_EXCP_NMI) { | ||
46 | /* Auto-clear FAULTMASK on return from other than NMI. | ||
47 | * If the security extension is implemented then this only | ||
48 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
49 | g_assert_not_reached(); | ||
50 | } | 51 | } |
51 | 52 | ||
52 | + return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); | 53 | - armcpu = ARM_CPU(qemu_get_cpu(0)); |
53 | + return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; | 54 | qemu_fdt_add_subnode(vms->fdt, "/pmu"); |
54 | return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && | 55 | if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) { |
55 | (excret & R_V7M_EXCRET_S_MASK); | 56 | const char compat[] = "arm,armv8-pmuv3"; |
56 | 57 | @@ -XXX,XX +XXX,XX @@ static void finalize_gic_version(VirtMachineState *vms) | |
57 | - switch (excret & 0xf) { | 58 | */ |
58 | - case 1: /* Return to Handler */ | 59 | static void virt_cpu_post_init(VirtMachineState *vms) |
59 | - return_to_handler = true; | 60 | { |
60 | - break; | 61 | - bool aarch64; |
61 | - case 13: /* Return to Thread using Process stack */ | 62 | + bool aarch64, pmu; |
62 | - return_to_sp_process = true; | 63 | + CPUState *cpu; |
63 | - /* fall through */ | 64 | |
64 | - case 9: /* Return to Thread using Main stack */ | 65 | aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL); |
65 | - if (!rettobase && | 66 | + pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL); |
66 | - !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) { | 67 | |
67 | + if (arm_feature(env, ARM_FEATURE_V8)) { | 68 | - if (!kvm_enabled()) { |
68 | + if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { | 69 | + if (kvm_enabled()) { |
69 | + /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); | 70 | + CPU_FOREACH(cpu) { |
70 | + * we choose to take the UsageFault. | 71 | + if (pmu) { |
71 | + */ | 72 | + assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); |
72 | + if ((excret & R_V7M_EXCRET_S_MASK) || | 73 | + if (kvm_irqchip_in_kernel()) { |
73 | + (excret & R_V7M_EXCRET_ES_MASK) || | 74 | + kvm_arm_pmu_set_irq(cpu, PPI(VIRTUAL_PMU_IRQ)); |
74 | + !(excret & R_V7M_EXCRET_DCRS_MASK)) { | 75 | + } |
75 | + ufault = true; | 76 | + kvm_arm_pmu_init(cpu); |
76 | + } | 77 | + } |
77 | + } | 78 | + } |
78 | + if (excret & R_V7M_EXCRET_RES0_MASK) { | ||
79 | ufault = true; | ||
80 | } | ||
81 | - break; | ||
82 | - default: | ||
83 | - ufault = true; | ||
84 | + } else { | 79 | + } else { |
85 | + /* For v7M we only recognize certain combinations of the low bits */ | 80 | if (aarch64 && vms->highmem) { |
86 | + switch (excret & 0xf) { | 81 | int requested_pa_size = 64 - clz64(vms->highest_gpa); |
87 | + case 1: /* Return to Handler */ | 82 | int pamax = arm_pamax(ARM_CPU(first_cpu)); |
88 | + break; | ||
89 | + case 13: /* Return to Thread using Process stack */ | ||
90 | + case 9: /* Return to Thread using Main stack */ | ||
91 | + /* We only need to check NONBASETHRDENA for v7M, because in | ||
92 | + * v8M this bit does not exist (it is RES1). | ||
93 | + */ | ||
94 | + if (!rettobase && | ||
95 | + !(env->v7m.ccr[env->v7m.secure] & | ||
96 | + R_V7M_CCR_NONBASETHRDENA_MASK)) { | ||
97 | + ufault = true; | ||
98 | + } | ||
99 | + break; | ||
100 | + default: | ||
101 | + ufault = true; | ||
102 | + } | ||
103 | + } | ||
104 | + | ||
105 | + if (sfault) { | ||
106 | + env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; | ||
107 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
108 | + v7m_exception_taken(cpu, excret); | ||
109 | + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
110 | + "stackframe: failed EXC_RETURN.ES validity check\n"); | ||
111 | + return; | ||
112 | } | ||
113 | |||
114 | if (ufault) { | ||
115 | -- | 83 | -- |
116 | 2.7.4 | 84 | 2.20.1 |
117 | 85 | ||
118 | 86 | diff view generated by jsdifflib |
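A reference sketch for the EXC_RETURN decoding above. QEMU expresses the layout with FIELD() definitions in target/arm/internals.h; the macros here are illustrative stand-ins, shown alongside the v7M-style whole-value check that the patch keeps only for non-v8M CPUs:

    #include <stdbool.h>
    #include <stdint.h>

    /* EXC_RETURN bit assignments relevant to the checks (v8M view). */
    #define EXCRET_ES     (1u << 0)  /* security state exception was taken to */
    #define EXCRET_SPSEL  (1u << 2)  /* 0: restore to Main, 1: Process stack */
    #define EXCRET_MODE   (1u << 3)  /* 0: return to Handler, 1: to Thread */
    #define EXCRET_DCRS   (1u << 5)  /* default callee-register stacking */
    #define EXCRET_S      (1u << 6)  /* security state of background frame */

    /* v7M recognizes only a handful of literal low-nibble values; v8M
     * instead validates the individual bits, which is what the patch
     * switches to when ARM_FEATURE_V8 is set.
     */
    static bool v7m_excret_low_bits_valid(uint32_t excret)
    {
        switch (excret & 0xf) {
        case 0x1:   /* return to Handler */
        case 0x9:   /* return to Thread using Main stack */
        case 0xd:   /* return to Thread using Process stack */
            return true;
        default:
            return false;
        }
    }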
1 | From: Michael Olbrich <m.olbrich@pengutronix.de> | 1 | From: Andrew Jones <drjones@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The current code checks if the next block exceeds the size of the card. | 3 | arm-cpu-features got dropped from the AArch64 tests during the meson |
4 | This generates an error while reading the last block of the card. | 4 | conversion shuffle. |
5 | Do the out-of-bounds check when starting to read a new block to fix this. | ||
6 | 5 | ||
7 | This issue became visible with increased error checking in Linux 4.13. | 6 | Signed-off-by: Andrew Jones <drjones@redhat.com> |
8 | 7 | Message-id: 20201001061718.101915-6-drjones@redhat.com | |
9 | Cc: qemu-stable@nongnu.org | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
10 | Signed-off-by: Michael Olbrich <m.olbrich@pengutronix.de> | ||
11 | Reviewed-by: Alistair Francis <alistair.francis@xilinx.com> | ||
12 | Message-id: 20170916091611.10241-1-m.olbrich@pengutronix.de | ||
13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 9 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
14 | --- | 10 | --- |
15 | hw/sd/sd.c | 12 ++++++------ | 11 | tests/qtest/meson.build | 3 ++- |
16 | 1 file changed, 6 insertions(+), 6 deletions(-) | 12 | 1 file changed, 2 insertions(+), 1 deletion(-) |
17 | 13 | ||
18 | diff --git a/hw/sd/sd.c b/hw/sd/sd.c | 14 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build |
19 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/hw/sd/sd.c | 16 | --- a/tests/qtest/meson.build |
21 | +++ b/hw/sd/sd.c | 17 | +++ b/tests/qtest/meson.build |
22 | @@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd) | 18 | @@ -XXX,XX +XXX,XX @@ qtests_aarch64 = \ |
23 | break; | 19 | (cpu != 'arm' ? ['bios-tables-test'] : []) + \ |
24 | 20 | (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-test'] : []) + \ | |
25 | case 18: /* CMD18: READ_MULTIPLE_BLOCK */ | 21 | (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-swtpm-test'] : []) + \ |
26 | - if (sd->data_offset == 0) | 22 | - ['numa-test', |
27 | + if (sd->data_offset == 0) { | 23 | + ['arm-cpu-features', |
28 | + if (sd->data_start + io_len > sd->size) { | 24 | + 'numa-test', |
29 | + sd->card_status |= ADDRESS_ERROR; | 25 | 'boot-serial-test', |
30 | + return 0x00; | 26 | 'migration-test'] |
31 | + } | ||
32 | BLK_READ_BLOCK(sd->data_start, io_len); | ||
33 | + } | ||
34 | ret = sd->data[sd->data_offset ++]; | ||
35 | |||
36 | if (sd->data_offset >= io_len) { | ||
37 | @@ -XXX,XX +XXX,XX @@ uint8_t sd_read_data(SDState *sd) | ||
38 | break; | ||
39 | } | ||
40 | } | ||
41 | - | ||
42 | - if (sd->data_start + io_len > sd->size) { | ||
43 | - sd->card_status |= ADDRESS_ERROR; | ||
44 | - break; | ||
45 | - } | ||
46 | } | ||
47 | break; | ||
48 | 27 | ||
49 | -- | 28 | -- |
50 | 2.7.4 | 29 | 2.20.1 |
51 | 30 | ||
52 | 31 | diff view generated by jsdifflib |
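The off-by-one the sd.c patch fixes is easiest to see with concrete numbers. A toy model of the arithmetic (not the device code), assuming a 1024-byte card read in 512-byte blocks:

    #include <stdbool.h>
    #include <stdint.h>

    #define CARD_SIZE 1024u   /* toy card: exactly two 512-byte blocks */
    #define BLKLEN     512u

    /* Old flow: the bound was tested after every byte, including after
     * data_start had already advanced past the final block, so finishing
     * a legitimate read of the last block raised ADDRESS_ERROR.
     */
    static bool old_check_after_last_block(void)
    {
        uint32_t data_start = CARD_SIZE - BLKLEN; /* start of final block */
        data_start += BLKLEN;                     /* block fully consumed */
        return data_start + BLKLEN > CARD_SIZE;   /* true: spurious error */
    }

    /* New flow: the same bound is tested only when a fresh block read is
     * about to start, so it fires only if data beyond the end of the
     * card is actually requested.
     */
    static bool new_check_before_last_block(void)
    {
        uint32_t data_start = CARD_SIZE - BLKLEN;
        return data_start + BLKLEN > CARD_SIZE;   /* false: read proceeds */
    }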
Deleted patch | |||
---|---|---|---|
1 | From: Thomas Huth <thuth@redhat.com> | ||
2 | 1 | ||
3 | The device uses serial_hds in its realize function and thus can't be | ||
4 | used twice. Apart from that, the comma in its name makes it quite hard | ||
5 | for the user to use anyway, since a comma is normally used to separate | ||
6 | the device name from its properties when using the "-device" parameter | ||
7 | or the "device_add" HMP command. | ||
8 | |||
9 | Signed-off-by: Thomas Huth <thuth@redhat.com> | ||
10 | Reviewed-by: Alistair Francis <alistair.francis@xilinx.com> | ||
11 | Message-id: 1506441116-16627-1-git-send-email-thuth@redhat.com | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | --- | ||
14 | hw/arm/xlnx-zynqmp.c | 2 ++ | ||
15 | 1 file changed, 2 insertions(+) | ||
16 | |||
17 | diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/hw/arm/xlnx-zynqmp.c | ||
20 | +++ b/hw/arm/xlnx-zynqmp.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data) | ||
22 | |||
23 | dc->props = xlnx_zynqmp_props; | ||
24 | dc->realize = xlnx_zynqmp_realize; | ||
25 | + /* Reason: Uses serial_hds in realize function, thus can't be used twice */ | ||
26 | + dc->user_creatable = false; | ||
27 | } | ||
28 | |||
29 | static const TypeInfo xlnx_zynqmp_type_info = { | ||
30 | -- | ||
31 | 2.7.4 | ||
32 | |||
33 | diff view generated by jsdifflib |
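To make the comma problem concrete: a hypothetical invocation such as -device xlnx,zynqmp would be split at the comma by the option parser, so QEMU would look for a device named "xlnx" and treat "zynqmp" as an option for it. The device's full name is effectively unreachable from the command line, which makes marking it user_creatable = false the safe choice.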
Deleted patch | |||
---|---|---|---|
1 | Reset for devices does not include an automatic clear of the | ||
2 | device state (unlike CPU state, where most of the state | ||
3 | structure is cleared to zero). Add some missing initialization | ||
4 | of NVIC state whose absence meant that the device was left in | ||
5 | the wrong state if the guest did a warm reset. | ||
6 | 1 | ||
7 | (In particular, since we were resetting the computed state like | ||
8 | s->exception_prio but not all the state it was computed | ||
9 | from like s->vectors[x].active, the NVIC wound up in an | ||
10 | inconsistent state that could later trigger assertion failures.) | ||
11 | |||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
14 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
15 | Message-id: 1506092407-26985-2-git-send-email-peter.maydell@linaro.org | ||
16 | --- | ||
17 | hw/intc/armv7m_nvic.c | 5 +++++ | ||
18 | 1 file changed, 5 insertions(+) | ||
19 | |||
20 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/hw/intc/armv7m_nvic.c | ||
23 | +++ b/hw/intc/armv7m_nvic.c | ||
24 | @@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_reset(DeviceState *dev) | ||
25 | int resetprio; | ||
26 | NVICState *s = NVIC(dev); | ||
27 | |||
28 | + memset(s->vectors, 0, sizeof(s->vectors)); | ||
29 | + memset(s->sec_vectors, 0, sizeof(s->sec_vectors)); | ||
30 | + s->prigroup[M_REG_NS] = 0; | ||
31 | + s->prigroup[M_REG_S] = 0; | ||
32 | + | ||
33 | s->vectors[ARMV7M_EXCP_NMI].enabled = 1; | ||
34 | /* MEM, BUS, and USAGE are enabled through | ||
35 | * the System Handler Control register | ||
36 | -- | ||
37 | 2.7.4 | ||
38 | |||
39 | diff view generated by jsdifflib |
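The underlying pattern is worth spelling out, since it applies to any qdev reset handler: nothing zeroes the device state struct between resets, so cached or derived values must be recomputed from inputs the handler explicitly clears. A minimal sketch with illustrative types (MiniNVIC is not the real NVICState, and the sentinel priority is an assumption for the example):

    #include <string.h>

    typedef struct {
        int enabled;
        int pending;
        int active;
    } MiniVecInfo;

    typedef struct {
        MiniVecInfo vectors[512];
        int prigroup;
        int exception_prio;   /* derived from vectors[] */
    } MiniNVIC;

    static void mini_nvic_reset(MiniNVIC *s)
    {
        /* Clear the inputs the derived state is computed from... */
        memset(s->vectors, 0, sizeof(s->vectors));
        s->prigroup = 0;
        /* ...so the recomputed derived state is consistent with them. */
        s->exception_prio = 0x100;   /* assumed "no exception" sentinel */
    }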
Deleted patch | |||
---|---|---|---|
1 | Now that we can handle the CONTROL.SPSEL bit not necessarily being | ||
2 | in sync with the current stack pointer, we can restore the correct | ||
3 | security state on exception return. This happens before we start | ||
4 | to read registers off the stack frame, but after we have taken | ||
5 | possible usage faults for bad exception return magic values and | ||
6 | updated CONTROL.SPSEL. | ||
7 | 1 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-id: 1506092407-26985-5-git-send-email-peter.maydell@linaro.org | ||
11 | --- | ||
12 | target/arm/helper.c | 2 ++ | ||
13 | 1 file changed, 2 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/helper.c | ||
18 | +++ b/target/arm/helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
20 | */ | ||
21 | write_v7m_control_spsel(env, return_to_sp_process); | ||
22 | |||
23 | + switch_v7m_security_state(env, return_to_secure); | ||
24 | + | ||
25 | { | ||
26 | /* The stack pointer we should be reading the exception frame from | ||
27 | * depends on bits in the magic exception return type value (and | ||
28 | -- | ||
29 | 2.7.4 | ||
30 | |||
31 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | ARM v8M specifies that the INVPC usage fault for mismatched | ||
2 | xPSR exception field and handler mode bit should be checked | ||
3 | before updating the PSR and SP, so that the fault is taken | ||
4 | with the existing stack frame rather than by pushing a new one. | ||
5 | Perform this check in the right place for v8M. | ||
6 | 1 | ||
7 | Since v7M specifies in its pseudocode that this usage fault | ||
8 | check should happen later, we have to retain the original | ||
9 | code for that check rather than being able to merge the two. | ||
10 | (The distinction is architecturally visible but only in | ||
11 | very obscure corner cases like attempting an invalid exception | ||
12 | return with an exception frame in read only memory.) | ||
13 | |||
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
16 | Message-id: 1506092407-26985-7-git-send-email-peter.maydell@linaro.org | ||
17 | --- | ||
18 | target/arm/helper.c | 30 +++++++++++++++++++++++++++--- | ||
19 | 1 file changed, 27 insertions(+), 3 deletions(-) | ||
20 | |||
21 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/target/arm/helper.c | ||
24 | +++ b/target/arm/helper.c | ||
25 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
26 | } | ||
27 | xpsr = ldl_phys(cs->as, frameptr + 0x1c); | ||
28 | |||
29 | + if (arm_feature(env, ARM_FEATURE_V8)) { | ||
30 | + /* For v8M we have to check whether the xPSR exception field | ||
31 | + * matches the EXCRET value for return to handler/thread | ||
32 | + * before we commit to changing the SP and xPSR. | ||
33 | + */ | ||
34 | + bool will_be_handler = (xpsr & XPSR_EXCP) != 0; | ||
35 | + if (return_to_handler != will_be_handler) { | ||
36 | + /* Take an INVPC UsageFault on the current stack. | ||
37 | + * By this point we will have switched to the security state | ||
38 | + * for the background state, so this UsageFault will target | ||
39 | + * that state. | ||
40 | + */ | ||
41 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, | ||
42 | + env->v7m.secure); | ||
43 | + env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
44 | + v7m_exception_taken(cpu, excret); | ||
45 | + qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " | ||
46 | + "stackframe: failed exception return integrity " | ||
47 | + "check\n"); | ||
48 | + return; | ||
49 | + } | ||
50 | + } | ||
51 | + | ||
52 | /* Commit to consuming the stack frame */ | ||
53 | frameptr += 0x20; | ||
54 | /* Undo stack alignment (the SPREALIGN bit indicates that the original | ||
55 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
56 | /* The restored xPSR exception field will be zero if we're | ||
57 | * resuming in Thread mode. If that doesn't match what the | ||
58 | * exception return excret specified then this is a UsageFault. | ||
59 | + * v7M requires we make this check here; v8M did it earlier. | ||
60 | */ | ||
61 | if (return_to_handler != arm_v7m_is_handler_mode(env)) { | ||
62 | - /* Take an INVPC UsageFault by pushing the stack again. | ||
63 | - * TODO: the v8M version of this code should target the | ||
64 | - * background state for this exception. | ||
65 | + /* Take an INVPC UsageFault by pushing the stack again; | ||
66 | + * we know we're v7M so this is never a Secure UsageFault. | ||
67 | */ | ||
68 | + assert(!arm_feature(env, ARM_FEATURE_V8)); | ||
69 | armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); | ||
70 | env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; | ||
71 | v7m_push_stack(cpu); | ||
72 | -- | ||
73 | 2.7.4 | ||
74 | |||
75 | diff view generated by jsdifflib |
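The ordering difference reduces to a single predicate: v8M evaluates it against the still-unconsumed stack frame before committing SP and xPSR, while v7M commits first and pushes a fresh frame for the fault. A sketch using the XPSR_EXCP field mask the patch tests (the function name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define XPSR_EXCP 0x1ffu   /* xPSR exception-number field */

    /* True when the stacked xPSR agrees with the EXC_RETURN mode bit:
     * a nonzero exception number means the code being returned to runs
     * in Handler mode.
     */
    static bool return_mode_consistent(uint32_t stacked_xpsr,
                                       bool return_to_handler)
    {
        bool will_be_handler = (stacked_xpsr & XPSR_EXCP) != 0;
        return will_be_handler == return_to_handler;
    }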
Deleted patch | |||
---|---|---|---|
1 | Attempting to do an exception return with an exception frame that | ||
2 | is not 8-aligned is UNPREDICTABLE in v8M; warn about this. | ||
3 | (It is not UNPREDICTABLE in v7M, and our implementation can | ||
4 | handle the merely-4-aligned case fine, so we don't need to | ||
5 | do anything except warn.) | ||
6 | 1 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
8 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
9 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
10 | Message-id: 1506092407-26985-8-git-send-email-peter.maydell@linaro.org | ||
11 | --- | ||
12 | target/arm/helper.c | 7 +++++++ | ||
13 | 1 file changed, 7 insertions(+) | ||
14 | |||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/target/arm/helper.c | ||
18 | +++ b/target/arm/helper.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
20 | return_to_sp_process); | ||
21 | uint32_t frameptr = *frame_sp_p; | ||
22 | |||
23 | + if (!QEMU_IS_ALIGNED(frameptr, 8) && | ||
24 | + arm_feature(env, ARM_FEATURE_V8)) { | ||
25 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
26 | + "M profile exception return with non-8-aligned SP " | ||
27 | + "for destination state is UNPREDICTABLE\n"); | ||
28 | + } | ||
29 | + | ||
30 | /* Pop registers. TODO: make these accesses use the correct | ||
31 | * attributes and address space (S/NS, priv/unpriv) and handle | ||
32 | * memory transaction failures. | ||
33 | -- | ||
34 | 2.7.4 | ||
35 | |||
36 | diff view generated by jsdifflib |
Deleted patch | |||
---|---|---|---|
1 | In the v8M architecture, return from an exception to a PC which | ||
2 | has bit 0 set is not UNPREDICTABLE; it is defined that bit 0 | ||
3 | is discarded [R_HRJH]. Restrict our complaint about this to v7M. | ||
4 | 1 | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 1506092407-26985-9-git-send-email-peter.maydell@linaro.org | ||
9 | --- | ||
10 | target/arm/helper.c | 22 +++++++++++++++------- | ||
11 | 1 file changed, 15 insertions(+), 7 deletions(-) | ||
12 | |||
13 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/helper.c | ||
16 | +++ b/target/arm/helper.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
18 | env->regs[12] = ldl_phys(cs->as, frameptr + 0x10); | ||
19 | env->regs[14] = ldl_phys(cs->as, frameptr + 0x14); | ||
20 | env->regs[15] = ldl_phys(cs->as, frameptr + 0x18); | ||
21 | + | ||
22 | + /* Returning from an exception with a PC with bit 0 set is defined | ||
23 | + * behaviour on v8M (bit 0 is ignored), but for v7M it was specified | ||
24 | + * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore | ||
25 | + * the lsbit, and there are several RTOSes out there which incorrectly | ||
26 | + * assume the r15 in the stack frame should be a Thumb-style "lsbit | ||
27 | + * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but | ||
28 | + * complain about the badly behaved guest. | ||
29 | + */ | ||
30 | if (env->regs[15] & 1) { | ||
31 | - qemu_log_mask(LOG_GUEST_ERROR, | ||
32 | - "M profile return from interrupt with misaligned " | ||
33 | - "PC is UNPREDICTABLE\n"); | ||
34 | - /* Actual hardware seems to ignore the lsbit, and there are several | ||
35 | - * RTOSes out there which incorrectly assume the r15 in the stack | ||
36 | - * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value. | ||
37 | - */ | ||
38 | env->regs[15] &= ~1U; | ||
39 | + if (!arm_feature(env, ARM_FEATURE_V8)) { | ||
40 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
41 | + "M profile return from interrupt with misaligned " | ||
42 | + "PC is UNPREDICTABLE on v7M\n"); | ||
43 | + } | ||
44 | } | ||
45 | + | ||
46 | xpsr = ldl_phys(cs->as, frameptr + 0x1c); | ||
47 | |||
48 | if (arm_feature(env, ARM_FEATURE_V8)) { | ||
49 | -- | ||
50 | 2.7.4 | ||
51 | |||
52 | diff view generated by jsdifflib |
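A two-line illustration of the bit-0 rule: masking turns a Thumb-style stacked address into the architectural return address, with only the v7M path logging a complaint.

    #include <stdint.h>

    /* e.g. a stacked PC of 0x00001235 resumes execution at 0x00001234 */
    static uint32_t unstacked_pc(uint32_t stacked_pc)
    {
        return stacked_pc & ~1u;   /* defined on v8M; tolerated on v7M */
    }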
1 | Add the new M profile Secure Fault Status Register | 1 | From: Andrew Jones <drjones@redhat.com> |
---|---|---|---|
2 | and Secure Fault Address Register. | ||
3 | 2 | ||
3 | We add the kvm-steal-time CPU property and implement it for machvirt. | ||
4 | A tiny bit of refactoring was also done to allow pmu and pvtime to | ||
5 | use the same vcpu device helper functions. | ||
6 | |||
7 | Reviewed-by: Eric Auger <eric.auger@redhat.com> | ||
8 | Signed-off-by: Andrew Jones <drjones@redhat.com> | ||
9 | Message-id: 20201001061718.101915-7-drjones@redhat.com | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
6 | Message-id: 1506092407-26985-10-git-send-email-peter.maydell@linaro.org | ||
7 | --- | 11 | --- |
8 | target/arm/cpu.h | 12 ++++++++++++ | 12 | docs/system/arm/cpu-features.rst | 11 ++++++ |
9 | hw/intc/armv7m_nvic.c | 34 ++++++++++++++++++++++++++++++++++ | 13 | include/hw/arm/virt.h | 5 +++ |
10 | target/arm/machine.c | 2 ++ | 14 | target/arm/cpu.h | 4 ++ |
11 | 3 files changed, 48 insertions(+) | 15 | target/arm/kvm_arm.h | 43 +++++++++++++++++++++ |
16 | hw/arm/virt.c | 43 +++++++++++++++++++-- | ||
17 | target/arm/cpu.c | 8 ++++ | ||
18 | target/arm/kvm.c | 16 ++++++++ | ||
19 | target/arm/kvm64.c | 64 +++++++++++++++++++++++++++++--- | ||
20 | target/arm/monitor.c | 2 +- | ||
21 | tests/qtest/arm-cpu-features.c | 25 +++++++++++-- | ||
22 | 10 files changed, 208 insertions(+), 13 deletions(-) | ||
12 | 23 | ||
24 | diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/docs/system/arm/cpu-features.rst | ||
27 | +++ b/docs/system/arm/cpu-features.rst | ||
28 | @@ -XXX,XX +XXX,XX @@ the list of KVM VCPU features and their descriptions. | ||
29 | adjustment, also restoring the legacy (pre-5.0) | ||
30 | behavior. | ||
31 | |||
32 | + kvm-steal-time Since v5.2, kvm-steal-time is enabled by | ||
33 | + default when KVM is enabled, the feature is | ||
34 | + supported, and the guest is 64-bit. | ||
35 | + | ||
36 | + When kvm-steal-time is enabled, a 64-bit guest | ||
37 | + can account for time its CPUs were not running | ||
38 | + due to the host not scheduling the corresponding | ||
39 | + VCPU threads. The accounting statistics may | ||
40 | + influence the guest scheduler behavior and/or be | ||
41 | + exposed to the guest userspace. | ||
42 | + | ||
43 | SVE CPU Properties | ||
44 | ================== | ||
45 | |||
46 | diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/include/hw/arm/virt.h | ||
49 | +++ b/include/hw/arm/virt.h | ||
50 | @@ -XXX,XX +XXX,XX @@ | ||
51 | |||
52 | #define PPI(irq) ((irq) + 16) | ||
53 | |||
54 | +/* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */ | ||
55 | +#define PVTIME_SIZE_PER_CPU 64 | ||
56 | + | ||
57 | enum { | ||
58 | VIRT_FLASH, | ||
59 | VIRT_MEM, | ||
60 | @@ -XXX,XX +XXX,XX @@ enum { | ||
61 | VIRT_PCDIMM_ACPI, | ||
62 | VIRT_ACPI_GED, | ||
63 | VIRT_NVDIMM_ACPI, | ||
64 | + VIRT_PVTIME, | ||
65 | VIRT_LOWMEMMAP_LAST, | ||
66 | }; | ||
67 | |||
68 | @@ -XXX,XX +XXX,XX @@ struct VirtMachineClass { | ||
69 | bool no_highmem_ecam; | ||
70 | bool no_ged; /* Machines < 4.2 has no support for ACPI GED device */ | ||
71 | bool kvm_no_adjvtime; | ||
72 | + bool no_kvm_steal_time; | ||
73 | bool acpi_expose_flash; | ||
74 | }; | ||
75 | |||
13 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 76 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h |
14 | index XXXXXXX..XXXXXXX 100644 | 77 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/cpu.h | 78 | --- a/target/arm/cpu.h |
16 | +++ b/target/arm/cpu.h | 79 | +++ b/target/arm/cpu.h |
17 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | 80 | @@ -XXX,XX +XXX,XX @@ |
18 | uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */ | 81 | #include "hw/registerfields.h" |
19 | uint32_t hfsr; /* HardFault Status */ | 82 | #include "cpu-qom.h" |
20 | uint32_t dfsr; /* Debug Fault Status Register */ | 83 | #include "exec/cpu-defs.h" |
21 | + uint32_t sfsr; /* Secure Fault Status Register */ | 84 | +#include "qapi/qapi-types-common.h" |
22 | uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */ | 85 | |
23 | uint32_t bfar; /* BusFault Address */ | 86 | /* ARM processors have a weak memory model */ |
24 | + uint32_t sfar; /* Secure Fault Address Register */ | 87 | #define TCG_GUEST_DEFAULT_MO (0) |
25 | unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */ | 88 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { |
26 | int exception; | 89 | bool kvm_vtime_dirty; |
27 | uint32_t primask[M_REG_NUM_BANKS]; | 90 | uint64_t kvm_vtime; |
28 | @@ -XXX,XX +XXX,XX @@ FIELD(V7M_DFSR, DWTTRAP, 2, 1) | 91 | |
29 | FIELD(V7M_DFSR, VCATCH, 3, 1) | 92 | + /* KVM steal time */ |
30 | FIELD(V7M_DFSR, EXTERNAL, 4, 1) | 93 | + OnOffAuto kvm_steal_time; |
31 | 94 | + | |
32 | +/* V7M SFSR bits */ | 95 | /* Uniprocessor system with MP extensions */ |
33 | +FIELD(V7M_SFSR, INVEP, 0, 1) | 96 | bool mp_is_up; |
34 | +FIELD(V7M_SFSR, INVIS, 1, 1) | 97 | |
35 | +FIELD(V7M_SFSR, INVER, 2, 1) | 98 | diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h |
36 | +FIELD(V7M_SFSR, AUVIOL, 3, 1) | 99 | index XXXXXXX..XXXXXXX 100644 |
37 | +FIELD(V7M_SFSR, INVTRAN, 4, 1) | 100 | --- a/target/arm/kvm_arm.h |
38 | +FIELD(V7M_SFSR, LSPERR, 5, 1) | 101 | +++ b/target/arm/kvm_arm.h |
39 | +FIELD(V7M_SFSR, SFARVALID, 6, 1) | 102 | @@ -XXX,XX +XXX,XX @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu); |
40 | +FIELD(V7M_SFSR, LSERR, 7, 1) | 103 | */ |
41 | + | 104 | void kvm_arm_add_vcpu_properties(Object *obj); |
42 | /* v7M MPU_CTRL bits */ | 105 | |
43 | FIELD(V7M_MPU_CTRL, ENABLE, 0, 1) | 106 | +/** |
44 | FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1) | 107 | + * kvm_arm_steal_time_finalize: |
45 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 108 | + * @cpu: ARMCPU for which to finalize kvm-steal-time |
46 | index XXXXXXX..XXXXXXX 100644 | 109 | + * @errp: Pointer to Error* for error propagation |
47 | --- a/hw/intc/armv7m_nvic.c | 110 | + * |
48 | +++ b/hw/intc/armv7m_nvic.c | 111 | + * Validate the kvm-steal-time property selection and set its default |
49 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | 112 | + * based on KVM support and guest configuration. |
50 | goto bad_offset; | 113 | + */ |
114 | +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); | ||
115 | + | ||
116 | +/** | ||
117 | + * kvm_arm_steal_time_supported: | ||
118 | + * | ||
119 | + * Returns: true if KVM can enable steal time reporting | ||
120 | + * and false otherwise. | ||
121 | + */ | ||
122 | +bool kvm_arm_steal_time_supported(void); | ||
123 | + | ||
124 | /** | ||
125 | * kvm_arm_aarch32_supported: | ||
126 | * | ||
127 | @@ -XXX,XX +XXX,XX @@ int kvm_arm_vgic_probe(void); | ||
128 | |||
129 | void kvm_arm_pmu_set_irq(CPUState *cs, int irq); | ||
130 | void kvm_arm_pmu_init(CPUState *cs); | ||
131 | + | ||
132 | +/** | ||
133 | + * kvm_arm_pvtime_init: | ||
134 | + * @cs: CPUState | ||
135 | + * @ipa: Per-vcpu guest physical base address of the pvtime structures | ||
136 | + * | ||
137 | + * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. | ||
138 | + */ | ||
139 | +void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); | ||
140 | + | ||
141 | int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); | ||
142 | |||
143 | #else | ||
144 | @@ -XXX,XX +XXX,XX @@ static inline bool kvm_arm_sve_supported(void) | ||
145 | return false; | ||
146 | } | ||
147 | |||
148 | +static inline bool kvm_arm_steal_time_supported(void) | ||
149 | +{ | ||
150 | + return false; | ||
151 | +} | ||
152 | + | ||
153 | /* | ||
154 | * These functions should never actually be called without KVM support. | ||
155 | */ | ||
156 | @@ -XXX,XX +XXX,XX @@ static inline void kvm_arm_pmu_init(CPUState *cs) | ||
157 | g_assert_not_reached(); | ||
158 | } | ||
159 | |||
160 | +static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) | ||
161 | +{ | ||
162 | + g_assert_not_reached(); | ||
163 | +} | ||
164 | + | ||
165 | +static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) | ||
166 | +{ | ||
167 | + g_assert_not_reached(); | ||
168 | +} | ||
169 | + | ||
170 | static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) | ||
171 | { | ||
172 | g_assert_not_reached(); | ||
173 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c | ||
174 | index XXXXXXX..XXXXXXX 100644 | ||
175 | --- a/hw/arm/virt.c | ||
176 | +++ b/hw/arm/virt.c | ||
177 | @@ -XXX,XX +XXX,XX @@ static const MemMapEntry base_memmap[] = { | ||
178 | [VIRT_PCDIMM_ACPI] = { 0x09070000, MEMORY_HOTPLUG_IO_LEN }, | ||
179 | [VIRT_ACPI_GED] = { 0x09080000, ACPI_GED_EVT_SEL_LEN }, | ||
180 | [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN}, | ||
181 | + [VIRT_PVTIME] = { 0x090a0000, 0x00010000 }, | ||
182 | [VIRT_MMIO] = { 0x0a000000, 0x00000200 }, | ||
183 | /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */ | ||
184 | [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 }, | ||
185 | @@ -XXX,XX +XXX,XX @@ static void finalize_gic_version(VirtMachineState *vms) | ||
186 | * virt_cpu_post_init() must be called after the CPUs have | ||
187 | * been realized and the GIC has been created. | ||
188 | */ | ||
189 | -static void virt_cpu_post_init(VirtMachineState *vms) | ||
190 | +static void virt_cpu_post_init(VirtMachineState *vms, int max_cpus, | ||
191 | + MemoryRegion *sysmem) | ||
192 | { | ||
193 | - bool aarch64, pmu; | ||
194 | + bool aarch64, pmu, steal_time; | ||
195 | CPUState *cpu; | ||
196 | |||
197 | aarch64 = object_property_get_bool(OBJECT(first_cpu), "aarch64", NULL); | ||
198 | pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL); | ||
199 | + steal_time = object_property_get_bool(OBJECT(first_cpu), | ||
200 | + "kvm-steal-time", NULL); | ||
201 | |||
202 | if (kvm_enabled()) { | ||
203 | + hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base; | ||
204 | + hwaddr pvtime_reg_size = vms->memmap[VIRT_PVTIME].size; | ||
205 | + | ||
206 | + if (steal_time) { | ||
207 | + MemoryRegion *pvtime = g_new(MemoryRegion, 1); | ||
208 | + hwaddr pvtime_size = max_cpus * PVTIME_SIZE_PER_CPU; | ||
209 | + | ||
210 | + /* The memory region size must be a multiple of host page size. */ | ||
211 | + pvtime_size = REAL_HOST_PAGE_ALIGN(pvtime_size); | ||
212 | + | ||
213 | + if (pvtime_size > pvtime_reg_size) { | ||
214 | + error_report("pvtime requires a %" HWADDR_PRId " byte memory " | ||
215 | + "region for %d CPUs, but only %" HWADDR_PRId " has " | ||
216 | + "been reserved", pvtime_size, max_cpus, pvtime_reg_size); | ||
217 | + exit(1); | ||
218 | + } | ||
219 | + | ||
220 | + memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL); | ||
221 | + memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime); | ||
222 | + } | ||
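As a worked example of the sizing check above (again taking the per-vCPU record to be 64 bytes): a 512-vCPU guest needs 512 * 64 = 32 KiB, which REAL_HOST_PAGE_ALIGN rounds up to 64 KiB on a 64 KiB-page host; that still fits the 0x00010000 window reserved at VIRT_PVTIME, so the error path only fires for configurations that genuinely overflow the reservation.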
223 | + | ||
224 | CPU_FOREACH(cpu) { | ||
225 | if (pmu) { | ||
226 | assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); | ||
227 | @@ -XXX,XX +XXX,XX @@ static void virt_cpu_post_init(VirtMachineState *vms) | ||
228 | } | ||
229 | kvm_arm_pmu_init(cpu); | ||
230 | } | ||
231 | + if (steal_time) { | ||
232 | + kvm_arm_pvtime_init(cpu, pvtime_reg_base + | ||
233 | + cpu->cpu_index * PVTIME_SIZE_PER_CPU); | ||
234 | + } | ||
51 | } | 235 | } |
52 | return cpu->env.pmsav8.mair1[attrs.secure]; | 236 | } else { |
53 | + case 0xde4: /* SFSR */ | 237 | if (aarch64 && vms->highmem) { |
54 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | 238 | @@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine) |
55 | + goto bad_offset; | 239 | object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL); |
240 | } | ||
241 | |||
242 | + if (vmc->no_kvm_steal_time && | ||
243 | + object_property_find(cpuobj, "kvm-steal-time")) { | ||
244 | + object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL); | ||
56 | + } | 245 | + } |
57 | + if (!attrs.secure) { | 246 | + |
58 | + return 0; | 247 | if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) { |
59 | + } | 248 | object_property_set_bool(cpuobj, "pmu", false, NULL); |
60 | + return cpu->env.v7m.sfsr; | 249 | } |
61 | + case 0xde8: /* SFAR */ | 250 | @@ -XXX,XX +XXX,XX @@ static void machvirt_init(MachineState *machine) |
62 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | 251 | |
63 | + goto bad_offset; | 252 | create_gic(vms); |
64 | + } | 253 | |
65 | + if (!attrs.secure) { | 254 | - virt_cpu_post_init(vms); |
66 | + return 0; | 255 | + virt_cpu_post_init(vms, possible_cpus->len, sysmem); |
67 | + } | 256 | |
68 | + return cpu->env.v7m.sfar; | 257 | fdt_add_pmu_nodes(vms); |
69 | default: | 258 | |
70 | bad_offset: | 259 | @@ -XXX,XX +XXX,XX @@ DEFINE_VIRT_MACHINE_AS_LATEST(5, 2) |
71 | qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); | 260 | |
72 | @@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, | 261 | static void virt_machine_5_1_options(MachineClass *mc) |
73 | * only affect cacheability, and we don't implement caching. | 262 | { |
74 | */ | 263 | + VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc)); |
75 | break; | 264 | + |
76 | + case 0xde4: /* SFSR */ | 265 | virt_machine_5_2_options(mc); |
77 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | 266 | compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len); |
78 | + goto bad_offset; | 267 | + vmc->no_kvm_steal_time = true; |
79 | + } | 268 | } |
80 | + if (!attrs.secure) { | 269 | DEFINE_VIRT_MACHINE(5, 1) |
270 | |||
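The virt-5.1 hunk above follows the usual versioned-machine compat pattern: setting vmc->no_kvm_steal_time makes older machine types force the property off before the CPUs are realized, so a guest started with the 5.1 machine type keeps the ABI and migration behaviour it had before this feature existed.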
271 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
272 | index XXXXXXX..XXXXXXX 100644 | ||
273 | --- a/target/arm/cpu.c | ||
274 | +++ b/target/arm/cpu.c | ||
275 | @@ -XXX,XX +XXX,XX @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp) | ||
276 | return; | ||
277 | } | ||
278 | } | ||
279 | + | ||
280 | + if (kvm_enabled()) { | ||
281 | + kvm_arm_steal_time_finalize(cpu, &local_err); | ||
282 | + if (local_err != NULL) { | ||
283 | + error_propagate(errp, local_err); | ||
81 | + return; | 284 | + return; |
82 | + } | 285 | + } |
83 | + cpu->env.v7m.sfsr &= ~value; /* W1C */ | 286 | + } |
84 | + break; | 287 | } |
85 | + case 0xde8: /* SFAR */ | 288 | |
86 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | 289 | static void arm_cpu_realizefn(DeviceState *dev, Error **errp) |
87 | + goto bad_offset; | 290 | diff --git a/target/arm/kvm.c b/target/arm/kvm.c |
291 | index XXXXXXX..XXXXXXX 100644 | ||
292 | --- a/target/arm/kvm.c | ||
293 | +++ b/target/arm/kvm.c | ||
294 | @@ -XXX,XX +XXX,XX @@ static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp) | ||
295 | ARM_CPU(obj)->kvm_adjvtime = !value; | ||
296 | } | ||
297 | |||
298 | +static bool kvm_steal_time_get(Object *obj, Error **errp) | ||
299 | +{ | ||
300 | + return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF; | ||
301 | +} | ||
302 | + | ||
303 | +static void kvm_steal_time_set(Object *obj, bool value, Error **errp) | ||
304 | +{ | ||
305 | + ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; | ||
306 | +} | ||
307 | + | ||
308 | /* KVM VCPU properties should be prefixed with "kvm-". */ | ||
309 | void kvm_arm_add_vcpu_properties(Object *obj) | ||
310 | { | ||
311 | @@ -XXX,XX +XXX,XX @@ void kvm_arm_add_vcpu_properties(Object *obj) | ||
312 | "the virtual counter. VM stopped time " | ||
313 | "will be counted."); | ||
314 | } | ||
315 | + | ||
316 | + cpu->kvm_steal_time = ON_OFF_AUTO_AUTO; | ||
317 | + object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get, | ||
318 | + kvm_steal_time_set); | ||
319 | + object_property_set_description(obj, "kvm-steal-time", | ||
320 | + "Set off to disable KVM steal time."); | ||
321 | } | ||
322 | |||
323 | bool kvm_arm_pmu_supported(void) | ||
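A usage note on the property registered above: from the user's point of view it is an ordinary boolean CPU feature, e.g. (illustrative) -cpu host,kvm-steal-time=off on the command line. Left unset, the internal ON_OFF_AUTO_AUTO default is resolved later by the finalize hook in kvm64.c below, based on host support and on whether the guest is AArch64.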
324 | diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c | ||
325 | index XXXXXXX..XXXXXXX 100644 | ||
326 | --- a/target/arm/kvm64.c | ||
327 | +++ b/target/arm/kvm64.c | ||
328 | @@ -XXX,XX +XXX,XX @@ | ||
329 | #include <linux/kvm.h> | ||
330 | |||
331 | #include "qemu-common.h" | ||
332 | +#include "qapi/error.h" | ||
333 | #include "cpu.h" | ||
334 | #include "qemu/timer.h" | ||
335 | #include "qemu/error-report.h" | ||
336 | @@ -XXX,XX +XXX,XX @@ static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr) | ||
337 | return NULL; | ||
338 | } | ||
339 | |||
340 | -static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr) | ||
341 | +static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr, | ||
342 | + const char *name) | ||
343 | { | ||
344 | int err; | ||
345 | |||
346 | err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); | ||
347 | if (err != 0) { | ||
348 | - error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err)); | ||
349 | + error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); | ||
350 | return false; | ||
351 | } | ||
352 | |||
353 | err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); | ||
354 | if (err != 0) { | ||
355 | - error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err)); | ||
356 | + error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); | ||
357 | return false; | ||
358 | } | ||
359 | |||
360 | @@ -XXX,XX +XXX,XX @@ void kvm_arm_pmu_init(CPUState *cs) | ||
361 | if (!ARM_CPU(cs)->has_pmu) { | ||
362 | return; | ||
363 | } | ||
364 | - if (!kvm_arm_pmu_set_attr(cs, &attr)) { | ||
365 | + if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { | ||
366 | error_report("failed to init PMU"); | ||
367 | abort(); | ||
368 | } | ||
369 | @@ -XXX,XX +XXX,XX @@ void kvm_arm_pmu_set_irq(CPUState *cs, int irq) | ||
370 | if (!ARM_CPU(cs)->has_pmu) { | ||
371 | return; | ||
372 | } | ||
373 | - if (!kvm_arm_pmu_set_attr(cs, &attr)) { | ||
374 | + if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { | ||
375 | error_report("failed to set irq for PMU"); | ||
376 | abort(); | ||
377 | } | ||
378 | } | ||
379 | |||
380 | +void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) | ||
381 | +{ | ||
382 | + struct kvm_device_attr attr = { | ||
383 | + .group = KVM_ARM_VCPU_PVTIME_CTRL, | ||
384 | + .attr = KVM_ARM_VCPU_PVTIME_IPA, | ||
385 | + .addr = (uint64_t)&ipa, | ||
386 | + }; | ||
387 | + | ||
388 | + if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) { | ||
389 | + return; | ||
390 | + } | ||
391 | + if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) { | ||
392 | + error_report("failed to init PVTIME IPA"); | ||
393 | + abort(); | ||
394 | + } | ||
395 | +} | ||
396 | + | ||
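One subtlety in kvm_arm_pvtime_init() above: for the PVTIME control group the kernel interprets attr.addr as a userspace pointer to a 64-bit IPA, not as the IPA value itself, which is why the code passes the address of the local ipa. The relevant UAPI layout from linux/kvm.h:

    struct kvm_device_attr {
        __u32 flags;  /* no flags defined for this use */
        __u32 group;  /* KVM_ARM_VCPU_PVTIME_CTRL */
        __u64 attr;   /* KVM_ARM_VCPU_PVTIME_IPA */
        __u64 addr;   /* userspace address of the attribute data,
                       * here a __u64 holding the guest IPA */
    };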
397 | static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) | ||
398 | { | ||
399 | uint64_t ret; | ||
400 | @@ -XXX,XX +XXX,XX @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) | ||
401 | return true; | ||
402 | } | ||
403 | |||
404 | +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) | ||
405 | +{ | ||
406 | + bool has_steal_time = kvm_arm_steal_time_supported(); | ||
407 | + | ||
408 | + if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { | ||
409 | + if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { | ||
410 | + cpu->kvm_steal_time = ON_OFF_AUTO_OFF; | ||
411 | + } else { | ||
412 | + cpu->kvm_steal_time = ON_OFF_AUTO_ON; | ||
88 | + } | 413 | + } |
89 | + if (!attrs.secure) { | 414 | + } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { |
415 | + if (!has_steal_time) { | ||
416 | + error_setg(errp, "'kvm-steal-time' cannot be enabled " | ||
417 | + "on this host"); | ||
418 | + return; | ||
419 | + } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { | ||
420 | + /* | ||
421 | + * DEN0057A chapter 2 says "This specification only covers | ||
422 | + * systems in which the Execution state of the hypervisor | ||
423 | + * as well as EL1 of virtual machines is AArch64.". And, | ||
424 | + * to ensure that, the smc/hvc calls are only specified as | ||
425 | + * smc64/hvc64. | ||
426 | + */ | ||
427 | + error_setg(errp, "'kvm-steal-time' cannot be enabled " | ||
428 | + "for AArch32 guests"); | ||
90 | + return; | 429 | + return; |
91 | + } | 430 | + } |
92 | + cpu->env.v7m.sfar = value; | 431 | + } |
93 | + break; | 432 | +} |
94 | case 0xf00: /* Software Triggered Interrupt Register */ | 433 | + |
95 | { | 434 | bool kvm_arm_aarch32_supported(void) |
96 | int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ; | 435 | { |
97 | diff --git a/target/arm/machine.c b/target/arm/machine.c | 436 | return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); |
98 | index XXXXXXX..XXXXXXX 100644 | 437 | @@ -XXX,XX +XXX,XX @@ bool kvm_arm_sve_supported(void) |
99 | --- a/target/arm/machine.c | 438 | return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); |
100 | +++ b/target/arm/machine.c | 439 | } |
101 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_security = { | 440 | |
102 | VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU), | 441 | +bool kvm_arm_steal_time_supported(void) |
103 | VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU), | 442 | +{ |
104 | VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU), | 443 | + return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); |
105 | + VMSTATE_UINT32(env.v7m.sfsr, ARMCPU), | 444 | +} |
106 | + VMSTATE_UINT32(env.v7m.sfar, ARMCPU), | 445 | + |
107 | VMSTATE_END_OF_LIST() | 446 | QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); |
108 | } | 447 | |
448 | void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) | ||
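Note that kvm_arm_steal_time_supported() above is purely a host-kernel probe: kvm_check_extension() issues the KVM_CHECK_EXTENSION ioctl and returns non-zero when the running kernel advertises KVM_CAP_STEAL_TIME. Whether a given guest may actually use the feature is a separate question, answered by the AArch64 check in kvm_arm_steal_time_finalize().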
449 | diff --git a/target/arm/monitor.c b/target/arm/monitor.c | ||
450 | index XXXXXXX..XXXXXXX 100644 | ||
451 | --- a/target/arm/monitor.c | ||
452 | +++ b/target/arm/monitor.c | ||
453 | @@ -XXX,XX +XXX,XX @@ static const char *cpu_model_advertised_features[] = { | ||
454 | "sve128", "sve256", "sve384", "sve512", | ||
455 | "sve640", "sve768", "sve896", "sve1024", "sve1152", "sve1280", | ||
456 | "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048", | ||
457 | - "kvm-no-adjvtime", | ||
458 | + "kvm-no-adjvtime", "kvm-steal-time", | ||
459 | NULL | ||
109 | }; | 460 | }; |
461 | |||
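Listing "kvm-steal-time" in cpu_model_advertised_features above is what makes the property visible through query-cpu-model-expansion; the qtest changes below rely on exactly that to discover whether this host reports the feature before trying to toggle it.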
462 | diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c | ||
463 | index XXXXXXX..XXXXXXX 100644 | ||
464 | --- a/tests/qtest/arm-cpu-features.c | ||
465 | +++ b/tests/qtest/arm-cpu-features.c | ||
466 | @@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion(const void *data) | ||
467 | assert_set_feature(qts, "max", "pmu", true); | ||
468 | |||
469 | assert_has_not_feature(qts, "max", "kvm-no-adjvtime"); | ||
470 | + assert_has_not_feature(qts, "max", "kvm-steal-time"); | ||
471 | |||
472 | if (g_str_equal(qtest_get_arch(), "aarch64")) { | ||
473 | assert_has_feature_enabled(qts, "max", "aarch64"); | ||
474 | @@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data) | ||
475 | assert_set_feature(qts, "host", "kvm-no-adjvtime", false); | ||
476 | |||
477 | if (g_str_equal(qtest_get_arch(), "aarch64")) { | ||
478 | + bool kvm_supports_steal_time; | ||
479 | bool kvm_supports_sve; | ||
480 | char max_name[8], name[8]; | ||
481 | uint32_t max_vq, vq; | ||
482 | @@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data) | ||
483 | QDict *resp; | ||
484 | char *error; | ||
485 | |||
486 | + assert_error(qts, "cortex-a15", | ||
487 | + "We cannot guarantee the CPU type 'cortex-a15' works " | ||
488 | + "with KVM on this host", NULL); | ||
489 | + | ||
490 | assert_has_feature_enabled(qts, "host", "aarch64"); | ||
491 | |||
492 | /* Enabling and disabling pmu should always work. */ | ||
493 | @@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data) | ||
494 | assert_set_feature(qts, "host", "pmu", false); | ||
495 | assert_set_feature(qts, "host", "pmu", true); | ||
496 | |||
497 | - assert_error(qts, "cortex-a15", | ||
498 | - "We cannot guarantee the CPU type 'cortex-a15' works " | ||
499 | - "with KVM on this host", NULL); | ||
500 | - | ||
501 | + /* | ||
502 | + * Some features would be enabled by default, but they're disabled | ||
503 | + * because this instance of KVM doesn't support them. Test that the | ||
504 | + * features are present, and, when enabled, issue further tests. | ||
505 | + */ | ||
506 | + assert_has_feature(qts, "host", "kvm-steal-time"); | ||
507 | assert_has_feature(qts, "host", "sve"); | ||
508 | + | ||
509 | resp = do_query_no_props(qts, "host"); | ||
510 | + kvm_supports_steal_time = resp_get_feature(resp, "kvm-steal-time"); | ||
511 | kvm_supports_sve = resp_get_feature(resp, "sve"); | ||
512 | vls = resp_get_sve_vls(resp); | ||
513 | qobject_unref(resp); | ||
514 | |||
515 | + if (kvm_supports_steal_time) { | ||
516 | + /* If we have steal-time then we should be able to toggle it. */ | ||
517 | + assert_set_feature(qts, "host", "kvm-steal-time", false); | ||
518 | + assert_set_feature(qts, "host", "kvm-steal-time", true); | ||
519 | + } | ||
520 | + | ||
521 | if (kvm_supports_sve) { | ||
522 | g_assert(vls != 0); | ||
523 | max_vq = 64 - __builtin_clzll(vls); | ||
524 | @@ -XXX,XX +XXX,XX @@ static void test_query_cpu_model_expansion_kvm(const void *data) | ||
525 | assert_has_not_feature(qts, "host", "aarch64"); | ||
526 | assert_has_not_feature(qts, "host", "pmu"); | ||
527 | assert_has_not_feature(qts, "host", "sve"); | ||
528 | + assert_has_not_feature(qts, "host", "kvm-steal-time"); | ||
529 | } | ||
530 | |||
531 | qtest_quit(qts); | ||
110 | -- | 532 | -- |
111 | 2.7.4 | 533 | 2.20.1 |
112 | 534 | ||
Deleted patch | |||
---|---|---|---|
1 | For v8M, exceptions from Secure to Non-Secure state will save | ||
2 | callee-saved registers to the exception frame as well as the | ||
3 | caller-saved registers. Add support for unstacking these | ||
4 | registers in exception exit when necessary. | ||
5 | 1 | ||
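For orientation, this is the stack layout the new code pops, as implied by the offsets it uses (a sketch; offsets are relative to frameptr at exception-exit time):

    /* v8M extended exception frame consumed by the unstacking code:
     *   frameptr + 0x00         integrity signature (0xfefa125b)
     *   frameptr + 0x04         reserved
     *   frameptr + 0x08..0x24   R4-R11 (callee-saved registers)
     *   frameptr + 0x28         start of the usual caller-saved frame
     *                           (R0-R3, R12, LR, PC, xPSR)
     */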
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
7 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
8 | Message-id: 1506092407-26985-12-git-send-email-peter.maydell@linaro.org | ||
9 | --- | ||
10 | target/arm/helper.c | 30 ++++++++++++++++++++++++++++++ | ||
11 | 1 file changed, 30 insertions(+) | ||
12 | |||
13 | diff --git a/target/arm/helper.c b/target/arm/helper.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/target/arm/helper.c | ||
16 | +++ b/target/arm/helper.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void do_v7m_exception_exit(ARMCPU *cpu) | ||
18 | "for destination state is UNPREDICTABLE\n"); | ||
19 | } | ||
20 | |||
21 | + /* Do we need to pop callee-saved registers? */ | ||
22 | + if (return_to_secure && | ||
23 | + ((excret & R_V7M_EXCRET_ES_MASK) == 0 || | ||
24 | + (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { | ||
25 | + uint32_t expected_sig = 0xfefa125b; | ||
26 | + uint32_t actual_sig = ldl_phys(cs->as, frameptr); | ||
27 | + | ||
28 | + if (expected_sig != actual_sig) { | ||
29 | + /* Take a SecureFault on the current stack */ | ||
30 | + env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; | ||
31 | + armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); | ||
32 | + v7m_exception_taken(cpu, excret); | ||
33 | + qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " | ||
34 | + "stackframe: failed exception return integrity " | ||
35 | + "signature check\n"); | ||
36 | + return; | ||
37 | + } | ||
38 | + | ||
39 | + env->regs[4] = ldl_phys(cs->as, frameptr + 0x8); | ||
40 | + env->regs[5] = ldl_phys(cs->as, frameptr + 0xc); | ||
41 | + env->regs[6] = ldl_phys(cs->as, frameptr + 0x10); | ||
42 | + env->regs[7] = ldl_phys(cs->as, frameptr + 0x14); | ||
43 | + env->regs[8] = ldl_phys(cs->as, frameptr + 0x18); | ||
44 | + env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c); | ||
45 | + env->regs[10] = ldl_phys(cs->as, frameptr + 0x20); | ||
46 | + env->regs[11] = ldl_phys(cs->as, frameptr + 0x24); | ||
47 | + | ||
48 | + frameptr += 0x28; | ||
49 | + } | ||
50 | + | ||
51 | /* Pop registers. TODO: make these accesses use the correct | ||
52 | * attributes and address space (S/NS, priv/unpriv) and handle | ||
53 | * memory transaction failures. | ||
54 | -- | ||
55 | 2.7.4 | ||
56 | |||
57 | diff view generated by jsdifflib |
1 | Implement the register interface for the SAU: SAU_CTRL, | 1 | QEMU supports a 48-bit physical address range, but we don't currently |
---|---|---|---|
2 | SAU_TYPE, SAU_RNR, SAU_RBAR and SAU_RLAR. None of the | 2 | expose it in the '-cpu max' ID registers (you get the same range as |
3 | actual behaviour is implemented here; registers just | 3 | Cortex-A57, which is 44 bits). |
4 | read back as written. | ||
5 | 4 | ||
6 | When the CPU definition for Cortex-M33 is eventually | 5 | Set the ID_AA64MMFR0.PARange field to indicate 48 bits. |
7 | added, its initfn will set cpu->sau_sregion, in the same | ||
8 | way that we currently set cpu->pmsav7_dregion for the | ||
9 | M3 and M4. | ||
10 | |||
11 | Number of SAU regions is typically a configurable | ||
12 | CPU parameter, but this patch doesn't provide a | ||
13 | QEMU CPU property for it. We can easily add one when | ||
14 | we have a board that requires it. | ||
15 | 6 | ||
16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 8 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
18 | Message-id: 1506092407-26985-14-git-send-email-peter.maydell@linaro.org | 9 | Message-id: 20201001160116.18095-1-peter.maydell@linaro.org |
19 | --- | 10 | --- |
20 | target/arm/cpu.h | 10 +++++ | 11 | target/arm/cpu64.c | 4 ++++ |
21 | hw/intc/armv7m_nvic.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++ | 12 | 1 file changed, 4 insertions(+) |
22 | target/arm/cpu.c | 27 ++++++++++++ | ||
23 | target/arm/machine.c | 14 ++++++ | ||
24 | 4 files changed, 167 insertions(+) | ||
25 | 13 | ||
26 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | 14 | diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c |
27 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/target/arm/cpu.h | 16 | --- a/target/arm/cpu64.c |
29 | +++ b/target/arm/cpu.h | 17 | +++ b/target/arm/cpu64.c |
30 | @@ -XXX,XX +XXX,XX @@ typedef struct CPUARMState { | 18 | @@ -XXX,XX +XXX,XX @@ static void aarch64_max_initfn(Object *obj) |
31 | uint32_t mair1[M_REG_NUM_BANKS]; | 19 | t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2); |
32 | } pmsav8; | 20 | cpu->isar.id_aa64pfr1 = t; |
33 | 21 | ||
34 | + /* v8M SAU */ | 22 | + t = cpu->isar.id_aa64mmfr0; |
35 | + struct { | 23 | + t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */ |
36 | + uint32_t *rbar; | 24 | + cpu->isar.id_aa64mmfr0 = t; |
37 | + uint32_t *rlar; | ||
38 | + uint32_t rnr; | ||
39 | + uint32_t ctrl; | ||
40 | + } sau; | ||
41 | + | 25 | + |
42 | void *nvic; | 26 | t = cpu->isar.id_aa64mmfr1; |
43 | const struct arm_boot_info *boot_info; | 27 | t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */ |
44 | /* Store GICv3CPUState to access from this struct */ | 28 | t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); |
45 | @@ -XXX,XX +XXX,XX @@ struct ARMCPU { | ||
46 | bool has_mpu; | ||
47 | /* PMSAv7 MPU number of supported regions */ | ||
48 | uint32_t pmsav7_dregion; | ||
49 | + /* v8M SAU number of supported regions */ | ||
50 | + uint32_t sau_sregion; | ||
51 | |||
52 | /* PSCI conduit used to invoke PSCI methods | ||
53 | * 0 - disabled, 1 - smc, 2 - hvc | ||
54 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/hw/intc/armv7m_nvic.c | ||
57 | +++ b/hw/intc/armv7m_nvic.c | ||
58 | @@ -XXX,XX +XXX,XX @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | ||
59 | goto bad_offset; | ||
60 | } | ||
61 | return cpu->env.pmsav8.mair1[attrs.secure]; | ||
62 | + case 0xdd0: /* SAU_CTRL */ | ||
63 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
64 | + goto bad_offset; | ||
65 | + } | ||
66 | + if (!attrs.secure) { | ||
67 | + return 0; | ||
68 | + } | ||
69 | + return cpu->env.sau.ctrl; | ||
70 | + case 0xdd4: /* SAU_TYPE */ | ||
71 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
72 | + goto bad_offset; | ||
73 | + } | ||
74 | + if (!attrs.secure) { | ||
75 | + return 0; | ||
76 | + } | ||
77 | + return cpu->sau_sregion; | ||
78 | + case 0xdd8: /* SAU_RNR */ | ||
79 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
80 | + goto bad_offset; | ||
81 | + } | ||
82 | + if (!attrs.secure) { | ||
83 | + return 0; | ||
84 | + } | ||
85 | + return cpu->env.sau.rnr; | ||
86 | + case 0xddc: /* SAU_RBAR */ | ||
87 | + { | ||
88 | + int region = cpu->env.sau.rnr; | ||
89 | + | ||
90 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
91 | + goto bad_offset; | ||
92 | + } | ||
93 | + if (!attrs.secure) { | ||
94 | + return 0; | ||
95 | + } | ||
96 | + if (region >= cpu->sau_sregion) { | ||
97 | + return 0; | ||
98 | + } | ||
99 | + return cpu->env.sau.rbar[region]; | ||
100 | + } | ||
101 | + case 0xde0: /* SAU_RLAR */ | ||
102 | + { | ||
103 | + int region = cpu->env.sau.rnr; | ||
104 | + | ||
105 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
106 | + goto bad_offset; | ||
107 | + } | ||
108 | + if (!attrs.secure) { | ||
109 | + return 0; | ||
110 | + } | ||
111 | + if (region >= cpu->sau_sregion) { | ||
112 | + return 0; | ||
113 | + } | ||
114 | + return cpu->env.sau.rlar[region]; | ||
115 | + } | ||
116 | case 0xde4: /* SFSR */ | ||
117 | if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
118 | goto bad_offset; | ||
119 | @@ -XXX,XX +XXX,XX @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value, | ||
120 | * only affect cacheability, and we don't implement caching. | ||
121 | */ | ||
122 | break; | ||
123 | + case 0xdd0: /* SAU_CTRL */ | ||
124 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
125 | + goto bad_offset; | ||
126 | + } | ||
127 | + if (!attrs.secure) { | ||
128 | + return; | ||
129 | + } | ||
130 | + cpu->env.sau.ctrl = value & 3; | ||
| + break; | ||
131 | + case 0xdd4: /* SAU_TYPE */ | ||
132 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
133 | + goto bad_offset; | ||
134 | + } | ||
135 | + break; | ||
136 | + case 0xdd8: /* SAU_RNR */ | ||
137 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
138 | + goto bad_offset; | ||
139 | + } | ||
140 | + if (!attrs.secure) { | ||
141 | + return; | ||
142 | + } | ||
143 | + if (value >= cpu->sau_sregion) { | ||
144 | + qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %" | ||
145 | + PRIu32 "/%" PRIu32 "\n", | ||
146 | + value, cpu->sau_sregion); | ||
147 | + } else { | ||
148 | + cpu->env.sau.rnr = value; | ||
149 | + } | ||
150 | + break; | ||
151 | + case 0xddc: /* SAU_RBAR */ | ||
152 | + { | ||
153 | + int region = cpu->env.sau.rnr; | ||
154 | + | ||
155 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
156 | + goto bad_offset; | ||
157 | + } | ||
158 | + if (!attrs.secure) { | ||
159 | + return; | ||
160 | + } | ||
161 | + if (region >= cpu->sau_sregion) { | ||
162 | + return; | ||
163 | + } | ||
164 | + cpu->env.sau.rbar[region] = value & ~0x1f; | ||
165 | + tlb_flush(CPU(cpu)); | ||
166 | + break; | ||
167 | + } | ||
168 | + case 0xde0: /* SAU_RLAR */ | ||
169 | + { | ||
170 | + int region = cpu->env.sau.rnr; | ||
171 | + | ||
172 | + if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
173 | + goto bad_offset; | ||
174 | + } | ||
175 | + if (!attrs.secure) { | ||
176 | + return; | ||
177 | + } | ||
178 | + if (region >= cpu->sau_sregion) { | ||
179 | + return; | ||
180 | + } | ||
181 | + cpu->env.sau.rlar[region] = value & ~0x1c; | ||
182 | + tlb_flush(CPU(cpu)); | ||
183 | + break; | ||
184 | + } | ||
185 | case 0xde4: /* SFSR */ | ||
186 | if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) { | ||
187 | goto bad_offset; | ||
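The address masks in the SAU_RBAR and SAU_RLAR write cases above encode the v8M register layout (architectural background, not something introduced by this patch):

    /* SAU_RBAR: bits [31:5] BADDR (region base, 32-byte granular),
     *           bits [4:0] RES0, hence "value & ~0x1f".
     * SAU_RLAR: bits [31:5] LADDR (region limit), bits [4:2] RES0,
     *           bit [1] NSC, bit [0] ENABLE, hence "value & ~0x1c".
     */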
188 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
189 | index XXXXXXX..XXXXXXX 100644 | ||
190 | --- a/target/arm/cpu.c | ||
191 | +++ b/target/arm/cpu.c | ||
192 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(CPUState *s) | ||
193 | env->pmsav8.mair1[M_REG_S] = 0; | ||
194 | } | ||
195 | |||
196 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
197 | + if (cpu->sau_sregion > 0) { | ||
198 | + memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion); | ||
199 | + memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion); | ||
200 | + } | ||
201 | + env->sau.rnr = 0; | ||
202 | + /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what | ||
203 | + * the Cortex-M33 does. | ||
204 | + */ | ||
205 | + env->sau.ctrl = 0; | ||
206 | + } | ||
207 | + | ||
208 | set_flush_to_zero(1, &env->vfp.standard_fp_status); | ||
209 | set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); | ||
210 | set_default_nan_mode(1, &env->vfp.standard_fp_status); | ||
211 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) | ||
212 | } | ||
213 | } | ||
214 | |||
215 | + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | ||
216 | + uint32_t nr = cpu->sau_sregion; | ||
217 | + | ||
218 | + if (nr > 0xff) { | ||
219 | + error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr); | ||
220 | + return; | ||
221 | + } | ||
222 | + | ||
223 | + if (nr) { | ||
224 | + env->sau.rbar = g_new0(uint32_t, nr); | ||
225 | + env->sau.rlar = g_new0(uint32_t, nr); | ||
226 | + } | ||
227 | + } | ||
228 | + | ||
229 | if (arm_feature(env, ARM_FEATURE_EL3)) { | ||
230 | set_feature(env, ARM_FEATURE_VBAR); | ||
231 | } | ||
232 | @@ -XXX,XX +XXX,XX @@ static void cortex_m4_initfn(Object *obj) | ||
233 | cpu->midr = 0x410fc240; /* r0p0 */ | ||
234 | cpu->pmsav7_dregion = 8; | ||
235 | } | ||
236 | + | ||
237 | static void arm_v7m_class_init(ObjectClass *oc, void *data) | ||
238 | { | ||
239 | CPUClass *cc = CPU_CLASS(oc); | ||
240 | diff --git a/target/arm/machine.c b/target/arm/machine.c | ||
241 | index XXXXXXX..XXXXXXX 100644 | ||
242 | --- a/target/arm/machine.c | ||
243 | +++ b/target/arm/machine.c | ||
244 | @@ -XXX,XX +XXX,XX @@ static bool s_rnr_vmstate_validate(void *opaque, int version_id) | ||
245 | return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion; | ||
246 | } | ||
247 | |||
248 | +static bool sau_rnr_vmstate_validate(void *opaque, int version_id) | ||
249 | +{ | ||
250 | + ARMCPU *cpu = opaque; | ||
251 | + | ||
252 | + return cpu->env.sau.rnr < cpu->sau_sregion; | ||
253 | +} | ||
254 | + | ||
255 | static bool m_security_needed(void *opaque) | ||
256 | { | ||
257 | ARMCPU *cpu = opaque; | ||
258 | @@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m_security = { | ||
259 | VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU), | ||
260 | VMSTATE_UINT32(env.v7m.sfsr, ARMCPU), | ||
261 | VMSTATE_UINT32(env.v7m.sfar, ARMCPU), | ||
262 | + VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0, | ||
263 | + vmstate_info_uint32, uint32_t), | ||
264 | + VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0, | ||
265 | + vmstate_info_uint32, uint32_t), | ||
266 | + VMSTATE_UINT32(env.sau.rnr, ARMCPU), | ||
267 | + VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate), | ||
268 | + VMSTATE_UINT32(env.sau.ctrl, ARMCPU), | ||
269 | VMSTATE_END_OF_LIST() | ||
270 | } | ||
271 | }; | ||
272 | -- | 29 | -- |
273 | 2.7.4 | 30 | 2.20.1 |
274 | 31 | ||
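For reference on the PARANGE value used in the 48-bit PA patch above, ID_AA64MMFR0_EL1.PARange encodes the implemented physical address range:

    /* ID_AA64MMFR0_EL1.PARange encodings (Arm ARM):
     *   0 = 32 bits, 1 = 36, 2 = 40, 3 = 42, 4 = 44,
     *   5 = 48 bits (the value set here), 6 = 52 bits (FEAT_LPA)
     */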
Deleted patch | |||
---|---|---|---|
1 | In cpu_mmu_index() we try to do this: | ||
2 | if (env->v7m.secure) { | ||
3 | mmu_idx += ARMMMUIdx_MSUser; | ||
4 | } | ||
5 | but it will give the wrong answer, because ARMMMUIdx_MSUser | ||
6 | includes the 0x40 ARM_MMU_IDX_M field, and so does the | ||
7 | mmu_idx we're adding to, and we'll end up with 0x8n rather | ||
8 | than 0x4n. This error is then nullified by the call to | ||
9 | arm_to_core_mmu_idx() which masks out the high part, but | ||
10 | we're about to factor out the code that calculates the | ||
11 | ARMMMUIdx values so it can be used without passing it through | ||
12 | arm_to_core_mmu_idx(), so fix this bug first. | ||
13 | 1 | ||
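As a concrete instance of the arithmetic (enum values assumed for illustration): with ARM_MMU_IDX_M = 0x40, an mmu_idx of ARMMMUIdx_MUser = 0x40 plus ARMMMUIdx_MSUser = 0x43 yields 0x83 where 0x43 was meant; the stray high bits are only hidden because arm_to_core_mmu_idx() masks them off again.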
14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
15 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
16 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
17 | Message-id: 1506092407-26985-16-git-send-email-peter.maydell@linaro.org | ||
18 | --- | ||
19 | target/arm/cpu.h | 12 +++++++----- | ||
20 | 1 file changed, 7 insertions(+), 5 deletions(-) | ||
21 | |||
22 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/target/arm/cpu.h | ||
25 | +++ b/target/arm/cpu.h | ||
26 | @@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) | ||
27 | int el = arm_current_el(env); | ||
28 | |||
29 | if (arm_feature(env, ARM_FEATURE_M)) { | ||
30 | - ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv; | ||
31 | + ARMMMUIdx mmu_idx; | ||
32 | |||
33 | - if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) { | ||
34 | - mmu_idx = ARMMMUIdx_MNegPri; | ||
35 | + if (el == 0) { | ||
36 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser; | ||
37 | + } else { | ||
38 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv; | ||
39 | } | ||
40 | |||
41 | - if (env->v7m.secure) { | ||
42 | - mmu_idx += ARMMMUIdx_MSUser; | ||
43 | + if (armv7m_nvic_neg_prio_requested(env->nvic, env->v7m.secure)) { | ||
44 | + mmu_idx = env->v7m.secure ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri; | ||
45 | } | ||
46 | |||
47 | return arm_to_core_mmu_idx(mmu_idx); | ||
48 | -- | ||
49 | 2.7.4 | ||
50 | |||