A last small set of bug fixes before rc1.

thanks
-- PMM

The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637:

  Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717

for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4:

  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

----------------------------------------------------------------
Peter Maydell (5):
      linux-user: Remove pointless NULL check in clock_adjtime handling
      target/arm/ptw.c: Add comments to S1Translate struct fields
      target/arm: Fix S1_ptw_translate() debug path
      target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
      accel/tcg: Zero-pad PC in TCG CPU exec trace lines

Tong Ho (1):
      hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

Yuquan Wang (1):
      hw/arm/sbsa-ref: set 'slots' property of xhci

 accel/tcg/cpu-exec.c      |  4 +--
 accel/tcg/translate-all.c |  2 +-
 hw/arm/sbsa-ref.c         |  1 +
 hw/nvram/xlnx-efuse.c     | 11 ++++--
 linux-user/syscall.c      | 12 +++----
 target/arm/ptw.c          | 90 +++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 98 insertions(+), 22 deletions(-)
From: Yuquan Wang <wangyuquan1236@phytium.com.cn>

This extends the slots of xhci to 64, since the default xhci_sysbus
just supports one slot.

Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn>
Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
     DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);
 
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
-- 
2.34.1

In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
the address of the local variable htx. This means it can never be
NULL, but later in the code we check it for NULL anyway. Coverity
complains about this (CID 1507683) because the NULL check comes after
a call to clock_adjtime() that assumes it is non-NULL.

Since phtx is always &htx, and is used only in three places, it's not
really necessary. Remove it, bringing the code structure into line
with that for TARGET_NR_clock_adjtime64, which already uses a simple
'&htx' when it wants a pointer to 'htx'.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
---
 linux-user/syscall.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
     case TARGET_NR_clock_adjtime:
         {
-            struct timex htx, *phtx = &htx;
+            struct timex htx;
 
-            if (target_to_host_timex(phtx, arg2) != 0) {
+            if (target_to_host_timex(&htx, arg2) != 0) {
                 return -TARGET_EFAULT;
             }
-            ret = get_errno(clock_adjtime(arg1, phtx));
-            if (!is_error(ret) && phtx) {
-                if (host_to_target_timex(arg2, phtx) != 0) {
-                    return -TARGET_EFAULT;
-                }
+            ret = get_errno(clock_adjtime(arg1, &htx));
+            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+                return -TARGET_EFAULT;
             }
         }
         return ret;
-- 
2.34.1

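As an aside for readers without the QEMU tree at hand, the pattern Coverity
objects to here can be reduced to the standalone C sketch below. The names are
illustrative only, not QEMU code: a pointer initialised to the address of a
local variable can never be NULL, so a NULL check made after that pointer has
already been handed to a function which dereferences it is dead code.

#include <stdio.h>

struct args { int value; };

/* Stand-in for a call (like clock_adjtime()) that assumes p != NULL. */
static void use_args(struct args *p)
{
    p->value = 42;
}

static int demo(void)
{
    struct args a, *p = &a;   /* p aliases a local: it is never NULL */

    use_args(p);              /* the dereference happens here ...     */
    if (p) {                  /* ... so this check can never be false */
        return p->value;
    }
    return -1;
}

int main(void)
{
    printf("%d\n", demo());   /* prints 42 */
    return 0;
}
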
Add comments to the in_* fields in the S1Translate struct
that explain what they're doing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
---
 target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #endif
 
 typedef struct S1Translate {
+    /*
+     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
-- 
2.34.1

In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
so that the debug-access "call get_phys_addr_*" codepath is used both
when S1 is doing ptw reads from stage 2 and when it is doing ptw
reads from physical memory. However, we didn't update the
calculation of s2ptw->in_space and s2ptw->in_secure to account for
the "ptw reads from physical memory" case. This meant that debug
accesses when in Secure state broke.

Create a new function S2_security_space() which returns the
correct security space to use for the ptw load, and use it to
determine the correct .in_secure and .in_space fields for the
stage 2 lookup for the ptw load.

Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }
 
+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
-- 
2.34.1

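As an illustration, the decision that S2_security_space() encodes can be
modelled in isolation. The sketch below is a simplified stand-alone rendering,
not QEMU code: the enum values and helper names are stand-ins, and the Realm
and Root physical spaces that the real arm_phys_to_space() also handles are
omitted.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's ARMSecuritySpace and ARMMMUIdx values. */
typedef enum { SPACE_SECURE, SPACE_NONSECURE, SPACE_ROOT, SPACE_REALM } Space;
typedef enum { IDX_STAGE2, IDX_STAGE2_S, IDX_PHYS_NS, IDX_PHYS_S } Idx;

/* Mirrors the decision made by S2_security_space() in the patch above. */
static Space s2_space_for_ptw_load(Space s1_space, Idx s2_idx)
{
    if (s2_idx == IDX_STAGE2 || s2_idx == IDX_STAGE2_S) {
        if (s1_space == SPACE_SECURE) {
            /* Secure stage 1: Stage2_S loads from Secure, Stage2 from NonSecure */
            return s2_idx == IDX_STAGE2_S ? SPACE_SECURE : SPACE_NONSECURE;
        }
        assert(s2_idx != IDX_STAGE2_S);   /* only Secure S1 uses Stage2_S */
        assert(s1_space != SPACE_ROOT);   /* Root walks are single-stage  */
        return s1_space;                  /* NonSecure or Realm unchanged */
    }
    /* No stage 2: the physical mmu index itself names the space. */
    return s2_idx == IDX_PHYS_S ? SPACE_SECURE : SPACE_NONSECURE;
}

int main(void)
{
    /* A Secure stage 1 walk whose descriptors are fetched via plain Stage2
     * reads the NonSecure space: prints 1. */
    printf("%d\n", s2_space_for_ptw_load(SPACE_SECURE, IDX_STAGE2) == SPACE_NONSECURE);
    return 0;
}
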
In get_phys_addr_twostage() the code that applies the effects of
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
Now that we also have f.attrs.space for FEAT_RME, we need to keep the
two in sync.

These bits only have an effect for Secure space translations, not
for Root, so use the input in_space field to determine whether to
apply them rather than the input is_secure. This doesn't actually
make a difference because Root translations are never two-stage,
but it's a little clearer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
---
 target/arm/ptw.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
     bool is_secure = ptw->in_secure;
+    ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
     ARMSecuritySpace ipa_space;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
      * Check if IPA translates to secure or non-secure PA space.
      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
      */
-    result->f.attrs.secure =
-        (is_secure
-         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-         && (ipa_secure
-             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+    if (in_space == ARMSS_Secure) {
+        result->f.attrs.secure =
+            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+            && (ipa_secure
+                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+    }
 
     return false;
 }
-- 
2.34.1

In commit f0a08b0913befbd we changed the type of the PC from
target_ulong to vaddr. In doing so we inadvertently dropped the
zero-padding on the PC in trace lines (the second item inside the []
in these lines). They used to look like this on AArch64, for
instance:

Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]

and now they look like this:

Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]

and if the PC happens to be somewhere low like 0x5000
then the field is shown as /5000/.

This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
with no width specifier.

Restore the zero-padding by adding an 016 width specifier to
this tracing and a couple of others that were similarly recently
changed to use VADDR_PRIx without a width specifier.

We can't unfortunately restore the "32-bit guests are padded to
8 hex digits and 64-bit guests to 16 hex digits" behaviour so
easily.

Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
---
 accel/tcg/cpu-exec.c      | 4 ++--
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
 
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, last_tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("Stopped execution of TB chain before %p ["
+            qemu_log("Stopped execution of TB chain before %p [%016"
                      VADDR_PRIx "] %s\n",
                      last_tb->tc.ptr, pc, lookup_symbol(pc));
         }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
         }
     }
-- 
2.34.1

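A quick stand-alone illustration of the formatting difference described above;
plain PRIx64 stands in here for QEMU's VADDR_PRIx, which (as the commit
message notes) is just PRIx64 with no width specifier.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t pc = 0x5000;

    printf("[%" PRIx64 "]\n", pc);     /* no width specifier: prints [5000] */
    printf("[%016" PRIx64 "]\n", pc);  /* 016 width: prints [0000000000005000] */
    return 0;
}
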
From: Tong Ho <tong.ho@amd.com>

Add a check in the bit-set operation so that the backstore is
written only if the affected bit was previously 0.

With this in place, there will be no need for callers to
do the checking in order to avoid unnecessary writes.

Signed-off-by: Tong Ho <tong.ho@amd.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)
 
 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));
 
@@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }
 
-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
     return true;
 }
-- 
2.34.1

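The change follows a common read-modify-write guard: only pay for the slow
backing-store write when the in-memory word actually changes. A generic
stand-alone sketch of the same idea, using illustrative names rather than the
QEMU eFuse API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a slow persistent write (e.g. a block backend). */
static void backing_store_sync(uint32_t *words, unsigned int bit)
{
    printf("syncing word %u to backing store\n", bit / 32);
}

/* Set a bit, but only pay for the backing-store write on a real change. */
static bool set_bit_once(uint32_t *words, unsigned int bit)
{
    uint32_t *row = &words[bit / 32];
    uint32_t mask = 1u << (bit % 32);

    if (!(*row & mask)) {
        *row |= mask;
        backing_store_sync(words, bit);
        return true;   /* value changed, backend updated */
    }
    return false;      /* already set: no backend write  */
}

int main(void)
{
    uint32_t fuses[4] = { 0 };

    set_bit_once(fuses, 5);   /* syncs word 0 */
    set_bit_once(fuses, 5);   /* no-op: bit already set, no sync */
    return 0;
}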