arm queue: big stuff here is my MVE codegen optimisation,
and Alex's Apple Silicon hvf support.

-- PMM

The following changes since commit 7adb961995a3744f51396502b33ad04a56a317c3:

  Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-virtiofs-20210916' into staging (2021-09-19 18:53:29 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20210920

for you to fetch changes up to 1dc5a60bfe406bc1122d68cbdefda38d23134b27:

  target/arm: Optimize MVE 1op-immediate insns (2021-09-20 14:18:01 +0100)

----------------------------------------------------------------
target-arm queue:
 * Optimize codegen for MVE when predication not active
 * hvf: Add Apple Silicon support
 * hw/intc: Set GIC maintenance interrupt level to only 0 or 1
 * Fix mishandling of MVE FPSCR.LTPSIZE reset for usermode emulator
 * elf2dmp: Fix coverity nits

----------------------------------------------------------------
Alexander Graf (7):
      arm: Move PMC register definitions to internals.h
      hvf: Add execute to dirty log permission bitmap
      hvf: Introduce hvf_arch_init() callback
      hvf: Add Apple Silicon support
      hvf: arm: Implement PSCI handling
      arm: Add Hypervisor.framework build target
      hvf: arm: Add rudimentary PMC support

Peter Collingbourne (1):
      arm/hvf: Add a WFI handler

Peter Maydell (18):
      elf2dmp: Check curl_easy_setopt() return value
      elf2dmp: Fail cleanly if PDB file specifies zero block_size
      target/arm: Don't skip M-profile reset entirely in user mode
      target/arm: Always clear exclusive monitor on reset
      target/arm: Consolidate ifdef blocks in reset
      hvf: arm: Implement -cpu host
      target/arm: Avoid goto_tb if we're trying to exit to the main loop
      target/arm: Enforce that FPDSCR.LTPSIZE is 4 on inbound migration
      target/arm: Add TB flag for "MVE insns not predicated"
      target/arm: Optimize MVE logic ops
      target/arm: Optimize MVE arithmetic ops
      target/arm: Optimize MVE VNEG, VABS
      target/arm: Optimize MVE VDUP
      target/arm: Optimize MVE VMVN
      target/arm: Optimize MVE VSHL, VSHR immediate forms
      target/arm: Optimize MVE VSHLL and VMOVL
      target/arm: Optimize MVE VSLI and VSRI
      target/arm: Optimize MVE 1op-immediate insns

Shashi Mallela (1):
      hw/intc: Set GIC maintenance interrupt level to only 0 or 1

 meson.build                   |    8 +
 include/sysemu/hvf_int.h      |   12 +-
 target/arm/cpu.h              |    6 +-
 target/arm/hvf_arm.h          |   18 +
 target/arm/internals.h        |   44 ++
 target/arm/kvm_arm.h          |    2 -
 target/arm/translate.h        |    2 +
 accel/hvf/hvf-accel-ops.c     |   21 +-
 contrib/elf2dmp/download.c    |   22 +-
 contrib/elf2dmp/pdb.c         |    4 +
 hw/intc/arm_gicv3_cpuif.c     |    5 +-
 target/arm/cpu.c              |   56 +-
 target/arm/helper.c           |   77 ++-
 target/arm/hvf/hvf.c          | 1278 +++++++++++++++++++++++++++++++++++++++++
 target/arm/machine.c          |   13 +
 target/arm/translate-m-nocp.c |    8 +-
 target/arm/translate-mve.c    |  310 +++++++---
 target/arm/translate-vfp.c    |   33 +-
 target/arm/translate.c        |   42 +-
 target/i386/hvf/hvf.c         |   10 +
 MAINTAINERS                   |    5 +
 target/arm/hvf/meson.build    |    3 +
 target/arm/hvf/trace-events   |   11 +
 target/arm/meson.build        |    2 +
 24 files changed, 1824 insertions(+), 168 deletions(-)
 create mode 100644 target/arm/hvf_arm.h
 create mode 100644 target/arm/hvf/hvf.c
 create mode 100644 target/arm/hvf/meson.build
 create mode 100644 target/arm/hvf/trace-events

Coverity points out that we aren't checking the return value
from curl_easy_setopt().

Fixes: Coverity CID 1458895
Inspired-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Message-id: 20210910170656.366592-2-philmd@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 contrib/elf2dmp/download.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c
index XXXXXXX..XXXXXXX 100644
--- a/contrib/elf2dmp/download.c
+++ b/contrib/elf2dmp/download.c
@@ -XXX,XX +XXX,XX @@ int download_url(const char *name, const char *url)
         goto out_curl;
     }
 
-    curl_easy_setopt(curl, CURLOPT_URL, url);
-    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL);
-    curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);
-    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
-    curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
-
-    if (curl_easy_perform(curl) != CURLE_OK) {
-        err = 1;
-        fclose(file);
+    if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
+        || curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK
+        || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
+        || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK
+        || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK
+        || curl_easy_perform(curl) != CURLE_OK) {
         unlink(name);
-        goto out_curl;
+        fclose(file);
+        err = 1;
+    } else {
+        err = fclose(file);
     }
 
-    err = fclose(file);
-
 out_curl:
     curl_easy_cleanup(curl);
 
-- 
2.20.1

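As an aside, the error-handling idiom adopted above can be reduced to a small
self-contained sketch. The fetch_sketch() helper below is hypothetical (it is
not the elf2dmp code); it only illustrates how short-circuit || evaluation
lets every curl_easy_setopt() call be checked without a separate if per call,
treating a failed setup step exactly like a failed transfer.

#include <stdio.h>
#include <curl/curl.h>

/* Hypothetical helper, for illustration only. */
static int fetch_sketch(const char *url, FILE *file)
{
    CURL *curl = curl_easy_init();
    int err = 0;

    if (!curl) {
        return 1;
    }

    /* Any step returning something other than CURLE_OK marks failure. */
    if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
        || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
        || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L) != CURLE_OK
        || curl_easy_perform(curl) != CURLE_OK) {
        err = 1;
    }

    curl_easy_cleanup(curl);
    return err;
}
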
Coverity points out that if the PDB file we're trying to read
has a header specifying a block_size of zero then we will
end up trying to divide by zero in pdb_ds_read_file().
Check for this and fail cleanly instead.

Fixes: Coverity CID 1458869
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Tested-by: Viktor Prutyanov <viktor.prutyanov@phystech.edu>
Message-id: 20210910170656.366592-3-philmd@redhat.com
Message-Id: <20210901143910.17112-3-peter.maydell@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 contrib/elf2dmp/pdb.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/contrib/elf2dmp/pdb.c b/contrib/elf2dmp/pdb.c
index XXXXXXX..XXXXXXX 100644
--- a/contrib/elf2dmp/pdb.c
+++ b/contrib/elf2dmp/pdb.c
@@ -XXX,XX +XXX,XX @@ out_symbols:
 
 static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr)
 {
+    if (hdr->block_size == 0) {
+        return 1;
+    }
+
     memset(r->file_used, 0, sizeof(r->file_used));
     r->ds.header = hdr;
     r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr +
-- 
2.20.1

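A minimal sketch of the failure mode being guarded against (hypothetical
types and names, not the elf2dmp reader): any size read from an untrusted
file header must be validated before it is used as a divisor.

#include <stdint.h>

/* Hypothetical header layout, for illustration only. */
typedef struct {
    uint32_t block_size;
    uint32_t file_size;
} sketch_header;

static int blocks_needed(const sketch_header *hdr)
{
    if (hdr->block_size == 0) {
        return -1;  /* corrupt header: fail cleanly instead of dividing by zero */
    }
    return (hdr->file_size + hdr->block_size - 1) / hdr->block_size;
}
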
Currently all of the M-profile specific code in arm_cpu_reset() is
inside a !defined(CONFIG_USER_ONLY) ifdef block. This is
unintentional: it happened because originally the only
M-profile-specific handling was the setup of the initial SP and PC
from the vector table, which is system-emulation only. But then we
added a lot of other M-profile setup to the same "if (ARM_FEATURE_M)"
code block without noticing that it was all inside a not-user-mode
ifdef. This has generally been harmless, but with the addition of
v8.1M low-overhead-loop support we ran into a problem: the reset of
FPSCR.LTPSIZE to 4 was only being done for system emulation mode, so
if a user-mode guest tried to execute the LE instruction it would
incorrectly take a UsageFault.

Adjust the ifdefs so only the really system-emulation specific parts
are covered. Because this means we now run some reset code that sets
up initial values in the FPCCR and similar FPU related registers,
explicitly set up the registers controlling FPU context handling in
user-emulation mode so that the FPU works by design and not by
chance.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/613
Cc: qemu-stable@nongnu.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-2-peter.maydell@linaro.org
---
 target/arm/cpu.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->uncached_cpsr = ARM_CPU_MODE_SVC;
     }
     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+#endif
 
     if (arm_feature(env, ARM_FEATURE_M)) {
+#ifndef CONFIG_USER_ONLY
         uint32_t initial_msp; /* Loaded from 0x0 */
         uint32_t initial_pc; /* Loaded from 0x4 */
         uint8_t *rom;
         uint32_t vecbase;
+#endif
 
         if (cpu_isar_feature(aa32_lob, cpu)) {
             /*
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
             env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                 R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
         }
+
+#ifndef CONFIG_USER_ONLY
         /* Unlike A/R profile, M profile defines the reset LR value */
         env->regs[14] = 0xffffffff;
 
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->regs[13] = initial_msp & 0xFFFFFFFC;
         env->regs[15] = initial_pc & ~1;
         env->thumb = initial_pc & 1;
+#else
+        /*
+         * For user mode we run non-secure and with access to the FPU.
+         * The FPU context is active (ie does not need further setup)
+         * and is owned by non-secure.
+         */
+        env->v7m.secure = false;
+        env->v7m.nsacr = 0xcff;
+        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
+        env->v7m.fpccr[M_REG_S] &=
+            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
+        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
+#endif
     }
 
+#ifndef CONFIG_USER_ONLY
     /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
      * executing as AArch32 then check if highvecs are enabled and
      * adjust the PC accordingly.
-- 
2.20.1

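The shape of the resulting code can be summarised with a schematic sketch
(made-up struct and field names, not the real arm_cpu_reset()): the M-profile
reset work that user-mode emulation also needs runs unconditionally, only the
vector-table fetch stays under the not-user-mode ifdef, and the #else branch
gives user mode an FPU-enabled starting state.

/* Schematic only; the real code lives in target/arm/cpu.c. */
typedef struct {
    unsigned ltpsize;
    int fpu_usable;
    int secure;
    unsigned sp, pc;
} SketchCPUState;

static void m_profile_reset_sketch(SketchCPUState *s)
{
    s->ltpsize = 4;          /* common: LE/LETP must also work in user mode */

#ifndef CONFIG_USER_ONLY
    s->sp = 0x20001000;      /* system only: really read from the vector table */
    s->pc = 0x00000101;
#else
    s->secure = 0;           /* user mode: run non-secure with the FPU usable */
    s->fpu_usable = 1;
#endif
}
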
There's no particular reason why the exclusive monitor should
be only cleared on reset in system emulation mode. It doesn't
hurt if it isn't cleared in user mode, but we might as well
reduce the amount of code we have that's inside an ifdef.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-3-peter.maydell@linaro.org
---
 target/arm/cpu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->regs[15] = 0xFFFF0000;
     }
 
+    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
+#endif
+
     /* M profile requires that reset clears the exclusive monitor;
      * A profile does not, but clearing it makes more sense than having it
      * set with an exclusive access on address zero.
      */
     arm_clear_exclusive(env);
 
-    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
-#endif
-
     if (arm_feature(env, ARM_FEATURE_PMSA)) {
         if (cpu->pmsav7_dregion > 0) {
             if (arm_feature(env, ARM_FEATURE_V8)) {
-- 
2.20.1

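For readers who have not looked at how the emulated exclusive monitor is
represented: clearing it amounts to overwriting the tracked address with a
sentinel, so any subsequent store-exclusive fails until a fresh
load-exclusive re-arms it. A simplified sketch follows (illustrative struct,
not the CPU state; the real helper is arm_clear_exclusive() in target/arm).

#include <stdint.h>

/* Simplified stand-in for the CPU state field, for illustration only. */
typedef struct {
    uint64_t exclusive_addr;
} SketchMonitor;

static void clear_exclusive_sketch(SketchMonitor *m)
{
    /* -1 never matches a recorded load-exclusive address */
    m->exclusive_addr = -1;
}
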
Move an ifndef CONFIG_USER_ONLY code block up in arm_cpu_reset() so
it can be merged with another earlier one.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210914120725.24992-4-peter.maydell@linaro.org
---
 target/arm/cpu.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
         env->uncached_cpsr = ARM_CPU_MODE_SVC;
     }
     env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+
+    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
+     * executing as AArch32 then check if highvecs are enabled and
+     * adjust the PC accordingly.
+     */
+    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+        env->regs[15] = 0xFFFF0000;
+    }
+
+    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
 #endif
 
     if (arm_feature(env, ARM_FEATURE_M)) {
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_reset(DeviceState *dev)
 #endif
     }
 
-#ifndef CONFIG_USER_ONLY
-    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
-     * executing as AArch32 then check if highvecs are enabled and
-     * adjust the PC accordingly.
-     */
-    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
-        env->regs[15] = 0xFFFF0000;
-    }
-
-    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
-#endif
-
     /* M profile requires that reset clears the exclusive monitor;
      * A profile does not, but clearing it makes more sense than having it
      * set with an exclusive access on address zero.
-- 
2.20.1

From: Shashi Mallela <shashi.mallela@linaro.org>

During sbsa acs level 3 testing, it is seen that the GIC maintenance
interrupts are not triggered and the related test cases fail. This
is because we were incorrectly passing the value of the MISR register
(from maintenance_interrupt_state()) to qemu_set_irq() as the level
argument, whereas the device on the other end of this irq line
expects a 0/1 value.

Fix the logic to pass a 0/1 level indication, rather than a
0/not-0 value.

Fixes: c5fc89b36c0 ("hw/intc/arm_gicv3: Implement gicv3_cpuif_virt_update()")
Signed-off-by: Shashi Mallela <shashi.mallela@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210915205809.59068-1-shashi.mallela@linaro.org
[PMM: tweaked commit message; collapsed nested if()s into one]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/intc/arm_gicv3_cpuif.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -XXX,XX +XXX,XX @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
         }
     }
 
-    if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
-        maintlevel = maintenance_interrupt_state(cs);
+    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
+        maintenance_interrupt_state(cs) != 0) {
+        maintlevel = 1;
     }
 
     trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
-- 
2.20.1

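The bug class is easy to state in isolation: qemu_set_irq() expects a 0/1
level, so a multi-bit status word has to be collapsed before being passed in.
A sketch under that assumption (it uses QEMU's qemu_set_irq() from hw/irq.h;
the wrapper function itself is hypothetical, not the GICv3 code):

#include "qemu/osdep.h"
#include "hw/irq.h"

/* Hypothetical wrapper, for illustration only. */
static void set_maintenance_irq_sketch(qemu_irq irq, uint32_t misr,
                                       bool virt_if_enabled)
{
    int level = (virt_if_enabled && misr != 0) ? 1 : 0;

    qemu_set_irq(irq, level);   /* level is strictly 0 or 1 */
}
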
From: Alexander Graf <agraf@csgraf.de>

We will need PMC register definitions in accel specific code later.
Move all constant definitions to common arm headers so we can reuse
them.

Signed-off-by: Alexander Graf <agraf@csgraf.de>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210916155404.86958-2-agraf@csgraf.de
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/internals.h | 44 ++++++++++++++++++++++++++++++++++++++++++
 target/arm/helper.c    | 44 ------------------------------------------
 2 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ enum MVEECIState {
     /* All other values reserved */
 };
 
+/* Definitions for the PMU registers */
+#define PMCRN_MASK  0xf800
+#define PMCRN_SHIFT 11
+#define PMCRLC  0x40
+#define PMCRDP  0x20
+#define PMCRX   0x10
+#define PMCRD   0x8
+#define PMCRC   0x4
+#define PMCRP   0x2
+#define PMCRE   0x1
+/*
+ * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
+ * which can be written as 1 to trigger behaviour but which stay RAZ).
+ */
+#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+
+#define PMXEVTYPER_P          0x80000000
+#define PMXEVTYPER_U          0x40000000
+#define PMXEVTYPER_NSK        0x20000000
+#define PMXEVTYPER_NSU        0x10000000
+#define PMXEVTYPER_NSH        0x08000000
+#define PMXEVTYPER_M          0x04000000
+#define PMXEVTYPER_MT         0x02000000
+#define PMXEVTYPER_EVTCOUNT   0x0000ffff
+#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
+                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
+                               PMXEVTYPER_M | PMXEVTYPER_MT | \
+                               PMXEVTYPER_EVTCOUNT)
+
+#define PMCCFILTR             0xf8000000
+#define PMCCFILTR_M           PMXEVTYPER_M
+#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
+
+static inline uint32_t pmu_num_counters(CPUARMState *env)
+{
+    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+}
+
+/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
+static inline uint64_t pmu_counter_mask(CPUARMState *env)
+{
+    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+}
+
 #endif
diff --git a/target/arm/helper.c b/target/arm/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
     REGINFO_SENTINEL
 };
 
-/* Definitions for the PMU registers */
-#define PMCRN_MASK  0xf800
-#define PMCRN_SHIFT 11
-#define PMCRLC  0x40
-#define PMCRDP  0x20
-#define PMCRX   0x10
-#define PMCRD   0x8
-#define PMCRC   0x4
-#define PMCRP   0x2
-#define PMCRE   0x1
-/*
- * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
- * which can be written as 1 to trigger behaviour but which stay RAZ).
- */
-#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
-
-#define PMXEVTYPER_P          0x80000000
-#define PMXEVTYPER_U          0x40000000
-#define PMXEVTYPER_NSK        0x20000000
-#define PMXEVTYPER_NSU        0x10000000
-#define PMXEVTYPER_NSH        0x08000000
-#define PMXEVTYPER_M          0x04000000
-#define PMXEVTYPER_MT         0x02000000
-#define PMXEVTYPER_EVTCOUNT   0x0000ffff
-#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
-                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
-                               PMXEVTYPER_M | PMXEVTYPER_MT | \
-                               PMXEVTYPER_EVTCOUNT)
-
-#define PMCCFILTR             0xf8000000
-#define PMCCFILTR_M           PMXEVTYPER_M
-#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
-
-static inline uint32_t pmu_num_counters(CPUARMState *env)
-{
-    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
-}
-
-/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
-static inline uint64_t pmu_counter_mask(CPUARMState *env)
-{
-    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
-}
-
 typedef struct pm_event {
     uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
     /* If the event is supported on this CPU (used to generate PMCEID[01]) */
-- 
2.20.1

1 | From: Hans-Erik Floryd <hans-erik.floryd@rt-labs.com> | 1 | From: Alexander Graf <agraf@csgraf.de> |
---|---|---|---|
2 | 2 | ||
3 | Generate an interrupt if USR2_RDR and UCR4_DREN are both set. | 3 | Hvf's permission bitmap during and after dirty logging does not include |
4 | the HV_MEMORY_EXEC permission. At least on Apple Silicon, this leads to | ||
5 | instruction faults once dirty logging was enabled. | ||
4 | 6 | ||
5 | Signed-off-by: Hans-Erik Floryd <hans-erik.floryd@rt-labs.com> | 7 | Add the bit to make it work properly. |
6 | Message-id: 1534341354-11956-1-git-send-email-hans-erik.floryd@rt-labs.com | 8 | |
9 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
7 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Message-id: 20210916155404.86958-3-agraf@csgraf.de | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | --- | 13 | --- |
10 | include/hw/char/imx_serial.h | 1 + | 14 | accel/hvf/hvf-accel-ops.c | 4 ++-- |
11 | hw/char/imx_serial.c | 3 ++- | 15 | 1 file changed, 2 insertions(+), 2 deletions(-) |
12 | 2 files changed, 3 insertions(+), 1 deletion(-) | ||
13 | 16 | ||
14 | diff --git a/include/hw/char/imx_serial.h b/include/hw/char/imx_serial.h | 17 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/include/hw/char/imx_serial.h | 19 | --- a/accel/hvf/hvf-accel-ops.c |
17 | +++ b/include/hw/char/imx_serial.h | 20 | +++ b/accel/hvf/hvf-accel-ops.c |
18 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) |
19 | #define UCR2_RXEN (1<<1) /* Receiver enable */ | 22 | if (on) { |
20 | #define UCR2_SRST (1<<0) /* Reset complete */ | 23 | slot->flags |= HVF_SLOT_LOG; |
21 | 24 | hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, | |
22 | +#define UCR4_DREN BIT(0) /* Receive Data Ready interrupt enable */ | 25 | - HV_MEMORY_READ); |
23 | #define UCR4_TCEN BIT(3) /* TX complete interrupt enable */ | 26 | + HV_MEMORY_READ | HV_MEMORY_EXEC); |
24 | 27 | /* stop tracking region*/ | |
25 | #define UTS1_TXEMPTY (1<<6) | 28 | } else { |
26 | diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c | 29 | slot->flags &= ~HVF_SLOT_LOG; |
27 | index XXXXXXX..XXXXXXX 100644 | 30 | hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, |
28 | --- a/hw/char/imx_serial.c | 31 | - HV_MEMORY_READ | HV_MEMORY_WRITE); |
29 | +++ b/hw/char/imx_serial.c | 32 | + HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); |
30 | @@ -XXX,XX +XXX,XX @@ static void imx_update(IMXSerialState *s) | 33 | } |
31 | mask = (s->ucr1 & UCR1_TXMPTYEN) ? USR2_TXFE : 0; | 34 | } |
32 | /* | ||
33 | * TCEN and TXDC are both bit 3 | ||
34 | + * RDR and DREN are both bit 0 | ||
35 | */ | ||
36 | - mask |= s->ucr4 & UCR4_TCEN; | ||
37 | + mask |= s->ucr4 & (UCR4_TCEN | UCR4_DREN); | ||
38 | |||
39 | usr2 = s->usr2 & mask; | ||
40 | 35 | ||
41 | -- | 36 | -- |
42 | 2.18.0 | 37 | 2.20.1 |
43 | 38 | ||
44 | 39 | diff view generated by jsdifflib |
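The imx_serial change above relies on the i.MX UART's interrupt-enable bits in UCR4 sitting at the same bit positions as the corresponding status bits in USR2 (RDR/DREN at bit 0, TXDC/TCEN at bit 3, per the patch comment), so the enable register can be used directly as a mask over the status register. A minimal standalone sketch of that masking idea, not QEMU code and with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

#define USR2_RDR  (1u << 0)   /* status: receive data ready */
#define USR2_TXDC (1u << 3)   /* status: transmit complete */
#define UCR4_DREN (1u << 0)   /* enable: RDR interrupt */
#define UCR4_TCEN (1u << 3)   /* enable: TXDC interrupt */

/* Raise the interrupt only for status bits whose enable bit is set. */
static bool imx_irq_pending(uint32_t usr2, uint32_t ucr4)
{
    uint32_t mask = ucr4 & (UCR4_TCEN | UCR4_DREN);
    return (usr2 & mask) != 0;
}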
1 | From: Andrew Jones <drjones@redhat.com> | 1 | From: Alexander Graf <agraf@csgraf.de> |
---|---|---|---|
2 | 2 | ||
3 | Signed-off-by: Andrew Jones <drjones@redhat.com> | 3 | We will need to install a migration helper for the ARM hvf backend. |
4 | Reviewed-by: Igor Mammedov <imammedo@redhat.com> | 4 | Let's introduce an arch callback for the overall hvf init chain to |
5 | do so. | ||
6 | |||
7 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
5 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 8 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Message-id: 20210916155404.86958-4-agraf@csgraf.de | ||
6 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
7 | --- | 11 | --- |
8 | hw/arm/virt.c | 23 +++++++++++++++++------ | 12 | include/sysemu/hvf_int.h | 1 + |
9 | 1 file changed, 17 insertions(+), 6 deletions(-) | 13 | accel/hvf/hvf-accel-ops.c | 3 ++- |
14 | target/i386/hvf/hvf.c | 5 +++++ | ||
15 | 3 files changed, 8 insertions(+), 1 deletion(-) | ||
10 | 16 | ||
11 | diff --git a/hw/arm/virt.c b/hw/arm/virt.c | 17 | diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h |
12 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/hw/arm/virt.c | 19 | --- a/include/sysemu/hvf_int.h |
14 | +++ b/hw/arm/virt.c | 20 | +++ b/include/sysemu/hvf_int.h |
15 | @@ -XXX,XX +XXX,XX @@ static void machvirt_machine_init(void) | 21 | @@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state { |
22 | }; | ||
23 | |||
24 | void assert_hvf_ok(hv_return_t ret); | ||
25 | +int hvf_arch_init(void); | ||
26 | int hvf_arch_init_vcpu(CPUState *cpu); | ||
27 | void hvf_arch_vcpu_destroy(CPUState *cpu); | ||
28 | int hvf_vcpu_exec(CPUState *); | ||
29 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/accel/hvf/hvf-accel-ops.c | ||
32 | +++ b/accel/hvf/hvf-accel-ops.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms) | ||
34 | |||
35 | hvf_state = s; | ||
36 | memory_listener_register(&hvf_memory_listener, &address_space_memory); | ||
37 | - return 0; | ||
38 | + | ||
39 | + return hvf_arch_init(); | ||
16 | } | 40 | } |
17 | type_init(machvirt_machine_init); | 41 | |
18 | 42 | static void hvf_accel_class_init(ObjectClass *oc, void *data) | |
19 | -#define VIRT_COMPAT_2_12 \ | 43 | diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c |
20 | - HW_COMPAT_2_12 | 44 | index XXXXXXX..XXXXXXX 100644 |
21 | - | 45 | --- a/target/i386/hvf/hvf.c |
22 | -static void virt_3_0_instance_init(Object *obj) | 46 | +++ b/target/i386/hvf/hvf.c |
23 | +static void virt_3_1_instance_init(Object *obj) | 47 | @@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env) |
24 | { | 48 | return env->apic_bus_freq != 0; |
25 | VirtMachineState *vms = VIRT_MACHINE(obj); | ||
26 | VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); | ||
27 | @@ -XXX,XX +XXX,XX @@ static void virt_3_0_instance_init(Object *obj) | ||
28 | vms->irqmap = a15irqmap; | ||
29 | } | 49 | } |
30 | 50 | ||
31 | -static void virt_machine_3_0_options(MachineClass *mc) | 51 | +int hvf_arch_init(void) |
32 | +static void virt_machine_3_1_options(MachineClass *mc) | ||
33 | { | ||
34 | } | ||
35 | -DEFINE_VIRT_MACHINE_AS_LATEST(3, 0) | ||
36 | +DEFINE_VIRT_MACHINE_AS_LATEST(3, 1) | ||
37 | + | ||
38 | +static void virt_3_0_instance_init(Object *obj) | ||
39 | +{ | 52 | +{ |
40 | + virt_3_1_instance_init(obj); | 53 | + return 0; |
41 | +} | 54 | +} |
42 | + | 55 | + |
43 | +static void virt_machine_3_0_options(MachineClass *mc) | 56 | int hvf_arch_init_vcpu(CPUState *cpu) |
44 | +{ | ||
45 | + virt_machine_3_1_options(mc); | ||
46 | +} | ||
47 | +DEFINE_VIRT_MACHINE(3, 0) | ||
48 | + | ||
49 | +#define VIRT_COMPAT_2_12 \ | ||
50 | + HW_COMPAT_2_12 | ||
51 | |||
52 | static void virt_2_12_instance_init(Object *obj) | ||
53 | { | 57 | { |
58 | X86CPU *x86cpu = X86_CPU(cpu); | ||
54 | -- | 59 | -- |
55 | 2.18.0 | 60 | 2.20.1 |
56 | 61 | ||
57 | 62 | diff view generated by jsdifflib |
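The virt-3.1 patch above follows QEMU's usual pattern for versioned machine types: the newest version carries the current defaults, and each older version's instance_init/options function calls the newer one first and then layers its own compatibility behaviour on top. A condensed sketch of that chaining pattern, with made-up struct and field names purely for illustration:

typedef struct MachineOpts {
    int max_cpus;
    int new_feature_default;      /* hypothetical option introduced in 3.1 */
} MachineOpts;

static void machine_3_1_options(MachineOpts *o)
{
    o->max_cpus = 512;
    o->new_feature_default = 1;   /* latest behaviour */
}

static void machine_3_0_options(MachineOpts *o)
{
    machine_3_1_options(o);       /* inherit everything from 3.1 ... */
    o->new_feature_default = 0;   /* ... then restore the 3.0 behaviour */
}

int main(void)
{
    MachineOpts v30;
    machine_3_0_options(&v30);    /* 3.1 defaults with the 3.0 compat override */
    return v30.new_feature_default;   /* 0 */
}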
1 | The Arm Cortex-M System Design Kit includes a simple watchdog module | 1 | From: Alexander Graf <agraf@csgraf.de> |
---|---|---|---|
2 | based on a 32-bit down-counter. Implement this. | ||
3 | 2 | ||
3 | With Apple Silicon available to the masses, it's a good time to add support | ||
4 | for driving its virtualization extensions from QEMU. | ||
5 | |||
6 | This patch adds all necessary architecture-specific code to get basic VMs |
7 | working, including save/restore. | ||
8 | |||
9 | Known limitations: | ||
10 | |||
11 | - WFI handling is missing (follows in later patch) | ||
12 | - No watchpoint/breakpoint support | ||
13 | |||
14 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
15 | Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com> | ||
16 | Reviewed-by: Sergio Lopez <slp@redhat.com> | ||
17 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
18 | Message-id: 20210916155404.86958-5-agraf@csgraf.de | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 19 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | --- | 20 | --- |
6 | Makefile.objs | 1 + | 21 | meson.build | 1 + |
7 | hw/watchdog/Makefile.objs | 1 + | 22 | include/sysemu/hvf_int.h | 10 +- |
8 | include/hw/watchdog/cmsdk-apb-watchdog.h | 59 ++++ | 23 | accel/hvf/hvf-accel-ops.c | 9 + |
9 | hw/watchdog/cmsdk-apb-watchdog.c | 326 +++++++++++++++++++++++ | 24 | target/arm/hvf/hvf.c | 794 ++++++++++++++++++++++++++++++++++++ |
10 | MAINTAINERS | 2 + | 25 | target/i386/hvf/hvf.c | 5 + |
11 | default-configs/arm-softmmu.mak | 1 + | 26 | MAINTAINERS | 5 + |
12 | hw/watchdog/trace-events | 6 + | 27 | target/arm/hvf/trace-events | 10 + |
13 | 7 files changed, 396 insertions(+) | 28 | 7 files changed, 833 insertions(+), 1 deletion(-) |
14 | create mode 100644 include/hw/watchdog/cmsdk-apb-watchdog.h | 29 | create mode 100644 target/arm/hvf/hvf.c |
15 | create mode 100644 hw/watchdog/cmsdk-apb-watchdog.c | 30 | create mode 100644 target/arm/hvf/trace-events |
16 | create mode 100644 hw/watchdog/trace-events | ||
17 | 31 | ||
18 | diff --git a/Makefile.objs b/Makefile.objs | 32 | diff --git a/meson.build b/meson.build |
19 | index XXXXXXX..XXXXXXX 100644 | 33 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/Makefile.objs | 34 | --- a/meson.build |
21 | +++ b/Makefile.objs | 35 | +++ b/meson.build |
22 | @@ -XXX,XX +XXX,XX @@ trace-events-subdirs += hw/tpm | 36 | @@ -XXX,XX +XXX,XX @@ if have_system or have_user |
23 | trace-events-subdirs += hw/usb | 37 | 'accel/tcg', |
24 | trace-events-subdirs += hw/vfio | 38 | 'hw/core', |
25 | trace-events-subdirs += hw/virtio | 39 | 'target/arm', |
26 | +trace-events-subdirs += hw/watchdog | 40 | + 'target/arm/hvf', |
27 | trace-events-subdirs += hw/xen | 41 | 'target/hppa', |
28 | trace-events-subdirs += io | 42 | 'target/i386', |
29 | trace-events-subdirs += linux-user | 43 | 'target/i386/kvm', |
30 | diff --git a/hw/watchdog/Makefile.objs b/hw/watchdog/Makefile.objs | 44 | diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h |
31 | index XXXXXXX..XXXXXXX 100644 | 45 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/hw/watchdog/Makefile.objs | 46 | --- a/include/sysemu/hvf_int.h |
33 | +++ b/hw/watchdog/Makefile.objs | 47 | +++ b/include/sysemu/hvf_int.h |
34 | @@ -XXX,XX +XXX,XX @@ | 48 | @@ -XXX,XX +XXX,XX @@ |
35 | common-obj-y += watchdog.o | 49 | #ifndef HVF_INT_H |
36 | +common-obj-$(CONFIG_CMSDK_APB_WATCHDOG) += cmsdk-apb-watchdog.o | 50 | #define HVF_INT_H |
37 | common-obj-$(CONFIG_WDT_IB6300ESB) += wdt_i6300esb.o | 51 | |
38 | common-obj-$(CONFIG_WDT_IB700) += wdt_ib700.o | 52 | +#ifdef __aarch64__ |
39 | common-obj-$(CONFIG_WDT_DIAG288) += wdt_diag288.o | 53 | +#include <Hypervisor/Hypervisor.h> |
40 | diff --git a/include/hw/watchdog/cmsdk-apb-watchdog.h b/include/hw/watchdog/cmsdk-apb-watchdog.h | 54 | +#else |
55 | #include <Hypervisor/hv.h> | ||
56 | +#endif | ||
57 | |||
58 | /* hvf_slot flags */ | ||
59 | #define HVF_SLOT_LOG (1 << 0) | ||
60 | @@ -XXX,XX +XXX,XX @@ struct HVFState { | ||
61 | int num_slots; | ||
62 | |||
63 | hvf_vcpu_caps *hvf_caps; | ||
64 | + uint64_t vtimer_offset; | ||
65 | }; | ||
66 | extern HVFState *hvf_state; | ||
67 | |||
68 | struct hvf_vcpu_state { | ||
69 | - int fd; | ||
70 | + uint64_t fd; | ||
71 | + void *exit; | ||
72 | + bool vtimer_masked; | ||
73 | }; | ||
74 | |||
75 | void assert_hvf_ok(hv_return_t ret); | ||
76 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *); | ||
77 | hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t); | ||
78 | int hvf_put_registers(CPUState *); | ||
79 | int hvf_get_registers(CPUState *); | ||
80 | +void hvf_kick_vcpu_thread(CPUState *cpu); | ||
81 | |||
82 | #endif | ||
83 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c | ||
84 | index XXXXXXX..XXXXXXX 100644 | ||
85 | --- a/accel/hvf/hvf-accel-ops.c | ||
86 | +++ b/accel/hvf/hvf-accel-ops.c | ||
87 | @@ -XXX,XX +XXX,XX @@ | ||
88 | |||
89 | HVFState *hvf_state; | ||
90 | |||
91 | +#ifdef __aarch64__ | ||
92 | +#define HV_VM_DEFAULT NULL | ||
93 | +#endif | ||
94 | + | ||
95 | /* Memory slots */ | ||
96 | |||
97 | hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) | ||
98 | @@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu) | ||
99 | pthread_sigmask(SIG_BLOCK, NULL, &set); | ||
100 | sigdelset(&set, SIG_IPI); | ||
101 | |||
102 | +#ifdef __aarch64__ | ||
103 | + r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); | ||
104 | +#else | ||
105 | r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT); | ||
106 | +#endif | ||
107 | cpu->vcpu_dirty = 1; | ||
108 | assert_hvf_ok(r); | ||
109 | |||
110 | @@ -XXX,XX +XXX,XX @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data) | ||
111 | AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); | ||
112 | |||
113 | ops->create_vcpu_thread = hvf_start_vcpu_thread; | ||
114 | + ops->kick_vcpu_thread = hvf_kick_vcpu_thread; | ||
115 | |||
116 | ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; | ||
117 | ops->synchronize_post_init = hvf_cpu_synchronize_post_init; | ||
118 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
41 | new file mode 100644 | 119 | new file mode 100644 |
42 | index XXXXXXX..XXXXXXX | 120 | index XXXXXXX..XXXXXXX |
43 | --- /dev/null | 121 | --- /dev/null |
44 | +++ b/include/hw/watchdog/cmsdk-apb-watchdog.h | 122 | +++ b/target/arm/hvf/hvf.c |
45 | @@ -XXX,XX +XXX,XX @@ | 123 | @@ -XXX,XX +XXX,XX @@ |
46 | +/* | 124 | +/* |
47 | + * ARM CMSDK APB watchdog emulation | 125 | + * QEMU Hypervisor.framework support for Apple Silicon |
126 | + | ||
127 | + * Copyright 2020 Alexander Graf <agraf@csgraf.de> | ||
48 | + * | 128 | + * |
49 | + * Copyright (c) 2018 Linaro Limited | 129 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
50 | + * Written by Peter Maydell | 130 | + * See the COPYING file in the top-level directory. |
51 | + * | 131 | + * |
52 | + * This program is free software; you can redistribute it and/or modify | ||
53 | + * it under the terms of the GNU General Public License version 2 or | ||
54 | + * (at your option) any later version. | ||
55 | + */ | 132 | + */ |
56 | + | 133 | + |
57 | +/* | 134 | +#include "qemu/osdep.h" |
58 | + * This is a model of the "APB watchdog" which is part of the Cortex-M | 135 | +#include "qemu-common.h" |
59 | + * System Design Kit (CMSDK) and documented in the Cortex-M System | 136 | +#include "qemu/error-report.h" |
60 | + * Design Kit Technical Reference Manual (ARM DDI0479C): | 137 | + |
61 | + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit | 138 | +#include "sysemu/runstate.h" |
62 | + * | 139 | +#include "sysemu/hvf.h" |
63 | + * QEMU interface: | 140 | +#include "sysemu/hvf_int.h" |
64 | + * + QOM property "wdogclk-frq": frequency at which the watchdog is clocked | 141 | +#include "sysemu/hw_accel.h" |
65 | + * + sysbus MMIO region 0: the register bank | 142 | + |
66 | + * + sysbus IRQ 0: watchdog interrupt | 143 | +#include <mach/mach_time.h> |
67 | + * | 144 | + |
68 | + * In real hardware the watchdog's reset output is just a GPIO line | 145 | +#include "exec/address-spaces.h" |
69 | + * which can then be masked by the board or treated as a simple interrupt. | 146 | +#include "hw/irq.h" |
70 | + * (For instance the IoTKit does this with the non-secure watchdog, so that | 147 | +#include "qemu/main-loop.h" |
71 | + * secure code can control whether non-secure code can perform a system | 148 | +#include "sysemu/cpus.h" |
72 | + * reset via its watchdog.) In QEMU, we just wire up the watchdog reset | 149 | +#include "target/arm/cpu.h" |
73 | + * to watchdog_perform_action(), at least for the moment. | 150 | +#include "target/arm/internals.h" |
74 | + */ | 151 | +#include "trace/trace-target_arm_hvf.h" |
75 | + | 152 | +#include "migration/vmstate.h" |
76 | +#ifndef CMSDK_APB_WATCHDOG_H | 153 | + |
77 | +#define CMSDK_APB_WATCHDOG_H | 154 | +#define HVF_SYSREG(crn, crm, op0, op1, op2) \ |
78 | + | 155 | + ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) |
79 | +#include "hw/sysbus.h" | 156 | +#define PL1_WRITE_MASK 0x4 |
80 | +#include "hw/ptimer.h" | 157 | + |
81 | + | 158 | +#define SYSREG(op0, op1, crn, crm, op2) \ |
82 | +#define TYPE_CMSDK_APB_WATCHDOG "cmsdk-apb-watchdog" | 159 | + ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1)) |
83 | +#define CMSDK_APB_WATCHDOG(obj) OBJECT_CHECK(CMSDKAPBWatchdog, (obj), \ | 160 | +#define SYSREG_MASK SYSREG(0x3, 0x7, 0xf, 0xf, 0x7) |
84 | + TYPE_CMSDK_APB_WATCHDOG) | 161 | +#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4) |
85 | + | 162 | +#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) |
86 | +typedef struct CMSDKAPBWatchdog { | 163 | +#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) |
87 | + /*< private >*/ | 164 | +#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) |
88 | + SysBusDevice parent_obj; | 165 | + |
89 | + | 166 | +#define WFX_IS_WFE (1 << 0) |
90 | + /*< public >*/ | 167 | + |
91 | + MemoryRegion iomem; | 168 | +#define TMR_CTL_ENABLE (1 << 0) |
92 | + qemu_irq wdogint; | 169 | +#define TMR_CTL_IMASK (1 << 1) |
93 | + uint32_t wdogclk_frq; | 170 | +#define TMR_CTL_ISTATUS (1 << 2) |
94 | + struct ptimer_state *timer; | 171 | + |
95 | + | 172 | +typedef struct HVFVTimer { |
96 | + uint32_t control; | 173 | + /* Vtimer value during migration and paused state */ |
97 | + uint32_t intstatus; | 174 | + uint64_t vtimer_val; |
98 | + uint32_t lock; | 175 | +} HVFVTimer; |
99 | + uint32_t itcr; | 176 | + |
100 | + uint32_t itop; | 177 | +static HVFVTimer vtimer; |
101 | + uint32_t resetstatus; | 178 | + |
102 | +} CMSDKAPBWatchdog; | 179 | +struct hvf_reg_match { |
103 | + | 180 | + int reg; |
181 | + uint64_t offset; | ||
182 | +}; | ||
183 | + | ||
184 | +static const struct hvf_reg_match hvf_reg_match[] = { | ||
185 | + { HV_REG_X0, offsetof(CPUARMState, xregs[0]) }, | ||
186 | + { HV_REG_X1, offsetof(CPUARMState, xregs[1]) }, | ||
187 | + { HV_REG_X2, offsetof(CPUARMState, xregs[2]) }, | ||
188 | + { HV_REG_X3, offsetof(CPUARMState, xregs[3]) }, | ||
189 | + { HV_REG_X4, offsetof(CPUARMState, xregs[4]) }, | ||
190 | + { HV_REG_X5, offsetof(CPUARMState, xregs[5]) }, | ||
191 | + { HV_REG_X6, offsetof(CPUARMState, xregs[6]) }, | ||
192 | + { HV_REG_X7, offsetof(CPUARMState, xregs[7]) }, | ||
193 | + { HV_REG_X8, offsetof(CPUARMState, xregs[8]) }, | ||
194 | + { HV_REG_X9, offsetof(CPUARMState, xregs[9]) }, | ||
195 | + { HV_REG_X10, offsetof(CPUARMState, xregs[10]) }, | ||
196 | + { HV_REG_X11, offsetof(CPUARMState, xregs[11]) }, | ||
197 | + { HV_REG_X12, offsetof(CPUARMState, xregs[12]) }, | ||
198 | + { HV_REG_X13, offsetof(CPUARMState, xregs[13]) }, | ||
199 | + { HV_REG_X14, offsetof(CPUARMState, xregs[14]) }, | ||
200 | + { HV_REG_X15, offsetof(CPUARMState, xregs[15]) }, | ||
201 | + { HV_REG_X16, offsetof(CPUARMState, xregs[16]) }, | ||
202 | + { HV_REG_X17, offsetof(CPUARMState, xregs[17]) }, | ||
203 | + { HV_REG_X18, offsetof(CPUARMState, xregs[18]) }, | ||
204 | + { HV_REG_X19, offsetof(CPUARMState, xregs[19]) }, | ||
205 | + { HV_REG_X20, offsetof(CPUARMState, xregs[20]) }, | ||
206 | + { HV_REG_X21, offsetof(CPUARMState, xregs[21]) }, | ||
207 | + { HV_REG_X22, offsetof(CPUARMState, xregs[22]) }, | ||
208 | + { HV_REG_X23, offsetof(CPUARMState, xregs[23]) }, | ||
209 | + { HV_REG_X24, offsetof(CPUARMState, xregs[24]) }, | ||
210 | + { HV_REG_X25, offsetof(CPUARMState, xregs[25]) }, | ||
211 | + { HV_REG_X26, offsetof(CPUARMState, xregs[26]) }, | ||
212 | + { HV_REG_X27, offsetof(CPUARMState, xregs[27]) }, | ||
213 | + { HV_REG_X28, offsetof(CPUARMState, xregs[28]) }, | ||
214 | + { HV_REG_X29, offsetof(CPUARMState, xregs[29]) }, | ||
215 | + { HV_REG_X30, offsetof(CPUARMState, xregs[30]) }, | ||
216 | + { HV_REG_PC, offsetof(CPUARMState, pc) }, | ||
217 | +}; | ||
218 | + | ||
219 | +static const struct hvf_reg_match hvf_fpreg_match[] = { | ||
220 | + { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) }, | ||
221 | + { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) }, | ||
222 | + { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) }, | ||
223 | + { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) }, | ||
224 | + { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) }, | ||
225 | + { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) }, | ||
226 | + { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) }, | ||
227 | + { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) }, | ||
228 | + { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) }, | ||
229 | + { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) }, | ||
230 | + { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) }, | ||
231 | + { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) }, | ||
232 | + { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) }, | ||
233 | + { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) }, | ||
234 | + { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) }, | ||
235 | + { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) }, | ||
236 | + { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) }, | ||
237 | + { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) }, | ||
238 | + { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) }, | ||
239 | + { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) }, | ||
240 | + { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) }, | ||
241 | + { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) }, | ||
242 | + { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) }, | ||
243 | + { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) }, | ||
244 | + { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) }, | ||
245 | + { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) }, | ||
246 | + { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) }, | ||
247 | + { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) }, | ||
248 | + { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) }, | ||
249 | + { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) }, | ||
250 | + { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) }, | ||
251 | + { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) }, | ||
252 | +}; | ||
253 | + | ||
254 | +struct hvf_sreg_match { | ||
255 | + int reg; | ||
256 | + uint32_t key; | ||
257 | + uint32_t cp_idx; | ||
258 | +}; | ||
259 | + | ||
260 | +static struct hvf_sreg_match hvf_sreg_match[] = { | ||
261 | + { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) }, | ||
262 | + { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) }, | ||
263 | + { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) }, | ||
264 | + { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) }, | ||
265 | + | ||
266 | + { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) }, | ||
267 | + { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) }, | ||
268 | + { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) }, | ||
269 | + { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) }, | ||
270 | + | ||
271 | + { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) }, | ||
272 | + { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) }, | ||
273 | + { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) }, | ||
274 | + { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) }, | ||
275 | + | ||
276 | + { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) }, | ||
277 | + { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) }, | ||
278 | + { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) }, | ||
279 | + { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) }, | ||
280 | + | ||
281 | + { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) }, | ||
282 | + { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) }, | ||
283 | + { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) }, | ||
284 | + { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) }, | ||
285 | + | ||
286 | + { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) }, | ||
287 | + { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) }, | ||
288 | + { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) }, | ||
289 | + { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) }, | ||
290 | + | ||
291 | + { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) }, | ||
292 | + { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) }, | ||
293 | + { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) }, | ||
294 | + { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) }, | ||
295 | + | ||
296 | + { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) }, | ||
297 | + { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) }, | ||
298 | + { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) }, | ||
299 | + { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) }, | ||
300 | + | ||
301 | + { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) }, | ||
302 | + { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) }, | ||
303 | + { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) }, | ||
304 | + { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) }, | ||
305 | + | ||
306 | + { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) }, | ||
307 | + { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) }, | ||
308 | + { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) }, | ||
309 | + { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) }, | ||
310 | + | ||
311 | + { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) }, | ||
312 | + { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) }, | ||
313 | + { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) }, | ||
314 | + { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) }, | ||
315 | + | ||
316 | + { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) }, | ||
317 | + { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) }, | ||
318 | + { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) }, | ||
319 | + { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) }, | ||
320 | + | ||
321 | + { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) }, | ||
322 | + { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) }, | ||
323 | + { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) }, | ||
324 | + { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) }, | ||
325 | + | ||
326 | + { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) }, | ||
327 | + { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) }, | ||
328 | + { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) }, | ||
329 | + { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) }, | ||
330 | + | ||
331 | + { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) }, | ||
332 | + { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) }, | ||
333 | + { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) }, | ||
334 | + { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) }, | ||
335 | + | ||
336 | + { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) }, | ||
337 | + { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) }, | ||
338 | + { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) }, | ||
339 | + { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) }, | ||
340 | + | ||
341 | +#ifdef SYNC_NO_RAW_REGS | ||
342 | + /* | ||
343 | + * The registers below are manually synced on init because they are | ||
344 | + * marked as NO_RAW. We still list them to make number space sync easier. | ||
345 | + */ | ||
346 | + { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) }, | ||
347 | + { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) }, | ||
348 | + { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) }, | ||
349 | + { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) }, | ||
104 | +#endif | 350 | +#endif |
105 | diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c | 351 | + { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) }, |
106 | new file mode 100644 | 352 | + { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) }, |
107 | index XXXXXXX..XXXXXXX | 353 | + { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) }, |
108 | --- /dev/null | 354 | + { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) }, |
109 | +++ b/hw/watchdog/cmsdk-apb-watchdog.c | 355 | + { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) }, |
110 | @@ -XXX,XX +XXX,XX @@ | 356 | +#ifdef SYNC_NO_MMFR0 |
111 | +/* | 357 | + /* We keep the hardware MMFR0 around. HW limits are there anyway */ |
112 | + * ARM CMSDK APB watchdog emulation | 358 | + { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) }, |
113 | + * | 359 | +#endif |
114 | + * Copyright (c) 2018 Linaro Limited | 360 | + { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) }, |
115 | + * Written by Peter Maydell | 361 | + { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) }, |
116 | + * | 362 | + |
117 | + * This program is free software; you can redistribute it and/or modify | 363 | + { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) }, |
118 | + * it under the terms of the GNU General Public License version 2 or | 364 | + { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) }, |
119 | + * (at your option) any later version. | 365 | + { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) }, |
120 | + */ | 366 | + { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) }, |
121 | + | 367 | + { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) }, |
122 | +/* | 368 | + { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) }, |
123 | + * This is a model of the "APB watchdog" which is part of the Cortex-M | 369 | + |
124 | + * System Design Kit (CMSDK) and documented in the Cortex-M System | 370 | + { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) }, |
125 | + * Design Kit Technical Reference Manual (ARM DDI0479C): | 371 | + { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) }, |
126 | + * https://developer.arm.com/products/system-design/system-design-kits/cortex-m-system-design-kit | 372 | + { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) }, |
127 | + */ | 373 | + { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) }, |
128 | + | 374 | + { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) }, |
129 | +#include "qemu/osdep.h" | 375 | + { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) }, |
130 | +#include "qemu/log.h" | 376 | + { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) }, |
131 | +#include "trace.h" | 377 | + { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) }, |
132 | +#include "qapi/error.h" | 378 | + { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) }, |
133 | +#include "qemu/main-loop.h" | 379 | + { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) }, |
134 | +#include "sysemu/watchdog.h" | 380 | + |
135 | +#include "hw/sysbus.h" | 381 | + { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) }, |
136 | +#include "hw/registerfields.h" | 382 | + { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) }, |
137 | +#include "hw/watchdog/cmsdk-apb-watchdog.h" | 383 | + { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) }, |
138 | + | 384 | + { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) }, |
139 | +REG32(WDOGLOAD, 0x0) | 385 | + { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) }, |
140 | +REG32(WDOGVALUE, 0x4) | 386 | + { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) }, |
141 | +REG32(WDOGCONTROL, 0x8) | 387 | + { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) }, |
142 | + FIELD(WDOGCONTROL, INTEN, 0, 1) | 388 | + { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) }, |
143 | + FIELD(WDOGCONTROL, RESEN, 1, 1) | 389 | + { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) }, |
144 | +#define R_WDOGCONTROL_VALID_MASK (R_WDOGCONTROL_INTEN_MASK | \ | 390 | + { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) }, |
145 | + R_WDOGCONTROL_RESEN_MASK) | 391 | + { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) }, |
146 | +REG32(WDOGINTCLR, 0xc) | 392 | + { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) }, |
147 | +REG32(WDOGRIS, 0x10) | 393 | + { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) }, |
148 | + FIELD(WDOGRIS, INT, 0, 1) | 394 | + { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) }, |
149 | +REG32(WDOGMIS, 0x14) | 395 | + { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) }, |
150 | +REG32(WDOGLOCK, 0xc00) | 396 | + { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) }, |
151 | +#define WDOG_UNLOCK_VALUE 0x1ACCE551 | 397 | + { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) }, |
152 | +REG32(WDOGITCR, 0xf00) | 398 | + { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) }, |
153 | + FIELD(WDOGITCR, ENABLE, 0, 1) | 399 | + { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) }, |
154 | +#define R_WDOGITCR_VALID_MASK R_WDOGITCR_ENABLE_MASK | 400 | + { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) }, |
155 | +REG32(WDOGITOP, 0xf04) | ||
156 | + FIELD(WDOGITOP, WDOGRES, 0, 1) | ||
157 | + FIELD(WDOGITOP, WDOGINT, 1, 1) | ||
158 | +#define R_WDOGITOP_VALID_MASK (R_WDOGITOP_WDOGRES_MASK | \ | ||
159 | + R_WDOGITOP_WDOGINT_MASK) | ||
160 | +REG32(PID4, 0xfd0) | ||
161 | +REG32(PID5, 0xfd4) | ||
162 | +REG32(PID6, 0xfd8) | ||
163 | +REG32(PID7, 0xfdc) | ||
164 | +REG32(PID0, 0xfe0) | ||
165 | +REG32(PID1, 0xfe4) | ||
166 | +REG32(PID2, 0xfe8) | ||
167 | +REG32(PID3, 0xfec) | ||
168 | +REG32(CID0, 0xff0) | ||
169 | +REG32(CID1, 0xff4) | ||
170 | +REG32(CID2, 0xff8) | ||
171 | +REG32(CID3, 0xffc) | ||
172 | + | ||
173 | +/* PID/CID values */ | ||
174 | +static const int watchdog_id[] = { | ||
175 | + 0x04, 0x00, 0x00, 0x00, /* PID4..PID7 */ | ||
176 | + 0x24, 0xb8, 0x1b, 0x00, /* PID0..PID3 */ | ||
177 | + 0x0d, 0xf0, 0x05, 0xb1, /* CID0..CID3 */ | ||
178 | +}; | 401 | +}; |
179 | + | 402 | + |
180 | +static bool cmsdk_apb_watchdog_intstatus(CMSDKAPBWatchdog *s) | 403 | +int hvf_get_registers(CPUState *cpu) |
181 | +{ | 404 | +{ |
182 | + /* Return masked interrupt status */ | 405 | + ARMCPU *arm_cpu = ARM_CPU(cpu); |
183 | + return s->intstatus && (s->control & R_WDOGCONTROL_INTEN_MASK); | 406 | + CPUARMState *env = &arm_cpu->env; |
184 | +} | 407 | + hv_return_t ret; |
185 | + | 408 | + uint64_t val; |
186 | +static bool cmsdk_apb_watchdog_resetstatus(CMSDKAPBWatchdog *s) | 409 | + hv_simd_fp_uchar16_t fpval; |
187 | +{ | 410 | + int i; |
188 | + /* Return masked reset status */ | 411 | + |
189 | + return s->resetstatus && (s->control & R_WDOGCONTROL_RESEN_MASK); | 412 | + for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { |
190 | +} | 413 | + ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val); |
191 | + | 414 | + *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val; |
192 | +static void cmsdk_apb_watchdog_update(CMSDKAPBWatchdog *s) | 415 | + assert_hvf_ok(ret); |
193 | +{ | 416 | + } |
194 | + bool wdogint; | 417 | + |
195 | + bool wdogres; | 418 | + for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { |
196 | + | 419 | + ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, |
197 | + if (s->itcr) { | 420 | + &fpval); |
198 | + wdogint = s->itop & R_WDOGITOP_WDOGINT_MASK; | 421 | + memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval)); |
199 | + wdogres = s->itop & R_WDOGITOP_WDOGRES_MASK; | 422 | + assert_hvf_ok(ret); |
200 | + } else { | 423 | + } |
201 | + wdogint = cmsdk_apb_watchdog_intstatus(s); | 424 | + |
202 | + wdogres = cmsdk_apb_watchdog_resetstatus(s); | 425 | + val = 0; |
203 | + } | 426 | + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val); |
204 | + | 427 | + assert_hvf_ok(ret); |
205 | + qemu_set_irq(s->wdogint, wdogint); | 428 | + vfp_set_fpcr(env, val); |
206 | + if (wdogres) { | 429 | + |
207 | + watchdog_perform_action(); | 430 | + val = 0; |
208 | + } | 431 | + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val); |
209 | +} | 432 | + assert_hvf_ok(ret); |
210 | + | 433 | + vfp_set_fpsr(env, val); |
211 | +static uint64_t cmsdk_apb_watchdog_read(void *opaque, hwaddr offset, | 434 | + |
212 | + unsigned size) | 435 | + ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val); |
213 | +{ | 436 | + assert_hvf_ok(ret); |
214 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); | 437 | + pstate_write(env, val); |
215 | + uint64_t r; | 438 | + |
216 | + | 439 | + for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { |
217 | + switch (offset) { | 440 | + if (hvf_sreg_match[i].cp_idx == -1) { |
218 | + case A_WDOGLOAD: | 441 | + continue; |
219 | + r = ptimer_get_limit(s->timer); | 442 | + } |
220 | + break; | 443 | + |
221 | + case A_WDOGVALUE: | 444 | + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); |
222 | + r = ptimer_get_count(s->timer); | 445 | + assert_hvf_ok(ret); |
223 | + break; | 446 | + |
224 | + case A_WDOGCONTROL: | 447 | + arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; |
225 | + r = s->control; | 448 | + } |
226 | + break; | 449 | + assert(write_list_to_cpustate(arm_cpu)); |
227 | + case A_WDOGRIS: | 450 | + |
228 | + r = s->intstatus; | 451 | + aarch64_restore_sp(env, arm_current_el(env)); |
229 | + break; | 452 | + |
230 | + case A_WDOGMIS: | 453 | + return 0; |
231 | + r = cmsdk_apb_watchdog_intstatus(s); | 454 | +} |
232 | + break; | 455 | + |
233 | + case A_WDOGLOCK: | 456 | +int hvf_put_registers(CPUState *cpu) |
234 | + r = s->lock; | 457 | +{ |
235 | + break; | 458 | + ARMCPU *arm_cpu = ARM_CPU(cpu); |
236 | + case A_WDOGITCR: | 459 | + CPUARMState *env = &arm_cpu->env; |
237 | + r = s->itcr; | 460 | + hv_return_t ret; |
238 | + break; | 461 | + uint64_t val; |
239 | + case A_PID4 ... A_CID3: | 462 | + hv_simd_fp_uchar16_t fpval; |
240 | + r = watchdog_id[(offset - A_PID4) / 4]; | 463 | + int i; |
241 | + break; | 464 | + |
242 | + case A_WDOGINTCLR: | 465 | + for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { |
243 | + case A_WDOGITOP: | 466 | + val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); |
244 | + qemu_log_mask(LOG_GUEST_ERROR, | 467 | + ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val); |
245 | + "CMSDK APB watchdog read: read of WO offset %x\n", | 468 | + assert_hvf_ok(ret); |
246 | + (int)offset); | 469 | + } |
247 | + r = 0; | 470 | + |
471 | + for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { | ||
472 | + memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval)); | ||
473 | + ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, | ||
474 | + fpval); | ||
475 | + assert_hvf_ok(ret); | ||
476 | + } | ||
477 | + | ||
478 | + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env)); | ||
479 | + assert_hvf_ok(ret); | ||
480 | + | ||
481 | + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env)); | ||
482 | + assert_hvf_ok(ret); | ||
483 | + | ||
484 | + ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env)); | ||
485 | + assert_hvf_ok(ret); | ||
486 | + | ||
487 | + aarch64_save_sp(env, arm_current_el(env)); | ||
488 | + | ||
489 | + assert(write_cpustate_to_list(arm_cpu, false)); | ||
490 | + for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { | ||
491 | + if (hvf_sreg_match[i].cp_idx == -1) { | ||
492 | + continue; | ||
493 | + } | ||
494 | + | ||
495 | + val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; | ||
496 | + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); | ||
497 | + assert_hvf_ok(ret); | ||
498 | + } | ||
499 | + | ||
500 | + ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset); | ||
501 | + assert_hvf_ok(ret); | ||
502 | + | ||
503 | + return 0; | ||
504 | +} | ||
505 | + | ||
506 | +static void flush_cpu_state(CPUState *cpu) | ||
507 | +{ | ||
508 | + if (cpu->vcpu_dirty) { | ||
509 | + hvf_put_registers(cpu); | ||
510 | + cpu->vcpu_dirty = false; | ||
511 | + } | ||
512 | +} | ||
513 | + | ||
514 | +static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val) | ||
515 | +{ | ||
516 | + hv_return_t r; | ||
517 | + | ||
518 | + flush_cpu_state(cpu); | ||
519 | + | ||
520 | + if (rt < 31) { | ||
521 | + r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val); | ||
522 | + assert_hvf_ok(r); | ||
523 | + } | ||
524 | +} | ||
525 | + | ||
526 | +static uint64_t hvf_get_reg(CPUState *cpu, int rt) | ||
527 | +{ | ||
528 | + uint64_t val = 0; | ||
529 | + hv_return_t r; | ||
530 | + | ||
531 | + flush_cpu_state(cpu); | ||
532 | + | ||
533 | + if (rt < 31) { | ||
534 | + r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val); | ||
535 | + assert_hvf_ok(r); | ||
536 | + } | ||
537 | + | ||
538 | + return val; | ||
539 | +} | ||
540 | + | ||
541 | +void hvf_arch_vcpu_destroy(CPUState *cpu) | ||
542 | +{ | ||
543 | +} | ||
544 | + | ||
545 | +int hvf_arch_init_vcpu(CPUState *cpu) | ||
546 | +{ | ||
547 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
548 | + CPUARMState *env = &arm_cpu->env; | ||
549 | + uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match); | ||
550 | + uint32_t sregs_cnt = 0; | ||
551 | + uint64_t pfr; | ||
552 | + hv_return_t ret; | ||
553 | + int i; | ||
554 | + | ||
555 | + env->aarch64 = 1; | ||
556 | + asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz)); | ||
557 | + | ||
558 | + /* Allocate enough space for our sysreg sync */ | ||
559 | + arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes, | ||
560 | + sregs_match_len); | ||
561 | + arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values, | ||
562 | + sregs_match_len); | ||
563 | + arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t, | ||
564 | + arm_cpu->cpreg_vmstate_indexes, | ||
565 | + sregs_match_len); | ||
566 | + arm_cpu->cpreg_vmstate_values = g_renew(uint64_t, | ||
567 | + arm_cpu->cpreg_vmstate_values, | ||
568 | + sregs_match_len); | ||
569 | + | ||
570 | + memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t)); | ||
571 | + | ||
572 | + /* Populate cp list for all known sysregs */ | ||
573 | + for (i = 0; i < sregs_match_len; i++) { | ||
574 | + const ARMCPRegInfo *ri; | ||
575 | + uint32_t key = hvf_sreg_match[i].key; | ||
576 | + | ||
577 | + ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key); | ||
578 | + if (ri) { | ||
579 | + assert(!(ri->type & ARM_CP_NO_RAW)); | ||
580 | + hvf_sreg_match[i].cp_idx = sregs_cnt; | ||
581 | + arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key); | ||
582 | + } else { | ||
583 | + hvf_sreg_match[i].cp_idx = -1; | ||
584 | + } | ||
585 | + } | ||
586 | + arm_cpu->cpreg_array_len = sregs_cnt; | ||
587 | + arm_cpu->cpreg_vmstate_array_len = sregs_cnt; | ||
588 | + | ||
589 | + assert(write_cpustate_to_list(arm_cpu, false)); | ||
590 | + | ||
591 | + /* Set CP_NO_RAW system registers on init */ | ||
592 | + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1, | ||
593 | + arm_cpu->midr); | ||
594 | + assert_hvf_ok(ret); | ||
595 | + | ||
596 | + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1, | ||
597 | + arm_cpu->mp_affinity); | ||
598 | + assert_hvf_ok(ret); | ||
599 | + | ||
600 | + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); | ||
601 | + assert_hvf_ok(ret); | ||
602 | + pfr |= env->gicv3state ? (1 << 24) : 0; | ||
603 | + ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); | ||
604 | + assert_hvf_ok(ret); | ||
605 | + | ||
606 | + /* We're limited to underlying hardware caps, override internal versions */ | ||
607 | + ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, | ||
608 | + &arm_cpu->isar.id_aa64mmfr0); | ||
609 | + assert_hvf_ok(ret); | ||
610 | + | ||
611 | + return 0; | ||
612 | +} | ||
613 | + | ||
614 | +void hvf_kick_vcpu_thread(CPUState *cpu) | ||
615 | +{ | ||
616 | + hv_vcpus_exit(&cpu->hvf->fd, 1); | ||
617 | +} | ||
618 | + | ||
619 | +static void hvf_raise_exception(CPUState *cpu, uint32_t excp, | ||
620 | + uint32_t syndrome) | ||
621 | +{ | ||
622 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
623 | + CPUARMState *env = &arm_cpu->env; | ||
624 | + | ||
625 | + cpu->exception_index = excp; | ||
626 | + env->exception.target_el = 1; | ||
627 | + env->exception.syndrome = syndrome; | ||
628 | + | ||
629 | + arm_cpu_do_interrupt(cpu); | ||
630 | +} | ||
631 | + | ||
632 | +static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) | ||
633 | +{ | ||
634 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
635 | + CPUARMState *env = &arm_cpu->env; | ||
636 | + uint64_t val = 0; | ||
637 | + | ||
638 | + switch (reg) { | ||
639 | + case SYSREG_CNTPCT_EL0: | ||
640 | + val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / | ||
641 | + gt_cntfrq_period_ns(arm_cpu); | ||
642 | + break; | ||
643 | + case SYSREG_OSLSR_EL1: | ||
644 | + val = env->cp15.oslsr_el1; | ||
645 | + break; | ||
646 | + case SYSREG_OSDLR_EL1: | ||
647 | + /* Dummy register */ | ||
248 | + break; | 648 | + break; |
249 | + default: | 649 | + default: |
250 | + qemu_log_mask(LOG_GUEST_ERROR, | 650 | + cpu_synchronize_state(cpu); |
251 | + "CMSDK APB watchdog read: bad offset %x\n", (int)offset); | 651 | + trace_hvf_unhandled_sysreg_read(env->pc, reg, |
252 | + r = 0; | 652 | + (reg >> 20) & 0x3, |
253 | + break; | 653 | + (reg >> 14) & 0x7, |
254 | + } | 654 | + (reg >> 10) & 0xf, |
255 | + trace_cmsdk_apb_watchdog_read(offset, r, size); | 655 | + (reg >> 1) & 0xf, |
256 | + return r; | 656 | + (reg >> 17) & 0x7); |
257 | +} | 657 | + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); |
258 | + | 658 | + return 1; |
259 | +static void cmsdk_apb_watchdog_write(void *opaque, hwaddr offset, | 659 | + } |
260 | + uint64_t value, unsigned size) | 660 | + |
261 | +{ | 661 | + trace_hvf_sysreg_read(reg, |
262 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); | 662 | + (reg >> 20) & 0x3, |
263 | + | 663 | + (reg >> 14) & 0x7, |
264 | + trace_cmsdk_apb_watchdog_write(offset, value, size); | 664 | + (reg >> 10) & 0xf, |
265 | + | 665 | + (reg >> 1) & 0xf, |
266 | + if (s->lock && offset != A_WDOGLOCK) { | 666 | + (reg >> 17) & 0x7, |
267 | + /* Write access is disabled via WDOGLOCK */ | 667 | + val); |
268 | + qemu_log_mask(LOG_GUEST_ERROR, | 668 | + hvf_set_reg(cpu, rt, val); |
269 | + "CMSDK APB watchdog write: write to locked watchdog\n"); | 669 | + |
670 | + return 0; | ||
671 | +} | ||
672 | + | ||
673 | +static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | ||
674 | +{ | ||
675 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
676 | + CPUARMState *env = &arm_cpu->env; | ||
677 | + | ||
678 | + trace_hvf_sysreg_write(reg, | ||
679 | + (reg >> 20) & 0x3, | ||
680 | + (reg >> 14) & 0x7, | ||
681 | + (reg >> 10) & 0xf, | ||
682 | + (reg >> 1) & 0xf, | ||
683 | + (reg >> 17) & 0x7, | ||
684 | + val); | ||
685 | + | ||
686 | + switch (reg) { | ||
687 | + case SYSREG_OSLAR_EL1: | ||
688 | + env->cp15.oslsr_el1 = val & 1; | ||
689 | + break; | ||
690 | + case SYSREG_OSDLR_EL1: | ||
691 | + /* Dummy register */ | ||
692 | + break; | ||
693 | + default: | ||
694 | + cpu_synchronize_state(cpu); | ||
695 | + trace_hvf_unhandled_sysreg_write(env->pc, reg, | ||
696 | + (reg >> 20) & 0x3, | ||
697 | + (reg >> 14) & 0x7, | ||
698 | + (reg >> 10) & 0xf, | ||
699 | + (reg >> 1) & 0xf, | ||
700 | + (reg >> 17) & 0x7); | ||
701 | + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
702 | + return 1; | ||
703 | + } | ||
704 | + | ||
705 | + return 0; | ||
706 | +} | ||
707 | + | ||
708 | +static int hvf_inject_interrupts(CPUState *cpu) | ||
709 | +{ | ||
710 | + if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { | ||
711 | + trace_hvf_inject_fiq(); | ||
712 | + hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, | ||
713 | + true); | ||
714 | + } | ||
715 | + | ||
716 | + if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { | ||
717 | + trace_hvf_inject_irq(); | ||
718 | + hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, | ||
719 | + true); | ||
720 | + } | ||
721 | + | ||
722 | + return 0; | ||
723 | +} | ||
724 | + | ||
725 | +static uint64_t hvf_vtimer_val_raw(void) | ||
726 | +{ | ||
727 | + /* | ||
728 | + * mach_absolute_time() returns the vtimer value without the VM | ||
729 | + * offset that we define. Add our own offset on top. | ||
730 | + */ | ||
731 | + return mach_absolute_time() - hvf_state->vtimer_offset; | ||
732 | +} | ||
733 | + | ||
734 | +static void hvf_sync_vtimer(CPUState *cpu) | ||
735 | +{ | ||
736 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
737 | + hv_return_t r; | ||
738 | + uint64_t ctl; | ||
739 | + bool irq_state; | ||
740 | + | ||
741 | + if (!cpu->hvf->vtimer_masked) { | ||
742 | + /* We will get notified on vtimer changes by hvf, nothing to do */ | ||
270 | + return; | 743 | + return; |
271 | + } | 744 | + } |
272 | + | 745 | + |
273 | + switch (offset) { | 746 | + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); |
274 | + case A_WDOGLOAD: | 747 | + assert_hvf_ok(r); |
275 | + /* | 748 | + |
276 | + * Reset the load value and the current count, and make sure | 749 | + irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) == |
277 | + * we're counting. | 750 | + (TMR_CTL_ENABLE | TMR_CTL_ISTATUS); |
278 | + */ | 751 | + qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state); |
279 | + ptimer_set_limit(s->timer, value, 1); | 752 | + |
280 | + ptimer_run(s->timer, 0); | 753 | + if (!irq_state) { |
281 | + break; | 754 | + /* Timer no longer asserting, we can unmask it */ |
282 | + case A_WDOGCONTROL: | 755 | + hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false); |
283 | + s->control = value & R_WDOGCONTROL_VALID_MASK; | 756 | + cpu->hvf->vtimer_masked = false; |
284 | + cmsdk_apb_watchdog_update(s); | 757 | + } |
285 | + break; | 758 | +} |
286 | + case A_WDOGINTCLR: | 759 | + |
287 | + s->intstatus = 0; | 760 | +int hvf_vcpu_exec(CPUState *cpu) |
288 | + ptimer_set_count(s->timer, ptimer_get_limit(s->timer)); | 761 | +{ |
289 | + cmsdk_apb_watchdog_update(s); | 762 | + ARMCPU *arm_cpu = ARM_CPU(cpu); |
290 | + break; | 763 | + CPUARMState *env = &arm_cpu->env; |
291 | + case A_WDOGLOCK: | 764 | + hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; |
292 | + s->lock = (value != WDOG_UNLOCK_VALUE); | 765 | + hv_return_t r; |
293 | + break; | 766 | + bool advance_pc = false; |
294 | + case A_WDOGITCR: | 767 | + |
295 | + s->itcr = value & R_WDOGITCR_VALID_MASK; | 768 | + if (hvf_inject_interrupts(cpu)) { |
296 | + cmsdk_apb_watchdog_update(s); | 769 | + return EXCP_INTERRUPT; |
297 | + break; | 770 | + } |
298 | + case A_WDOGITOP: | 771 | + |
299 | + s->itop = value & R_WDOGITOP_VALID_MASK; | 772 | + if (cpu->halted) { |
300 | + cmsdk_apb_watchdog_update(s); | 773 | + return EXCP_HLT; |
301 | + break; | 774 | + } |
302 | + case A_WDOGVALUE: | 775 | + |
303 | + case A_WDOGRIS: | 776 | + flush_cpu_state(cpu); |
304 | + case A_WDOGMIS: | 777 | + |
305 | + case A_PID4 ... A_CID3: | 778 | + qemu_mutex_unlock_iothread(); |
306 | + qemu_log_mask(LOG_GUEST_ERROR, | 779 | + assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd)); |
307 | + "CMSDK APB watchdog write: write to RO offset 0x%x\n", | 780 | + |
308 | + (int)offset); | 781 | + /* handle VMEXIT */ |
309 | + break; | 782 | + uint64_t exit_reason = hvf_exit->reason; |
783 | + uint64_t syndrome = hvf_exit->exception.syndrome; | ||
784 | + uint32_t ec = syn_get_ec(syndrome); | ||
785 | + | ||
786 | + qemu_mutex_lock_iothread(); | ||
787 | + switch (exit_reason) { | ||
788 | + case HV_EXIT_REASON_EXCEPTION: | ||
789 | + /* This is the main one, handle below. */ | ||
790 | + break; | ||
791 | + case HV_EXIT_REASON_VTIMER_ACTIVATED: | ||
792 | + qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1); | ||
793 | + cpu->hvf->vtimer_masked = true; | ||
794 | + return 0; | ||
795 | + case HV_EXIT_REASON_CANCELED: | ||
796 | + /* we got kicked, no exit to process */ | ||
797 | + return 0; | ||
310 | + default: | 798 | + default: |
311 | + qemu_log_mask(LOG_GUEST_ERROR, | 799 | + assert(0); |
312 | + "CMSDK APB watchdog write: bad offset 0x%x\n", | 800 | + } |
313 | + (int)offset); | 801 | + |
314 | + break; | 802 | + hvf_sync_vtimer(cpu); |
315 | + } | 803 | + |
316 | +} | 804 | + switch (ec) { |
317 | + | 805 | + case EC_DATAABORT: { |
318 | +static const MemoryRegionOps cmsdk_apb_watchdog_ops = { | 806 | + bool isv = syndrome & ARM_EL_ISV; |
319 | + .read = cmsdk_apb_watchdog_read, | 807 | + bool iswrite = (syndrome >> 6) & 1; |
320 | + .write = cmsdk_apb_watchdog_write, | 808 | + bool s1ptw = (syndrome >> 7) & 1; |
321 | + .endianness = DEVICE_LITTLE_ENDIAN, | 809 | + uint32_t sas = (syndrome >> 22) & 3; |
322 | + /* byte/halfword accesses are just zero-padded on reads and writes */ | 810 | + uint32_t len = 1 << sas; |
323 | + .impl.min_access_size = 4, | 811 | + uint32_t srt = (syndrome >> 16) & 0x1f; |
324 | + .impl.max_access_size = 4, | 812 | + uint64_t val = 0; |
325 | + .valid.min_access_size = 1, | 813 | + |
326 | + .valid.max_access_size = 4, | 814 | + trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, |
327 | +}; | 815 | + hvf_exit->exception.physical_address, isv, |
328 | + | 816 | + iswrite, s1ptw, len, srt); |
329 | +static void cmsdk_apb_watchdog_tick(void *opaque) | 817 | + |
330 | +{ | 818 | + assert(isv); |
331 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(opaque); | 819 | + |
332 | + | 820 | + if (iswrite) { |
333 | + if (!s->intstatus) { | 821 | + val = hvf_get_reg(cpu, srt); |
334 | + /* Count expired for the first time: raise interrupt */ | 822 | + address_space_write(&address_space_memory, |
335 | + s->intstatus = R_WDOGRIS_INT_MASK; | 823 | + hvf_exit->exception.physical_address, |
336 | + } else { | 824 | + MEMTXATTRS_UNSPECIFIED, &val, len); |
337 | + /* Count expired for the second time: raise reset and stop clock */ | 825 | + } else { |
338 | + s->resetstatus = 1; | 826 | + address_space_read(&address_space_memory, |
339 | + ptimer_stop(s->timer); | 827 | + hvf_exit->exception.physical_address, |
340 | + } | 828 | + MEMTXATTRS_UNSPECIFIED, &val, len); |
341 | + cmsdk_apb_watchdog_update(s); | 829 | + hvf_set_reg(cpu, srt, val); |
342 | +} | 830 | + } |
343 | + | 831 | + |
344 | +static void cmsdk_apb_watchdog_reset(DeviceState *dev) | 832 | + advance_pc = true; |
345 | +{ | 833 | + break; |
346 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); | 834 | + } |
347 | + | 835 | + case EC_SYSTEMREGISTERTRAP: { |
348 | + trace_cmsdk_apb_watchdog_reset(); | 836 | + bool isread = (syndrome >> 0) & 1; |
349 | + s->control = 0; | 837 | + uint32_t rt = (syndrome >> 5) & 0x1f; |
350 | + s->intstatus = 0; | 838 | + uint32_t reg = syndrome & SYSREG_MASK; |
351 | + s->lock = 0; | 839 | + uint64_t val; |
352 | + s->itcr = 0; | 840 | + int ret = 0; |
353 | + s->itop = 0; | 841 | + |
354 | + s->resetstatus = 0; | 842 | + if (isread) { |
355 | + /* Set the limit and the count */ | 843 | + ret = hvf_sysreg_read(cpu, reg, rt); |
356 | + ptimer_set_limit(s->timer, 0xffffffff, 1); | 844 | + } else { |
357 | + ptimer_run(s->timer, 0); | 845 | + val = hvf_get_reg(cpu, rt); |
358 | +} | 846 | + ret = hvf_sysreg_write(cpu, reg, val); |
359 | + | 847 | + } |
360 | +static void cmsdk_apb_watchdog_init(Object *obj) | 848 | + |
361 | +{ | 849 | + advance_pc = !ret; |
362 | + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); | 850 | + break; |
363 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(obj); | 851 | + } |
364 | + | 852 | + case EC_WFX_TRAP: |
365 | + memory_region_init_io(&s->iomem, obj, &cmsdk_apb_watchdog_ops, | 853 | + advance_pc = true; |
366 | + s, "cmsdk-apb-watchdog", 0x1000); | 854 | + break; |
367 | + sysbus_init_mmio(sbd, &s->iomem); | 855 | + case EC_AA64_HVC: |
368 | + sysbus_init_irq(sbd, &s->wdogint); | 856 | + cpu_synchronize_state(cpu); |
369 | +} | 857 | + trace_hvf_unknown_hvc(env->xregs[0]); |
370 | + | 858 | + /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ |
371 | +static void cmsdk_apb_watchdog_realize(DeviceState *dev, Error **errp) | 859 | + env->xregs[0] = -1; |
372 | +{ | 860 | + break; |
373 | + CMSDKAPBWatchdog *s = CMSDK_APB_WATCHDOG(dev); | 861 | + case EC_AA64_SMC: |
374 | + QEMUBH *bh; | 862 | + cpu_synchronize_state(cpu); |
375 | + | 863 | + trace_hvf_unknown_smc(env->xregs[0]); |
376 | + if (s->wdogclk_frq == 0) { | 864 | + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); |
377 | + error_setg(errp, | 865 | + break; |
378 | + "CMSDK APB watchdog: wdogclk-frq property must be set"); | 866 | + default: |
379 | + return; | 867 | + cpu_synchronize_state(cpu); |
380 | + } | 868 | + trace_hvf_exit(syndrome, ec, env->pc); |
381 | + | 869 | + error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec); |
382 | + bh = qemu_bh_new(cmsdk_apb_watchdog_tick, s); | 870 | + } |
383 | + s->timer = ptimer_init(bh, | 871 | + |
384 | + PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | | 872 | + if (advance_pc) { |
385 | + PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT | | 873 | + uint64_t pc; |
386 | + PTIMER_POLICY_NO_IMMEDIATE_RELOAD | | 874 | + |
387 | + PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); | 875 | + flush_cpu_state(cpu); |
388 | + | 876 | + |
389 | + ptimer_set_freq(s->timer, s->wdogclk_frq); | 877 | + r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc); |
390 | +} | 878 | + assert_hvf_ok(r); |
391 | + | 879 | + pc += 4; |
392 | +static const VMStateDescription cmsdk_apb_watchdog_vmstate = { | 880 | + r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); |
393 | + .name = "cmsdk-apb-watchdog", | 881 | + assert_hvf_ok(r); |
882 | + } | ||
883 | + | ||
884 | + return 0; | ||
885 | +} | ||
886 | + | ||
887 | +static const VMStateDescription vmstate_hvf_vtimer = { | ||
888 | + .name = "hvf-vtimer", | ||
394 | + .version_id = 1, | 889 | + .version_id = 1, |
395 | + .minimum_version_id = 1, | 890 | + .minimum_version_id = 1, |
396 | + .fields = (VMStateField[]) { | 891 | + .fields = (VMStateField[]) { |
397 | + VMSTATE_PTIMER(timer, CMSDKAPBWatchdog), | 892 | + VMSTATE_UINT64(vtimer_val, HVFVTimer), |
398 | + VMSTATE_UINT32(control, CMSDKAPBWatchdog), | ||
399 | + VMSTATE_UINT32(intstatus, CMSDKAPBWatchdog), | ||
400 | + VMSTATE_UINT32(lock, CMSDKAPBWatchdog), | ||
401 | + VMSTATE_UINT32(itcr, CMSDKAPBWatchdog), | ||
402 | + VMSTATE_UINT32(itop, CMSDKAPBWatchdog), | ||
403 | + VMSTATE_UINT32(resetstatus, CMSDKAPBWatchdog), | ||
404 | + VMSTATE_END_OF_LIST() | 893 | + VMSTATE_END_OF_LIST() |
405 | + } | 894 | + }, |
406 | +}; | 895 | +}; |
407 | + | 896 | + |
408 | +static Property cmsdk_apb_watchdog_properties[] = { | 897 | +static void hvf_vm_state_change(void *opaque, bool running, RunState state) |
409 | + DEFINE_PROP_UINT32("wdogclk-frq", CMSDKAPBWatchdog, wdogclk_frq, 0), | 898 | +{ |
410 | + DEFINE_PROP_END_OF_LIST(), | 899 | + HVFVTimer *s = opaque; |
411 | +}; | 900 | + |
412 | + | 901 | + if (running) { |
413 | +static void cmsdk_apb_watchdog_class_init(ObjectClass *klass, void *data) | 902 | + /* Update vtimer offset on all CPUs */ |
414 | +{ | 903 | + hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val; |
415 | + DeviceClass *dc = DEVICE_CLASS(klass); | 904 | + cpu_synchronize_all_states(); |
416 | + | 905 | + } else { |
417 | + dc->realize = cmsdk_apb_watchdog_realize; | 906 | + /* Remember vtimer value on every pause */ |
418 | + dc->vmsd = &cmsdk_apb_watchdog_vmstate; | 907 | + s->vtimer_val = hvf_vtimer_val_raw(); |
419 | + dc->reset = cmsdk_apb_watchdog_reset; | 908 | + } |
420 | + dc->props = cmsdk_apb_watchdog_properties; | 909 | +} |
421 | +} | 910 | + |
422 | + | 911 | +int hvf_arch_init(void) |
423 | +static const TypeInfo cmsdk_apb_watchdog_info = { | 912 | +{ |
424 | + .name = TYPE_CMSDK_APB_WATCHDOG, | 913 | + hvf_state->vtimer_offset = mach_absolute_time(); |
425 | + .parent = TYPE_SYS_BUS_DEVICE, | 914 | + vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer); |
426 | + .instance_size = sizeof(CMSDKAPBWatchdog), | 915 | + qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer); |
427 | + .instance_init = cmsdk_apb_watchdog_init, | 916 | + return 0; |
428 | + .class_init = cmsdk_apb_watchdog_class_init, | 917 | +} |
429 | +}; | 918 | diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c |
430 | + | 919 | index XXXXXXX..XXXXXXX 100644 |
431 | +static void cmsdk_apb_watchdog_register_types(void) | 920 | --- a/target/i386/hvf/hvf.c |
432 | +{ | 921 | +++ b/target/i386/hvf/hvf.c |
433 | + type_register_static(&cmsdk_apb_watchdog_info); | 922 | @@ -XXX,XX +XXX,XX @@ static inline bool apic_bus_freq_is_known(CPUX86State *env) |
434 | +} | 923 | return env->apic_bus_freq != 0; |
435 | + | 924 | } |
436 | +type_init(cmsdk_apb_watchdog_register_types); | 925 | |
926 | +void hvf_kick_vcpu_thread(CPUState *cpu) | ||
927 | +{ | ||
928 | + cpus_kick_thread(cpu); | ||
929 | +} | ||
930 | + | ||
931 | int hvf_arch_init(void) | ||
932 | { | ||
933 | return 0; | ||
437 | diff --git a/MAINTAINERS b/MAINTAINERS | 934 | diff --git a/MAINTAINERS b/MAINTAINERS |
438 | index XXXXXXX..XXXXXXX 100644 | 935 | index XXXXXXX..XXXXXXX 100644 |
439 | --- a/MAINTAINERS | 936 | --- a/MAINTAINERS |
440 | +++ b/MAINTAINERS | 937 | +++ b/MAINTAINERS |
441 | @@ -XXX,XX +XXX,XX @@ F: hw/timer/cmsdk-apb-timer.c | 938 | @@ -XXX,XX +XXX,XX @@ F: accel/accel-*.c |
442 | F: include/hw/timer/cmsdk-apb-timer.h | 939 | F: accel/Makefile.objs |
443 | F: hw/char/cmsdk-apb-uart.c | 940 | F: accel/stubs/Makefile.objs |
444 | F: include/hw/char/cmsdk-apb-uart.h | 941 | |
445 | +F: hw/watchdog/cmsdk-apb-watchdog.c | 942 | +Apple Silicon HVF CPUs |
446 | +F: include/hw/watchdog/cmsdk-apb-watchdog.h | 943 | +M: Alexander Graf <agraf@csgraf.de> |
447 | F: hw/misc/tz-ppc.c | 944 | +S: Maintained |
448 | F: include/hw/misc/tz-ppc.h | 945 | +F: target/arm/hvf/ |
449 | F: hw/misc/tz-mpc.c | 946 | + |
450 | diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak | 947 | X86 HVF CPUs |
451 | index XXXXXXX..XXXXXXX 100644 | 948 | M: Cameron Esfahani <dirty@apple.com> |
452 | --- a/default-configs/arm-softmmu.mak | 949 | M: Roman Bolshakov <r.bolshakov@yadro.com> |
453 | +++ b/default-configs/arm-softmmu.mak | 950 | diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events |
454 | @@ -XXX,XX +XXX,XX @@ CONFIG_STM32F205_SOC=y | ||
455 | |||
456 | CONFIG_CMSDK_APB_TIMER=y | ||
457 | CONFIG_CMSDK_APB_UART=y | ||
458 | +CONFIG_CMSDK_APB_WATCHDOG=y | ||
459 | |||
460 | CONFIG_MPS2_FPGAIO=y | ||
461 | CONFIG_MPS2_SCC=y | ||
462 | diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events | ||
463 | new file mode 100644 | 951 | new file mode 100644 |
464 | index XXXXXXX..XXXXXXX | 952 | index XXXXXXX..XXXXXXX |
465 | --- /dev/null | 953 | --- /dev/null |
466 | +++ b/hw/watchdog/trace-events | 954 | +++ b/target/arm/hvf/trace-events |
467 | @@ -XXX,XX +XXX,XX @@ | 955 | @@ -XXX,XX +XXX,XX @@ |
468 | +# See docs/devel/tracing.txt for syntax documentation. | 956 | +hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" |
469 | + | 957 | +hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" |
470 | +# hw/watchdog/cmsdk-apb-watchdog.c | 958 | +hvf_inject_fiq(void) "injecting FIQ" |
471 | +cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" | 959 | +hvf_inject_irq(void) "injecting IRQ" |
472 | +cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" | 960 | +hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" |
473 | +cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset" | 961 | +hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64 |
962 | +hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")" | ||
963 | +hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64 | ||
964 | +hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64 | ||
965 | +hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]" | ||
474 | -- | 966 | -- |
475 | 2.18.0 | 967 | 2.20.1 |
476 | 968 | ||
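A minimal standalone sketch of the vtimer bookkeeping used by hvf_vm_state_change() in the patch above: the guest-visible virtual counter is the host counter minus a per-VM offset, so pause/resume only has to recompute that offset. The names vtimer_offset/vtimer_val mirror the patch; the fake host counter and the pause/resume helpers are invented for illustration and are not QEMU code.

#include <stdint.h>

static uint64_t host_counter;   /* stand-in for mach_absolute_time() */
static uint64_t vtimer_offset;  /* per-VM offset, cf. hvf_state->vtimer_offset */
static uint64_t vtimer_val;     /* value frozen across a pause, cf. HVFVTimer */

/* What the guest's virtual counter reads while the VM is running. */
static uint64_t guest_vtimer_now(void)
{
    return host_counter - vtimer_offset;
}

/* On pause: remember the guest-visible value at the moment of stopping. */
static void on_vm_pause(void)
{
    vtimer_val = guest_vtimer_now();
}

/* On resume: rebuild the offset so the counter continues from the frozen
 * value, making the time spent paused invisible to the guest. */
static void on_vm_resume(void)
{
    vtimer_offset = host_counter - vtimer_val;
}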
1 | From: Roman Kapl <rka@sysgo.com> | 1 | From: Peter Collingbourne <pcc@google.com> |
---|---|---|---|
2 | 2 | ||
3 | If an instruction is conditional (like CBZ) and it is executed | 3 | Sleep on WFI until the VTIMER is due but allow ourselves to be woken |
4 | conditionally (using the ITx instruction), a jump to an undefined | 4 | up on IPI. |
5 | label is generated, and QEMU crashes. | ||
6 | 5 | ||
7 | CBZ in IT block is an UNPREDICTABLE behavior, but we should not | 6 | In this implementation IPI is blocked on the CPU thread at startup and |
8 | crash. Honouring the condition code is allowed by the spec in this | 7 | pselect() is used to atomically unblock the signal and begin sleeping. |
9 | case (constrained unpredictable, ARMv8, section K1.1.7), and matches | 8 | The signal is sent unconditionally so there's no need to worry about |
10 | what we do for other "UNPREDICTABLE inside an IT block" instructions. | 9 | races between actually sleeping and the "we think we're sleeping" |
10 | state. It may lead to an extra wakeup but that's better than missing | ||
11 | it entirely. | ||
11 | 12 | ||
12 | Fix the 'skip on condition' code to create a new label only if it | 13 | Signed-off-by: Peter Collingbourne <pcc@google.com> |
13 | does not already exist. Previously multiple labels were created, but | 14 | Signed-off-by: Alexander Graf <agraf@csgraf.de> |
14 | only the last one of them was set. | 15 | Acked-by: Roman Bolshakov <r.bolshakov@yadro.com> |
15 | 16 | Reviewed-by: Sergio Lopez <slp@redhat.com> | |
16 | Signed-off-by: Roman Kapl <rka@sysgo.com> | 17 | Message-id: 20210916155404.86958-6-agraf@csgraf.de |
17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | 18 | [agraf: Remove unused 'set' variable, always advance PC on WFX trap, |
18 | Message-id: 20180816120533.6587-1-rka@sysgo.com | 19 | support vm stop / continue operations and cntv offsets] |
19 | [PMM: fixed ^ 1 being applied to wrong argument, fixed typo] | 20 | Signed-off-by: Alexander Graf <agraf@csgraf.de> |
20 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 21 | Acked-by: Roman Bolshakov <r.bolshakov@yadro.com> |
22 | Reviewed-by: Sergio Lopez <slp@redhat.com> | ||
21 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 23 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
22 | --- | 24 | --- |
23 | target/arm/translate.c | 35 +++++++++++++++++++++-------------- | 25 | include/sysemu/hvf_int.h | 1 + |
24 | 1 file changed, 21 insertions(+), 14 deletions(-) | 26 | accel/hvf/hvf-accel-ops.c | 5 +-- |
27 | target/arm/hvf/hvf.c | 79 +++++++++++++++++++++++++++++++++++++++ | ||
28 | 3 files changed, 82 insertions(+), 3 deletions(-) | ||
25 | 29 | ||
26 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 30 | diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h |
27 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/target/arm/translate.c | 32 | --- a/include/sysemu/hvf_int.h |
29 | +++ b/target/arm/translate.c | 33 | +++ b/include/sysemu/hvf_int.h |
30 | @@ -XXX,XX +XXX,XX @@ static void gen_srs(DisasContext *s, | 34 | @@ -XXX,XX +XXX,XX @@ struct hvf_vcpu_state { |
31 | s->base.is_jmp = DISAS_UPDATE; | 35 | uint64_t fd; |
36 | void *exit; | ||
37 | bool vtimer_masked; | ||
38 | + sigset_t unblock_ipi_mask; | ||
39 | }; | ||
40 | |||
41 | void assert_hvf_ok(hv_return_t ret); | ||
42 | diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/accel/hvf/hvf-accel-ops.c | ||
45 | +++ b/accel/hvf/hvf-accel-ops.c | ||
46 | @@ -XXX,XX +XXX,XX @@ static int hvf_init_vcpu(CPUState *cpu) | ||
47 | cpu->hvf = g_malloc0(sizeof(*cpu->hvf)); | ||
48 | |||
49 | /* init cpu signals */ | ||
50 | - sigset_t set; | ||
51 | struct sigaction sigact; | ||
52 | |||
53 | memset(&sigact, 0, sizeof(sigact)); | ||
54 | sigact.sa_handler = dummy_signal; | ||
55 | sigaction(SIG_IPI, &sigact, NULL); | ||
56 | |||
57 | - pthread_sigmask(SIG_BLOCK, NULL, &set); | ||
58 | - sigdelset(&set, SIG_IPI); | ||
59 | + pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask); | ||
60 | + sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI); | ||
61 | |||
62 | #ifdef __aarch64__ | ||
63 | r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL); | ||
64 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
65 | index XXXXXXX..XXXXXXX 100644 | ||
66 | --- a/target/arm/hvf/hvf.c | ||
67 | +++ b/target/arm/hvf/hvf.c | ||
68 | @@ -XXX,XX +XXX,XX @@ | ||
69 | * QEMU Hypervisor.framework support for Apple Silicon | ||
70 | |||
71 | * Copyright 2020 Alexander Graf <agraf@csgraf.de> | ||
72 | + * Copyright 2020 Google LLC | ||
73 | * | ||
74 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
75 | * See the COPYING file in the top-level directory. | ||
76 | @@ -XXX,XX +XXX,XX @@ int hvf_arch_init_vcpu(CPUState *cpu) | ||
77 | |||
78 | void hvf_kick_vcpu_thread(CPUState *cpu) | ||
79 | { | ||
80 | + cpus_kick_thread(cpu); | ||
81 | hv_vcpus_exit(&cpu->hvf->fd, 1); | ||
32 | } | 82 | } |
33 | 83 | ||
34 | +/* Generate a label used for skipping this instruction */ | 84 | @@ -XXX,XX +XXX,XX @@ static uint64_t hvf_vtimer_val_raw(void) |
35 | +static void arm_gen_condlabel(DisasContext *s) | 85 | return mach_absolute_time() - hvf_state->vtimer_offset; |
86 | } | ||
87 | |||
88 | +static uint64_t hvf_vtimer_val(void) | ||
36 | +{ | 89 | +{ |
37 | + if (!s->condjmp) { | 90 | + if (!runstate_is_running()) { |
38 | + s->condlabel = gen_new_label(); | 91 | + /* VM is paused, the vtimer value is in vtimer.vtimer_val */ |
39 | + s->condjmp = 1; | 92 | + return vtimer.vtimer_val; |
40 | + } | 93 | + } |
94 | + | ||
95 | + return hvf_vtimer_val_raw(); | ||
41 | +} | 96 | +} |
42 | + | 97 | + |
43 | +/* Skip this instruction if the ARM condition is false */ | 98 | +static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) |
44 | +static void arm_skip_unless(DisasContext *s, uint32_t cond) | ||
45 | +{ | 99 | +{ |
46 | + arm_gen_condlabel(s); | 100 | + /* |
47 | + arm_gen_test_cc(cond ^ 1, s->condlabel); | 101 | + * Use pselect to sleep so that other threads can IPI us while we're |
102 | + * sleeping. | ||
103 | + */ | ||
104 | + qatomic_mb_set(&cpu->thread_kicked, false); | ||
105 | + qemu_mutex_unlock_iothread(); | ||
106 | + pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask); | ||
107 | + qemu_mutex_lock_iothread(); | ||
48 | +} | 108 | +} |
49 | + | 109 | + |
50 | static void disas_arm_insn(DisasContext *s, unsigned int insn) | 110 | +static void hvf_wfi(CPUState *cpu) |
111 | +{ | ||
112 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
113 | + struct timespec ts; | ||
114 | + hv_return_t r; | ||
115 | + uint64_t ctl; | ||
116 | + uint64_t cval; | ||
117 | + int64_t ticks_to_sleep; | ||
118 | + uint64_t seconds; | ||
119 | + uint64_t nanos; | ||
120 | + uint32_t cntfrq; | ||
121 | + | ||
122 | + if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) { | ||
123 | + /* Interrupt pending, no need to wait */ | ||
124 | + return; | ||
125 | + } | ||
126 | + | ||
127 | + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); | ||
128 | + assert_hvf_ok(r); | ||
129 | + | ||
130 | + if (!(ctl & 1) || (ctl & 2)) { | ||
131 | + /* Timer disabled or masked, just wait for an IPI. */ | ||
132 | + hvf_wait_for_ipi(cpu, NULL); | ||
133 | + return; | ||
134 | + } | ||
135 | + | ||
136 | + r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); | ||
137 | + assert_hvf_ok(r); | ||
138 | + | ||
139 | + ticks_to_sleep = cval - hvf_vtimer_val(); | ||
140 | + if (ticks_to_sleep < 0) { | ||
141 | + return; | ||
142 | + } | ||
143 | + | ||
144 | + cntfrq = gt_cntfrq_period_ns(arm_cpu); | ||
145 | + seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND); | ||
146 | + ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq); | ||
147 | + nanos = ticks_to_sleep * cntfrq; | ||
148 | + | ||
149 | + /* | ||
150 | + * Don't sleep for less than the time a context switch would take, | ||
151 | + * so that we can satisfy fast timer requests on the same CPU. | ||
152 | + * Measurements on M1 show the sweet spot to be ~2ms. | ||
153 | + */ | ||
154 | + if (!seconds && nanos < (2 * SCALE_MS)) { | ||
155 | + return; | ||
156 | + } | ||
157 | + | ||
158 | + ts = (struct timespec) { seconds, nanos }; | ||
159 | + hvf_wait_for_ipi(cpu, &ts); | ||
160 | +} | ||
161 | + | ||
162 | static void hvf_sync_vtimer(CPUState *cpu) | ||
51 | { | 163 | { |
52 | unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh; | 164 | ARMCPU *arm_cpu = ARM_CPU(cpu); |
53 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) | 165 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) |
54 | if (cond != 0xe) { | ||
55 | /* if not always execute, we generate a conditional jump to | ||
56 | next instruction */ | ||
57 | - s->condlabel = gen_new_label(); | ||
58 | - arm_gen_test_cc(cond ^ 1, s->condlabel); | ||
59 | - s->condjmp = 1; | ||
60 | + arm_skip_unless(s, cond); | ||
61 | } | 166 | } |
62 | if ((insn & 0x0f900000) == 0x03000000) { | 167 | case EC_WFX_TRAP: |
63 | if ((insn & (1 << 21)) == 0) { | 168 | advance_pc = true; |
64 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) | 169 | + if (!(syndrome & WFX_IS_WFE)) { |
65 | /* Conditional branch. */ | 170 | + hvf_wfi(cpu); |
66 | op = (insn >> 22) & 0xf; | 171 | + } |
67 | /* Generate a conditional jump to next instruction. */ | 172 | break; |
68 | - s->condlabel = gen_new_label(); | 173 | case EC_AA64_HVC: |
69 | - arm_gen_test_cc(op ^ 1, s->condlabel); | 174 | cpu_synchronize_state(cpu); |
70 | - s->condjmp = 1; | ||
71 | + arm_skip_unless(s, op); | ||
72 | |||
73 | /* offset[11:1] = insn[10:0] */ | ||
74 | offset = (insn & 0x7ff) << 1; | ||
75 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn) | ||
76 | case 1: case 3: case 9: case 11: /* czb */ | ||
77 | rm = insn & 7; | ||
78 | tmp = load_reg(s, rm); | ||
79 | - s->condlabel = gen_new_label(); | ||
80 | - s->condjmp = 1; | ||
81 | + arm_gen_condlabel(s); | ||
82 | if (insn & (1 << 11)) | ||
83 | tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); | ||
84 | else | ||
85 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn) | ||
86 | break; | ||
87 | } | ||
88 | /* generate a conditional jump to next instruction */ | ||
89 | - s->condlabel = gen_new_label(); | ||
90 | - arm_gen_test_cc(cond ^ 1, s->condlabel); | ||
91 | - s->condjmp = 1; | ||
92 | + arm_skip_unless(s, cond); | ||
93 | |||
94 | /* jump to the offset */ | ||
95 | val = (uint32_t)s->pc + 2; | ||
96 | @@ -XXX,XX +XXX,XX @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) | ||
97 | uint32_t cond = dc->condexec_cond; | ||
98 | |||
99 | if (cond != 0x0e) { /* Skip conditional when condition is AL. */ | ||
100 | - dc->condlabel = gen_new_label(); | ||
101 | - arm_gen_test_cc(cond ^ 1, dc->condlabel); | ||
102 | - dc->condjmp = 1; | ||
103 | + arm_skip_unless(dc, cond); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | -- | 175 | -- |
108 | 2.18.0 | 176 | 2.20.1 |
109 | 177 | ||
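A standalone sketch of the "block the IPI signal, then sleep with pselect()" pattern the commit message above describes. This is generic POSIX code, not the QEMU implementation: SIG_IPI is mapped to SIGUSR1 purely for the example and error checking is omitted.

#include <pthread.h>
#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <time.h>

#define SIG_IPI SIGUSR1

static sigset_t unblock_ipi_mask;

static void dummy_handler(int sig)
{
    (void)sig;  /* only present so SIG_IPI interrupts pselect() */
}

static void vcpu_thread_init_signals(void)
{
    struct sigaction sa = { .sa_handler = dummy_handler };
    sigset_t block;

    sigaction(SIG_IPI, &sa, NULL);

    /* Keep SIG_IPI blocked while the vCPU thread is running... */
    sigemptyset(&block);
    sigaddset(&block, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &block, NULL);

    /* ...and precompute a mask with it unblocked, for use while sleeping. */
    pthread_sigmask(SIG_BLOCK, NULL, &unblock_ipi_mask);
    sigdelset(&unblock_ipi_mask, SIG_IPI);
}

static void wait_for_ipi(const struct timespec *ts)
{
    /*
     * pselect() swaps in the mask and sleeps as one atomic step, so a kick
     * delivered at any point after we decided to sleep still interrupts the
     * call; ts == NULL means "sleep until kicked".
     */
    pselect(0, NULL, NULL, NULL, ts, &unblock_ipi_mask);
}

Unblocking with a separate sigprocmask() call followed by a plain sleep would reopen exactly the lost-wakeup window the commit message is worried about.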
1 | Create a new include file for the pl081's device struct, | 1 | Now that we have working system register sync, we push more target CPU |
---|---|---|---|
2 | type macros, etc, so that it can be instantiated using | 2 | properties into the virtual machine. That might be useful in some |
3 | the "embedded struct" coding style. | 3 | situations, but is not the typical case that users want. |
4 | 4 | ||
5 | So let's add a -cpu host option that allows them to explicitly pass all | ||
6 | CPU capabilities of their host CPU into the guest. | ||
7 | |||
8 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
9 | Acked-by: Roman Bolshakov <r.bolshakov@yadro.com> | ||
10 | Reviewed-by: Sergio Lopez <slp@redhat.com> | ||
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
12 | Message-id: 20210916155404.86958-7-agraf@csgraf.de | ||
13 | [PMM: drop unnecessary #include line from .h file] | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | ||
7 | --- | 15 | --- |
8 | include/hw/dma/pl080.h | 62 ++++++++++++++++++++++++++++++++++++++++++ | 16 | target/arm/cpu.h | 2 + |
9 | hw/dma/pl080.c | 34 ++--------------------- | 17 | target/arm/hvf_arm.h | 18 +++++++++ |
10 | MAINTAINERS | 1 + | 18 | target/arm/kvm_arm.h | 2 - |
11 | 3 files changed, 65 insertions(+), 32 deletions(-) | 19 | target/arm/cpu.c | 13 ++++-- |
12 | create mode 100644 include/hw/dma/pl080.h | 20 | target/arm/hvf/hvf.c | 95 ++++++++++++++++++++++++++++++++++++++++++++ |
13 | 21 | 5 files changed, 124 insertions(+), 6 deletions(-) | |
14 | diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h | 22 | create mode 100644 target/arm/hvf_arm.h |
23 | |||
24 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/target/arm/cpu.h | ||
27 | +++ b/target/arm/cpu.h | ||
28 | @@ -XXX,XX +XXX,XX @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); | ||
29 | #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) | ||
30 | #define CPU_RESOLVING_TYPE TYPE_ARM_CPU | ||
31 | |||
32 | +#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU | ||
33 | + | ||
34 | #define cpu_signal_handler cpu_arm_signal_handler | ||
35 | #define cpu_list arm_cpu_list | ||
36 | |||
37 | diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h | ||
15 | new file mode 100644 | 38 | new file mode 100644 |
16 | index XXXXXXX..XXXXXXX | 39 | index XXXXXXX..XXXXXXX |
17 | --- /dev/null | 40 | --- /dev/null |
18 | +++ b/include/hw/dma/pl080.h | 41 | +++ b/target/arm/hvf_arm.h |
19 | @@ -XXX,XX +XXX,XX @@ | 42 | @@ -XXX,XX +XXX,XX @@ |
20 | +/* | 43 | +/* |
21 | + * ARM PrimeCell PL080/PL081 DMA controller | 44 | + * QEMU Hypervisor.framework (HVF) support -- ARM specifics |
22 | + * | 45 | + * |
23 | + * Copyright (c) 2006 CodeSourcery. | 46 | + * Copyright (c) 2021 Alexander Graf |
24 | + * Copyright (c) 2018 Linaro Limited | ||
25 | + * Written by Paul Brook, Peter Maydell | ||
26 | + * | 47 | + * |
27 | + * This program is free software; you can redistribute it and/or modify | 48 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. |
28 | + * it under the terms of the GNU General Public License version 2 or | 49 | + * See the COPYING file in the top-level directory. |
29 | + * (at your option) any later version. | 50 | + * |
30 | + */ | 51 | + */ |
31 | + | 52 | + |
32 | +/* This is a model of the Arm PrimeCell PL080/PL081 DMA controller: | 53 | +#ifndef QEMU_HVF_ARM_H |
33 | + * The PL080 TRM is: | 54 | +#define QEMU_HVF_ARM_H |
34 | + * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0196g/DDI0196.pdf | 55 | + |
35 | + * and the PL081 TRM is: | 56 | +#include "cpu.h" |
36 | + * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0218e/DDI0218.pdf | 57 | + |
37 | + * | 58 | +void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu); |
38 | + * QEMU interface: | ||
39 | + * + sysbus IRQ: DMACINTR combined interrupt line | ||
40 | + * + sysbus MMIO region 0: MemoryRegion for the device's registers | ||
41 | + */ | ||
42 | + | ||
43 | +#ifndef HW_DMA_PL080_H | ||
44 | +#define HW_DMA_PL080_H | ||
45 | + | ||
46 | +#include "hw/sysbus.h" | ||
47 | + | ||
48 | +#define PL080_MAX_CHANNELS 8 | ||
49 | + | ||
50 | +typedef struct { | ||
51 | + uint32_t src; | ||
52 | + uint32_t dest; | ||
53 | + uint32_t lli; | ||
54 | + uint32_t ctrl; | ||
55 | + uint32_t conf; | ||
56 | +} pl080_channel; | ||
57 | + | ||
58 | +#define TYPE_PL080 "pl080" | ||
59 | +#define TYPE_PL081 "pl081" | ||
60 | +#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080) | ||
61 | + | ||
62 | +typedef struct PL080State { | ||
63 | + SysBusDevice parent_obj; | ||
64 | + | ||
65 | + MemoryRegion iomem; | ||
66 | + uint8_t tc_int; | ||
67 | + uint8_t tc_mask; | ||
68 | + uint8_t err_int; | ||
69 | + uint8_t err_mask; | ||
70 | + uint32_t conf; | ||
71 | + uint32_t sync; | ||
72 | + uint32_t req_single; | ||
73 | + uint32_t req_burst; | ||
74 | + pl080_channel chan[PL080_MAX_CHANNELS]; | ||
75 | + int nchannels; | ||
76 | + /* Flag to avoid recursive DMA invocations. */ | ||
77 | + int running; | ||
78 | + qemu_irq irq; | ||
79 | +} PL080State; | ||
80 | + | 59 | + |
81 | +#endif | 60 | +#endif |
82 | diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c | 61 | diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h |
83 | index XXXXXXX..XXXXXXX 100644 | 62 | index XXXXXXX..XXXXXXX 100644 |
84 | --- a/hw/dma/pl080.c | 63 | --- a/target/arm/kvm_arm.h |
85 | +++ b/hw/dma/pl080.c | 64 | +++ b/target/arm/kvm_arm.h |
65 | @@ -XXX,XX +XXX,XX @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, | ||
66 | */ | ||
67 | void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); | ||
68 | |||
69 | -#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU | ||
70 | - | ||
71 | /** | ||
72 | * ARMHostCPUFeatures: information about the host CPU (identified | ||
73 | * by asking the host kernel) | ||
74 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/target/arm/cpu.c | ||
77 | +++ b/target/arm/cpu.c | ||
86 | @@ -XXX,XX +XXX,XX @@ | 78 | @@ -XXX,XX +XXX,XX @@ |
87 | #include "hw/sysbus.h" | 79 | #include "sysemu/tcg.h" |
88 | #include "exec/address-spaces.h" | 80 | #include "sysemu/hw_accel.h" |
89 | #include "qemu/log.h" | 81 | #include "kvm_arm.h" |
90 | +#include "hw/dma/pl080.h" | 82 | +#include "hvf_arm.h" |
91 | 83 | #include "disas/capstone.h" | |
92 | -#define PL080_MAX_CHANNELS 8 | 84 | #include "fpu/softfloat.h" |
93 | #define PL080_CONF_E 0x1 | 85 | |
94 | #define PL080_CONF_M1 0x2 | 86 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) |
95 | #define PL080_CONF_M2 0x4 | 87 | * this is the first point where we can report it. |
88 | */ | ||
89 | if (cpu->host_cpu_probe_failed) { | ||
90 | - if (!kvm_enabled()) { | ||
91 | - error_setg(errp, "The 'host' CPU type can only be used with KVM"); | ||
92 | + if (!kvm_enabled() && !hvf_enabled()) { | ||
93 | + error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF"); | ||
94 | } else { | ||
95 | error_setg(errp, "Failed to retrieve host CPU features"); | ||
96 | } | ||
97 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) | ||
98 | #endif /* CONFIG_TCG */ | ||
99 | } | ||
100 | |||
101 | -#ifdef CONFIG_KVM | ||
102 | +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) | ||
103 | static void arm_host_initfn(Object *obj) | ||
104 | { | ||
105 | ARMCPU *cpu = ARM_CPU(obj); | ||
106 | |||
107 | +#ifdef CONFIG_KVM | ||
108 | kvm_arm_set_cpu_features_from_host(cpu); | ||
109 | if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { | ||
110 | aarch64_add_sve_properties(obj); | ||
111 | } | ||
112 | +#else | ||
113 | + hvf_arm_set_cpu_features_from_host(cpu); | ||
114 | +#endif | ||
115 | arm_cpu_post_init(obj); | ||
116 | } | ||
117 | |||
118 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_register_types(void) | ||
119 | { | ||
120 | type_register_static(&arm_cpu_type_info); | ||
121 | |||
122 | -#ifdef CONFIG_KVM | ||
123 | +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) | ||
124 | type_register_static(&host_arm_cpu_type_info); | ||
125 | #endif | ||
126 | } | ||
127 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
128 | index XXXXXXX..XXXXXXX 100644 | ||
129 | --- a/target/arm/hvf/hvf.c | ||
130 | +++ b/target/arm/hvf/hvf.c | ||
96 | @@ -XXX,XX +XXX,XX @@ | 131 | @@ -XXX,XX +XXX,XX @@ |
97 | #define PL080_CCTRL_D 0x02000000 | 132 | #include "sysemu/hvf.h" |
98 | #define PL080_CCTRL_S 0x01000000 | 133 | #include "sysemu/hvf_int.h" |
99 | 134 | #include "sysemu/hw_accel.h" | |
100 | -typedef struct { | 135 | +#include "hvf_arm.h" |
101 | - uint32_t src; | 136 | |
102 | - uint32_t dest; | 137 | #include <mach/mach_time.h> |
103 | - uint32_t lli; | 138 | |
104 | - uint32_t ctrl; | 139 | @@ -XXX,XX +XXX,XX @@ typedef struct HVFVTimer { |
105 | - uint32_t conf; | 140 | |
106 | -} pl080_channel; | 141 | static HVFVTimer vtimer; |
107 | - | 142 | |
108 | -#define TYPE_PL080 "pl080" | 143 | +typedef struct ARMHostCPUFeatures { |
109 | -#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080) | 144 | + ARMISARegisters isar; |
110 | - | 145 | + uint64_t features; |
111 | -typedef struct PL080State { | 146 | + uint64_t midr; |
112 | - SysBusDevice parent_obj; | 147 | + uint32_t reset_sctlr; |
113 | - | 148 | + const char *dtb_compatible; |
114 | - MemoryRegion iomem; | 149 | +} ARMHostCPUFeatures; |
115 | - uint8_t tc_int; | 150 | + |
116 | - uint8_t tc_mask; | 151 | +static ARMHostCPUFeatures arm_host_cpu_features; |
117 | - uint8_t err_int; | 152 | + |
118 | - uint8_t err_mask; | 153 | struct hvf_reg_match { |
119 | - uint32_t conf; | 154 | int reg; |
120 | - uint32_t sync; | 155 | uint64_t offset; |
121 | - uint32_t req_single; | 156 | @@ -XXX,XX +XXX,XX @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt) |
122 | - uint32_t req_burst; | 157 | return val; |
123 | - pl080_channel chan[PL080_MAX_CHANNELS]; | 158 | } |
124 | - int nchannels; | 159 | |
125 | - /* Flag to avoid recursive DMA invocations. */ | 160 | +static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) |
126 | - int running; | 161 | +{ |
127 | - qemu_irq irq; | 162 | + ARMISARegisters host_isar = {}; |
128 | -} PL080State; | 163 | + const struct isar_regs { |
129 | - | 164 | + int reg; |
130 | static const VMStateDescription vmstate_pl080_channel = { | 165 | + uint64_t *val; |
131 | .name = "pl080_channel", | 166 | + } regs[] = { |
132 | .version_id = 1, | 167 | + { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 }, |
133 | @@ -XXX,XX +XXX,XX @@ static const TypeInfo pl080_info = { | 168 | + { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 }, |
134 | }; | 169 | + { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 }, |
135 | 170 | + { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 }, | |
136 | static const TypeInfo pl081_info = { | 171 | + { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 }, |
137 | - .name = "pl081", | 172 | + { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 }, |
138 | + .name = TYPE_PL081, | 173 | + { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 }, |
139 | .parent = TYPE_PL080, | 174 | + { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 }, |
140 | .instance_init = pl081_init, | 175 | + { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 }, |
141 | }; | 176 | + }; |
142 | diff --git a/MAINTAINERS b/MAINTAINERS | 177 | + hv_vcpu_t fd; |
143 | index XXXXXXX..XXXXXXX 100644 | 178 | + hv_return_t r = HV_SUCCESS; |
144 | --- a/MAINTAINERS | 179 | + hv_vcpu_exit_t *exit; |
145 | +++ b/MAINTAINERS | 180 | + int i; |
146 | @@ -XXX,XX +XXX,XX @@ F: hw/char/pl011.c | 181 | + |
147 | F: include/hw/char/pl011.h | 182 | + ahcf->dtb_compatible = "arm,arm-v8"; |
148 | F: hw/display/pl110* | 183 | + ahcf->features = (1ULL << ARM_FEATURE_V8) | |
149 | F: hw/dma/pl080.c | 184 | + (1ULL << ARM_FEATURE_NEON) | |
150 | +F: include/hw/dma/pl080.h | 185 | + (1ULL << ARM_FEATURE_AARCH64) | |
151 | F: hw/dma/pl330.c | 186 | + (1ULL << ARM_FEATURE_PMU) | |
152 | F: hw/gpio/pl061.c | 187 | + (1ULL << ARM_FEATURE_GENERIC_TIMER); |
153 | F: hw/input/pl050.c | 188 | + |
189 | + /* We set up a small vcpu to extract host registers */ | ||
190 | + | ||
191 | + if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) { | ||
192 | + return false; | ||
193 | + } | ||
194 | + | ||
195 | + for (i = 0; i < ARRAY_SIZE(regs); i++) { | ||
196 | + r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val); | ||
197 | + } | ||
198 | + r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr); | ||
199 | + r |= hv_vcpu_destroy(fd); | ||
200 | + | ||
201 | + ahcf->isar = host_isar; | ||
202 | + | ||
203 | + /* | ||
204 | + * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1 | ||
205 | + * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97 | ||
206 | + */ | ||
207 | + ahcf->reset_sctlr = 0x30100180; | ||
208 | + /* | ||
209 | + * SPAN is disabled by default when SCTLR.SPAN=1. To improve compatibility, | ||
210 | + * let's disable it on boot and then allow guest software to turn it on by | ||
211 | + * setting it to 0. | ||
212 | + */ | ||
213 | + ahcf->reset_sctlr |= 0x00800000; | ||
214 | + | ||
215 | + /* Make sure we don't advertise AArch32 support for EL0/EL1 */ | ||
216 | + if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) { | ||
217 | + return false; | ||
218 | + } | ||
219 | + | ||
220 | + return r == HV_SUCCESS; | ||
221 | +} | ||
222 | + | ||
223 | +void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu) | ||
224 | +{ | ||
225 | + if (!arm_host_cpu_features.dtb_compatible) { | ||
226 | + if (!hvf_enabled() || | ||
227 | + !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) { | ||
228 | + /* | ||
229 | + * We can't report this error yet, so flag that we need to | ||
230 | + * in arm_cpu_realizefn(). | ||
231 | + */ | ||
232 | + cpu->host_cpu_probe_failed = true; | ||
233 | + return; | ||
234 | + } | ||
235 | + } | ||
236 | + | ||
237 | + cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible; | ||
238 | + cpu->isar = arm_host_cpu_features.isar; | ||
239 | + cpu->env.features = arm_host_cpu_features.features; | ||
240 | + cpu->midr = arm_host_cpu_features.midr; | ||
241 | + cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr; | ||
242 | +} | ||
243 | + | ||
244 | void hvf_arch_vcpu_destroy(CPUState *cpu) | ||
245 | { | ||
246 | } | ||
154 | -- | 247 | -- |
155 | 2.18.0 | 248 | 2.20.1 |
156 | 249 | ||
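With the '-cpu host' support above in place, the natural invocation on an Apple Silicon host is something along these lines (illustrative only; the machine and guest options are placeholders, not taken from this series):

    qemu-system-aarch64 -machine virt -accel hvf -cpu host \
        -m 4096 -kernel Image -append "console=ttyAMA0" -nographic

where "-cpu host" passes the host's ID register values through via the new hvf_arm_set_cpu_features_from_host() path instead of using a fixed model such as cortex-a72.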
New patch | |||
---|---|---|---|
1 | 1 | From: Alexander Graf <agraf@csgraf.de> | |
2 | |||
3 | We need to handle PSCI calls. Most of the TCG code works for us, | ||
4 | but we can simplify it to only handle aa64 mode and we need to | ||
5 | handle SUSPEND differently. | ||
6 | |||
7 | This patch takes the TCG code as template and duplicates it in HVF. | ||
8 | |||
9 | To tell the guest that we support PSCI 0.2 now, update the check in | ||
10 | arm_cpu_initfn() as well. | ||
11 | |||
12 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
13 | Reviewed-by: Sergio Lopez <slp@redhat.com> | ||
14 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
15 | Message-id: 20210916155404.86958-8-agraf@csgraf.de | ||
16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
17 | --- | ||
18 | target/arm/cpu.c | 4 +- | ||
19 | target/arm/hvf/hvf.c | 141 ++++++++++++++++++++++++++++++++++-- | ||
20 | target/arm/hvf/trace-events | 1 + | ||
21 | 3 files changed, 139 insertions(+), 7 deletions(-) | ||
22 | |||
23 | diff --git a/target/arm/cpu.c b/target/arm/cpu.c | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/target/arm/cpu.c | ||
26 | +++ b/target/arm/cpu.c | ||
27 | @@ -XXX,XX +XXX,XX @@ static void arm_cpu_initfn(Object *obj) | ||
28 | cpu->psci_version = 1; /* By default assume PSCI v0.1 */ | ||
29 | cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; | ||
30 | |||
31 | - if (tcg_enabled()) { | ||
32 | - cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ | ||
33 | + if (tcg_enabled() || hvf_enabled()) { | ||
34 | + cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */ | ||
35 | } | ||
36 | } | ||
37 | |||
38 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/target/arm/hvf/hvf.c | ||
41 | +++ b/target/arm/hvf/hvf.c | ||
42 | @@ -XXX,XX +XXX,XX @@ | ||
43 | #include "hw/irq.h" | ||
44 | #include "qemu/main-loop.h" | ||
45 | #include "sysemu/cpus.h" | ||
46 | +#include "arm-powerctl.h" | ||
47 | #include "target/arm/cpu.h" | ||
48 | #include "target/arm/internals.h" | ||
49 | #include "trace/trace-target_arm_hvf.h" | ||
50 | @@ -XXX,XX +XXX,XX @@ | ||
51 | #define TMR_CTL_IMASK (1 << 1) | ||
52 | #define TMR_CTL_ISTATUS (1 << 2) | ||
53 | |||
54 | +static void hvf_wfi(CPUState *cpu); | ||
55 | + | ||
56 | typedef struct HVFVTimer { | ||
57 | /* Vtimer value during migration and paused state */ | ||
58 | uint64_t vtimer_val; | ||
59 | @@ -XXX,XX +XXX,XX @@ static void hvf_raise_exception(CPUState *cpu, uint32_t excp, | ||
60 | arm_cpu_do_interrupt(cpu); | ||
61 | } | ||
62 | |||
63 | +static void hvf_psci_cpu_off(ARMCPU *arm_cpu) | ||
64 | +{ | ||
65 | + int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity); | ||
66 | + assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS); | ||
67 | +} | ||
68 | + | ||
69 | +/* | ||
70 | + * Handle a PSCI call. | ||
71 | + * | ||
72 | + * Returns 0 on success | ||
73 | + * -1 when the PSCI call is unknown, | ||
74 | + */ | ||
75 | +static bool hvf_handle_psci_call(CPUState *cpu) | ||
76 | +{ | ||
77 | + ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
78 | + CPUARMState *env = &arm_cpu->env; | ||
79 | + uint64_t param[4] = { | ||
80 | + env->xregs[0], | ||
81 | + env->xregs[1], | ||
82 | + env->xregs[2], | ||
83 | + env->xregs[3] | ||
84 | + }; | ||
85 | + uint64_t context_id, mpidr; | ||
86 | + bool target_aarch64 = true; | ||
87 | + CPUState *target_cpu_state; | ||
88 | + ARMCPU *target_cpu; | ||
89 | + target_ulong entry; | ||
90 | + int target_el = 1; | ||
91 | + int32_t ret = 0; | ||
92 | + | ||
93 | + trace_hvf_psci_call(param[0], param[1], param[2], param[3], | ||
94 | + arm_cpu->mp_affinity); | ||
95 | + | ||
96 | + switch (param[0]) { | ||
97 | + case QEMU_PSCI_0_2_FN_PSCI_VERSION: | ||
98 | + ret = QEMU_PSCI_0_2_RET_VERSION_0_2; | ||
99 | + break; | ||
100 | + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: | ||
101 | + ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ | ||
102 | + break; | ||
103 | + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: | ||
104 | + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: | ||
105 | + mpidr = param[1]; | ||
106 | + | ||
107 | + switch (param[2]) { | ||
108 | + case 0: | ||
109 | + target_cpu_state = arm_get_cpu_by_id(mpidr); | ||
110 | + if (!target_cpu_state) { | ||
111 | + ret = QEMU_PSCI_RET_INVALID_PARAMS; | ||
112 | + break; | ||
113 | + } | ||
114 | + target_cpu = ARM_CPU(target_cpu_state); | ||
115 | + | ||
116 | + ret = target_cpu->power_state; | ||
117 | + break; | ||
118 | + default: | ||
119 | + /* Everything above affinity level 0 is always on. */ | ||
120 | + ret = 0; | ||
121 | + } | ||
122 | + break; | ||
123 | + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: | ||
124 | + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); | ||
125 | + /* | ||
126 | + * QEMU reset and shutdown are async requests, but PSCI | ||
127 | + * mandates that we never return from the reset/shutdown | ||
128 | + * call, so power the CPU off now so it doesn't execute | ||
129 | + * anything further. | ||
130 | + */ | ||
131 | + hvf_psci_cpu_off(arm_cpu); | ||
132 | + break; | ||
133 | + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: | ||
134 | + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); | ||
135 | + hvf_psci_cpu_off(arm_cpu); | ||
136 | + break; | ||
137 | + case QEMU_PSCI_0_1_FN_CPU_ON: | ||
138 | + case QEMU_PSCI_0_2_FN_CPU_ON: | ||
139 | + case QEMU_PSCI_0_2_FN64_CPU_ON: | ||
140 | + mpidr = param[1]; | ||
141 | + entry = param[2]; | ||
142 | + context_id = param[3]; | ||
143 | + ret = arm_set_cpu_on(mpidr, entry, context_id, | ||
144 | + target_el, target_aarch64); | ||
145 | + break; | ||
146 | + case QEMU_PSCI_0_1_FN_CPU_OFF: | ||
147 | + case QEMU_PSCI_0_2_FN_CPU_OFF: | ||
148 | + hvf_psci_cpu_off(arm_cpu); | ||
149 | + break; | ||
150 | + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: | ||
151 | + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: | ||
152 | + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: | ||
153 | + /* Affinity levels are not supported in QEMU */ | ||
154 | + if (param[1] & 0xfffe0000) { | ||
155 | + ret = QEMU_PSCI_RET_INVALID_PARAMS; | ||
156 | + break; | ||
157 | + } | ||
158 | + /* Powerdown is not supported, we always go into WFI */ | ||
159 | + env->xregs[0] = 0; | ||
160 | + hvf_wfi(cpu); | ||
161 | + break; | ||
162 | + case QEMU_PSCI_0_1_FN_MIGRATE: | ||
163 | + case QEMU_PSCI_0_2_FN_MIGRATE: | ||
164 | + ret = QEMU_PSCI_RET_NOT_SUPPORTED; | ||
165 | + break; | ||
166 | + default: | ||
167 | + return false; | ||
168 | + } | ||
169 | + | ||
170 | + env->xregs[0] = ret; | ||
171 | + return true; | ||
172 | +} | ||
173 | + | ||
174 | static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) | ||
175 | { | ||
176 | ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
177 | @@ -XXX,XX +XXX,XX @@ int hvf_vcpu_exec(CPUState *cpu) | ||
178 | break; | ||
179 | case EC_AA64_HVC: | ||
180 | cpu_synchronize_state(cpu); | ||
181 | - trace_hvf_unknown_hvc(env->xregs[0]); | ||
182 | - /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ | ||
183 | - env->xregs[0] = -1; | ||
184 | + if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) { | ||
185 | + if (!hvf_handle_psci_call(cpu)) { | ||
186 | + trace_hvf_unknown_hvc(env->xregs[0]); | ||
187 | + /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ | ||
188 | + env->xregs[0] = -1; | ||
189 | + } | ||
190 | + } else { | ||
191 | + trace_hvf_unknown_hvc(env->xregs[0]); | ||
192 | + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
193 | + } | ||
194 | break; | ||
195 | case EC_AA64_SMC: | ||
196 | cpu_synchronize_state(cpu); | ||
197 | - trace_hvf_unknown_smc(env->xregs[0]); | ||
198 | - hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
199 | + if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) { | ||
200 | + advance_pc = true; | ||
201 | + | ||
202 | + if (!hvf_handle_psci_call(cpu)) { | ||
203 | + trace_hvf_unknown_smc(env->xregs[0]); | ||
204 | + /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */ | ||
205 | + env->xregs[0] = -1; | ||
206 | + } | ||
207 | + } else { | ||
208 | + trace_hvf_unknown_smc(env->xregs[0]); | ||
209 | + hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized()); | ||
210 | + } | ||
211 | break; | ||
212 | default: | ||
213 | cpu_synchronize_state(cpu); | ||
214 | diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events | ||
215 | index XXXXXXX..XXXXXXX 100644 | ||
216 | --- a/target/arm/hvf/trace-events | ||
217 | +++ b/target/arm/hvf/trace-events | ||
218 | @@ -XXX,XX +XXX,XX @@ hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_ | ||
219 | hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64 | ||
220 | hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64 | ||
221 | hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]" | ||
222 | +hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x" | ||
223 | -- | ||
224 | 2.20.1 | ||
225 | |||
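For reference, this is roughly how such a PSCI call looks from the guest side when the conduit is HVC. The snippet is a hedged sketch, not code from this series; the function ID value follows the PSCI 0.2 specification and should be checked against the spec before reuse.

#include <stdint.h>

#define PSCI_0_2_FN64_CPU_ON 0xC4000003u   /* per the PSCI 0.2 spec */

static inline int64_t psci_hvc_call(uint64_t fn, uint64_t a1,
                                    uint64_t a2, uint64_t a3)
{
    register uint64_t x0 __asm__("x0") = fn;
    register uint64_t x1 __asm__("x1") = a1;
    register uint64_t x2 __asm__("x2") = a2;
    register uint64_t x3 __asm__("x3") = a3;

    /* Traps out to the VMM as an EC_AA64_HVC exit; x0 carries the result. */
    __asm__ volatile("hvc #0"
                     : "+r"(x0)
                     : "r"(x1), "r"(x2), "r"(x3)
                     : "memory");
    return (int64_t)x0;
}

/* e.g. bring up the CPU with MPIDR 1 at entry point 0x40080000:
 *     int64_t ret = psci_hvc_call(PSCI_0_2_FN64_CPU_ON, 1, 0x40080000, 0);
 * which is what ends up in hvf_handle_psci_call() above.
 */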
1 | From: Stefan Hajnoczi <stefanha@redhat.com> | 1 | From: Alexander Graf <agraf@csgraf.de> |
---|---|---|---|
2 | 2 | ||
3 | The generic loader device supports the U-Boot and Intel HEX executable | 3 | Now that we have all logic in place that we need to handle Hypervisor.framework |
4 | formats in addition to the document raw and ELF formats. Reword the | 4 | on Apple Silicon systems, let's add CONFIG_HVF for aarch64 as well so that we |
5 | documentation to include these formats and explain how various options | 5 | can build it. |
6 | depend on the executable format. | ||
7 | 6 | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Signed-off-by: Alexander Graf <agraf@csgraf.de> |
9 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | 8 | Reviewed-by: Roman Bolshakov <r.bolshakov@yadro.com> |
10 | Message-id: 20180816145554.9814-1-stefanha@redhat.com | 9 | Tested-by: Roman Bolshakov <r.bolshakov@yadro.com> (x86 only) |
11 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | 10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Sergio Lopez <slp@redhat.com> | ||
12 | Message-id: 20210916155404.86958-9-agraf@csgraf.de | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 13 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
13 | --- | 14 | --- |
14 | docs/generic-loader.txt | 20 ++++++++++---------- | 15 | meson.build | 7 +++++++ |
15 | 1 file changed, 10 insertions(+), 10 deletions(-) | 16 | target/arm/hvf/meson.build | 3 +++ |
17 | target/arm/meson.build | 2 ++ | ||
18 | 3 files changed, 12 insertions(+) | ||
19 | create mode 100644 target/arm/hvf/meson.build | ||
16 | 20 | ||
17 | diff --git a/docs/generic-loader.txt b/docs/generic-loader.txt | 21 | diff --git a/meson.build b/meson.build |
18 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/docs/generic-loader.txt | 23 | --- a/meson.build |
20 | +++ b/docs/generic-loader.txt | 24 | +++ b/meson.build |
21 | @@ -XXX,XX +XXX,XX @@ An example of setting CPU 0's PC to 0x8000 is: | 25 | @@ -XXX,XX +XXX,XX @@ else |
22 | 26 | endif | |
23 | Loading Files | 27 | |
24 | ------------- | 28 | accelerator_targets = { 'CONFIG_KVM': kvm_targets } |
25 | -The loader device also allows files to be loaded into memory. It can load raw | 29 | + |
26 | -files and ELF executable files. Raw files are loaded verbatim. ELF executable | 30 | +if cpu in ['aarch64'] |
27 | -files are loaded by an ELF loader. The syntax is shown below: | 31 | + accelerator_targets += { |
28 | +The loader device also allows files to be loaded into memory. It can load ELF, | 32 | + 'CONFIG_HVF': ['aarch64-softmmu'] |
29 | +U-Boot, and Intel HEX executable formats as well as raw images. The syntax is | 33 | + } |
30 | +shown below: | 34 | +endif |
31 | 35 | + | |
32 | -device loader,file=<file>[,addr=<addr>][,cpu-num=<cpu-num>][,force-raw=<raw>] | 36 | if cpu in ['x86', 'x86_64', 'arm', 'aarch64'] |
33 | 37 | # i386 emulator provides xenpv machine type for multiple architectures | |
34 | <file> - A file to be loaded into memory | 38 | accelerator_targets += { |
35 | - <addr> - The addr in memory that the file should be loaded. This is | 39 | diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build |
36 | - ignored if you are using an ELF (unless force-raw is true). | 40 | new file mode 100644 |
37 | - This is required if you aren't loading an ELF. | 41 | index XXXXXXX..XXXXXXX |
38 | + <addr> - The memory address where the file should be loaded. This is | 42 | --- /dev/null |
39 | + required for raw images and ignored for non-raw files. | 43 | +++ b/target/arm/hvf/meson.build |
40 | <cpu-num> - This specifies the CPU that should be used. This is an | 44 | @@ -XXX,XX +XXX,XX @@ |
41 | optional argument and will cause the CPU's PC to be set to | 45 | +arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( |
42 | - where the image is stored or in the case of an ELF file to | 46 | + 'hvf.c', |
43 | - the value in the header. This option should only be used | 47 | +)) |
44 | - for the boot image. | 48 | diff --git a/target/arm/meson.build b/target/arm/meson.build |
45 | + the memory address where the raw file is loaded or the entry | 49 | index XXXXXXX..XXXXXXX 100644 |
46 | + point specified in the executable format header. This option | 50 | --- a/target/arm/meson.build |
47 | + should only be used for the boot image. | 51 | +++ b/target/arm/meson.build |
48 | This will also cause the image to be written to the specified | 52 | @@ -XXX,XX +XXX,XX @@ arm_softmmu_ss.add(files( |
49 | CPU's address space. If not specified, the default is CPU 0. | 53 | 'psci.c', |
50 | <force-raw> - Setting force-raw=on forces the file to be treated as a raw | 54 | )) |
51 | - image. This can be used to load ELF files as if they were raw. | 55 | |
52 | + image. This can be used to load supported executable formats | 56 | +subdir('hvf') |
53 | + as if they were raw. | 57 | + |
54 | 58 | target_arch += {'arm': arm_ss} | |
55 | All values are parsed using the standard QemuOps parsing. This allows the user | 59 | target_softmmu_arch += {'arm': arm_softmmu_ss} |
56 | to specify any values in any format supported. By default the values | ||
57 | -- | 60 | -- |
58 | 2.18.0 | 61 | 2.20.1 |
59 | 62 | ||
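To actually get the new code built, a macOS/Apple Silicon host can be configured roughly like this (treat it as a sketch rather than official build documentation; flag names follow the usual configure conventions):

    ./configure --target-list=aarch64-softmmu --enable-hvf
    make -j8

after which "qemu-system-aarch64 -accel help" should list hvf alongside tcg.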
New patch | |||
---|---|---|---|
1 | 1 | From: Alexander Graf <agraf@csgraf.de> | |
2 | |||
3 | We can expose cycle counters on the PMU easily. To be as compatible as | ||
4 | possible, let's do so, but make sure we don't expose any other architectural | ||
5 | counters that we can not model yet. | ||
6 | |||
7 | This allows OSs to work that require PMU support. | ||
8 | |||
9 | Signed-off-by: Alexander Graf <agraf@csgraf.de> | ||
10 | Reviewed-by: Peter Maydell <peter.maydell@linaro.org> | ||
11 | Message-id: 20210916155404.86958-10-agraf@csgraf.de | ||
12 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | ||
13 | --- | ||
14 | target/arm/hvf/hvf.c | 179 +++++++++++++++++++++++++++++++++++++++++++ | ||
15 | 1 file changed, 179 insertions(+) | ||
16 | |||
17 | diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/target/arm/hvf/hvf.c | ||
20 | +++ b/target/arm/hvf/hvf.c | ||
21 | @@ -XXX,XX +XXX,XX @@ | ||
22 | #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) | ||
23 | #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) | ||
24 | #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) | ||
25 | +#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) | ||
26 | +#define SYSREG_PMUSERENR_EL0 SYSREG(3, 3, 9, 14, 0) | ||
27 | +#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1) | ||
28 | +#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2) | ||
29 | +#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2) | ||
30 | +#define SYSREG_PMOVSCLR_EL0 SYSREG(3, 3, 9, 12, 3) | ||
31 | +#define SYSREG_PMSWINC_EL0 SYSREG(3, 3, 9, 12, 4) | ||
32 | +#define SYSREG_PMSELR_EL0 SYSREG(3, 3, 9, 12, 5) | ||
33 | +#define SYSREG_PMCEID0_EL0 SYSREG(3, 3, 9, 12, 6) | ||
34 | +#define SYSREG_PMCEID1_EL0 SYSREG(3, 3, 9, 12, 7) | ||
35 | +#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0) | ||
36 | +#define SYSREG_PMCCFILTR_EL0 SYSREG(3, 3, 14, 15, 7) | ||
37 | |||
38 | #define WFX_IS_WFE (1 << 0) | ||
39 | |||
40 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) | ||
41 | val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / | ||
42 | gt_cntfrq_period_ns(arm_cpu); | ||
43 | break; | ||
44 | + case SYSREG_PMCR_EL0: | ||
45 | + val = env->cp15.c9_pmcr; | ||
46 | + break; | ||
47 | + case SYSREG_PMCCNTR_EL0: | ||
48 | + pmu_op_start(env); | ||
49 | + val = env->cp15.c15_ccnt; | ||
50 | + pmu_op_finish(env); | ||
51 | + break; | ||
52 | + case SYSREG_PMCNTENCLR_EL0: | ||
53 | + val = env->cp15.c9_pmcnten; | ||
54 | + break; | ||
55 | + case SYSREG_PMOVSCLR_EL0: | ||
56 | + val = env->cp15.c9_pmovsr; | ||
57 | + break; | ||
58 | + case SYSREG_PMSELR_EL0: | ||
59 | + val = env->cp15.c9_pmselr; | ||
60 | + break; | ||
61 | + case SYSREG_PMINTENCLR_EL1: | ||
62 | + val = env->cp15.c9_pminten; | ||
63 | + break; | ||
64 | + case SYSREG_PMCCFILTR_EL0: | ||
65 | + val = env->cp15.pmccfiltr_el0; | ||
66 | + break; | ||
67 | + case SYSREG_PMCNTENSET_EL0: | ||
68 | + val = env->cp15.c9_pmcnten; | ||
69 | + break; | ||
70 | + case SYSREG_PMUSERENR_EL0: | ||
71 | + val = env->cp15.c9_pmuserenr; | ||
72 | + break; | ||
73 | + case SYSREG_PMCEID0_EL0: | ||
74 | + case SYSREG_PMCEID1_EL0: | ||
75 | + /* We can't really count anything yet, declare all events invalid */ | ||
76 | + val = 0; | ||
77 | + break; | ||
78 | case SYSREG_OSLSR_EL1: | ||
79 | val = env->cp15.oslsr_el1; | ||
80 | break; | ||
81 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt) | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | +static void pmu_update_irq(CPUARMState *env) | ||
86 | +{ | ||
87 | + ARMCPU *cpu = env_archcpu(env); | ||
88 | + qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && | ||
89 | + (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); | ||
90 | +} | ||
91 | + | ||
92 | +static bool pmu_event_supported(uint16_t number) | ||
93 | +{ | ||
94 | + return false; | ||
95 | +} | ||
96 | + | ||
97 | +/* Returns true if the counter (pass 31 for PMCCNTR) should count events using | ||
98 | + * the current EL, security state, and register configuration. | ||
99 | + */ | ||
100 | +static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) | ||
101 | +{ | ||
102 | + uint64_t filter; | ||
103 | + bool enabled, filtered = true; | ||
104 | + int el = arm_current_el(env); | ||
105 | + | ||
106 | + enabled = (env->cp15.c9_pmcr & PMCRE) && | ||
107 | + (env->cp15.c9_pmcnten & (1 << counter)); | ||
108 | + | ||
109 | + if (counter == 31) { | ||
110 | + filter = env->cp15.pmccfiltr_el0; | ||
111 | + } else { | ||
112 | + filter = env->cp15.c14_pmevtyper[counter]; | ||
113 | + } | ||
114 | + | ||
115 | + if (el == 0) { | ||
116 | + filtered = filter & PMXEVTYPER_U; | ||
117 | + } else if (el == 1) { | ||
118 | + filtered = filter & PMXEVTYPER_P; | ||
119 | + } | ||
120 | + | ||
121 | + if (counter != 31) { | ||
122 | + /* | ||
123 | + * If not checking PMCCNTR, ensure the counter is setup to an event we | ||
124 | + * support | ||
125 | + */ | ||
126 | + uint16_t event = filter & PMXEVTYPER_EVTCOUNT; | ||
127 | + if (!pmu_event_supported(event)) { | ||
128 | + return false; | ||
129 | + } | ||
130 | + } | ||
131 | + | ||
132 | + return enabled && !filtered; | ||
133 | +} | ||
134 | + | ||
135 | +static void pmswinc_write(CPUARMState *env, uint64_t value) | ||
136 | +{ | ||
137 | + unsigned int i; | ||
138 | + for (i = 0; i < pmu_num_counters(env); i++) { | ||
139 | + /* Increment a counter's count iff: */ | ||
140 | + if ((value & (1 << i)) && /* counter's bit is set */ | ||
141 | + /* counter is enabled and not filtered */ | ||
142 | + pmu_counter_enabled(env, i) && | ||
143 | + /* counter is SW_INCR */ | ||
144 | + (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { | ||
145 | + /* | ||
146 | + * Detect if this write causes an overflow since we can't predict | ||
147 | + * PMSWINC overflows like we can for other events | ||
148 | + */ | ||
149 | + uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; | ||
150 | + | ||
151 | + if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { | ||
152 | + env->cp15.c9_pmovsr |= (1 << i); | ||
153 | + pmu_update_irq(env); | ||
154 | + } | ||
155 | + | ||
156 | + env->cp15.c14_pmevcntr[i] = new_pmswinc; | ||
157 | + } | ||
158 | + } | ||
159 | +} | ||
160 | + | ||
161 | static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | ||
162 | { | ||
163 | ARMCPU *arm_cpu = ARM_CPU(cpu); | ||
164 | @@ -XXX,XX +XXX,XX @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) | ||
165 | val); | ||
166 | |||
167 | switch (reg) { | ||
168 | + case SYSREG_PMCCNTR_EL0: | ||
169 | + pmu_op_start(env); | ||
170 | + env->cp15.c15_ccnt = val; | ||
171 | + pmu_op_finish(env); | ||
172 | + break; | ||
173 | + case SYSREG_PMCR_EL0: | ||
174 | + pmu_op_start(env); | ||
175 | + | ||
176 | + if (val & PMCRC) { | ||
177 | + /* The counter has been reset */ | ||
178 | + env->cp15.c15_ccnt = 0; | ||
179 | + } | ||
180 | + | ||
181 | + if (val & PMCRP) { | ||
182 | + unsigned int i; | ||
183 | + for (i = 0; i < pmu_num_counters(env); i++) { | ||
184 | + env->cp15.c14_pmevcntr[i] = 0; | ||
185 | + } | ||
186 | + } | ||
187 | + | ||
188 | + env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; | ||
189 | + env->cp15.c9_pmcr |= (val & PMCR_WRITEABLE_MASK); | ||
190 | + | ||
191 | + pmu_op_finish(env); | ||
192 | + break; | ||
193 | + case SYSREG_PMUSERENR_EL0: | ||
194 | + env->cp15.c9_pmuserenr = val & 0xf; | ||
195 | + break; | ||
196 | + case SYSREG_PMCNTENSET_EL0: | ||
197 | + env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env)); | ||
198 | + break; | ||
199 | + case SYSREG_PMCNTENCLR_EL0: | ||
200 | + env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env)); | ||
201 | + break; | ||
202 | + case SYSREG_PMINTENCLR_EL1: | ||
203 | + pmu_op_start(env); | ||
204 | + env->cp15.c9_pminten |= val; | ||
205 | + pmu_op_finish(env); | ||
206 | + break; | ||
207 | + case SYSREG_PMOVSCLR_EL0: | ||
208 | + pmu_op_start(env); | ||
209 | + env->cp15.c9_pmovsr &= ~val; | ||
210 | + pmu_op_finish(env); | ||
211 | + break; | ||
212 | + case SYSREG_PMSWINC_EL0: | ||
213 | + pmu_op_start(env); | ||
214 | + pmswinc_write(env, val); | ||
215 | + pmu_op_finish(env); | ||
216 | + break; | ||
217 | + case SYSREG_PMSELR_EL0: | ||
218 | + env->cp15.c9_pmselr = val & 0x1f; | ||
219 | + break; | ||
220 | + case SYSREG_PMCCFILTR_EL0: | ||
221 | + pmu_op_start(env); | ||
222 | + env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0; | ||
223 | + pmu_op_finish(env); | ||
224 | + break; | ||
225 | case SYSREG_OSLAR_EL1: | ||
226 | env->cp15.oslsr_el1 = val & 1; | ||
227 | break; | ||
228 | -- | ||
229 | 2.20.1 | ||
230 | |||
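A guest-side sketch of exercising the cycle counter that the patch above exposes (run at EL1, or set up PMUSERENR_EL0 first for EL0 access). Register and bit names follow the Arm ARM; this is illustrative code, not part of the series.

#include <stdint.h>

static inline void enable_cycle_counter(void)
{
    uint64_t pmcr;

    __asm__ volatile("mrs %0, pmcr_el0" : "=r"(pmcr));
    pmcr |= 1u;                                        /* PMCR_EL0.E */
    __asm__ volatile("msr pmcr_el0, %0" : : "r"(pmcr));
    /* Bit 31 of PMCNTENSET_EL0 enables the cycle counter itself. */
    __asm__ volatile("msr pmcntenset_el0, %0" : : "r"(1ULL << 31));
    __asm__ volatile("isb");
}

static inline uint64_t read_cycle_counter(void)
{
    uint64_t v;

    __asm__ volatile("mrs %0, pmccntr_el0" : "=r"(v));
    return v;
}

Each of these MRS/MSR accesses traps out to the new hvf_sysreg_read()/hvf_sysreg_write() handlers above, which back the counter with QEMU's emulated PMU state rather than the host PMU hardware.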
1 | ARMv7VE introduced the ERET instruction, which is necessary to | 1 | Currently gen_jmp_tb() assumes that if it is called then the jump it |
---|---|---|---|
2 | return from an exception taken to Hyp mode. Implement this. | 2 | is handling is the only reason that we might be trying to end the TB, |
3 | In A32 encoding it is a completely new encoding; in T32 it | 3 | so it will use goto_tb if it can. This is usually the case: mostly |
4 | is an adjustment of the behaviour of the existing | 4 | "we did something that means we must end the TB" happens on a |
5 | "SUBS PC, LR, #<imm8>" instruction. | 5 | non-branch instruction. However, there are cases where we decide |
6 | early in handling an instruction that we need to end the TB and | ||
7 | return to the main loop, and then the insn is a complex one that | ||
8 | involves gen_jmp_tb(). For instance, for M-profile FP instructions, | ||
9 | in gen_preserve_fp_state() which is called from vfp_access_check() we | ||
10 | want to force an exit to the main loop if lazy state preservation is | ||
11 | active and we are in icount mode. | ||
12 | |||
13 | Make gen_jmp_tb() look at the current value of is_jmp, and only use | ||
14 | goto_tb if the previous is_jmp was DISAS_NEXT or DISAS_TOO_MANY. | ||
6 | 15 | ||
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 16 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 17 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Reviewed-by: Luc Michel <luc.michel@greensocs.com> | 18 | Message-id: 20210913095440.13462-2-peter.maydell@linaro.org |
10 | Message-id: 20180814124254.5229-10-peter.maydell@linaro.org | ||
11 | --- | 19 | --- |
12 | target/arm/translate.c | 31 +++++++++++++++++++++++++++++-- | 20 | target/arm/translate.c | 34 +++++++++++++++++++++++++++++++++- |
13 | 1 file changed, 29 insertions(+), 2 deletions(-) | 21 | 1 file changed, 33 insertions(+), 1 deletion(-) |
14 | 22 | ||
15 | diff --git a/target/arm/translate.c b/target/arm/translate.c | 23 | diff --git a/target/arm/translate.c b/target/arm/translate.c |
16 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/translate.c | 25 | --- a/target/arm/translate.c |
18 | +++ b/target/arm/translate.c | 26 | +++ b/target/arm/translate.c |
19 | @@ -XXX,XX +XXX,XX @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) | 27 | @@ -XXX,XX +XXX,XX @@ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno) |
20 | tcg_temp_free_i32(tmp2); | 28 | /* An indirect jump so that we still trigger the debug exception. */ |
21 | store_reg(s, rd, tmp); | 29 | gen_set_pc_im(s, dest); |
22 | break; | 30 | s->base.is_jmp = DISAS_JUMP; |
23 | + case 0x6: /* ERET */ | 31 | - } else { |
24 | + if (op1 != 3) { | 32 | + return; |
25 | + goto illegal_op; | 33 | + } |
26 | + } | 34 | + switch (s->base.is_jmp) { |
27 | + if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) { | 35 | + case DISAS_NEXT: |
28 | + goto illegal_op; | 36 | + case DISAS_TOO_MANY: |
29 | + } | 37 | + case DISAS_NORETURN: |
30 | + if ((insn & 0x000fff0f) != 0x0000000e) { | 38 | + /* |
31 | + /* UNPREDICTABLE; we choose to UNDEF */ | 39 | + * The normal case: just go to the destination TB. |
32 | + goto illegal_op; | 40 | + * NB: NORETURN happens if we generate code like |
33 | + } | 41 | + * gen_brcondi(l); |
34 | + | 42 | + * gen_jmp(); |
35 | + if (s->current_el == 2) { | 43 | + * gen_set_label(l); |
36 | + tmp = load_cpu_field(elr_el[2]); | 44 | + * gen_jmp(); |
37 | + } else { | 45 | + * on the second call to gen_jmp(). |
38 | + tmp = load_reg(s, 14); | 46 | + */ |
39 | + } | 47 | gen_goto_tb(s, tbno, dest); |
40 | + gen_exception_return(s, tmp); | 48 | + break; |
41 | + break; | 49 | + case DISAS_UPDATE_NOCHAIN: |
42 | case 7: | 50 | + case DISAS_UPDATE_EXIT: |
43 | { | 51 | + /* |
44 | int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4); | 52 | + * We already decided we're leaving the TB for some other reason. |
45 | @@ -XXX,XX +XXX,XX @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) | 53 | + * Avoid using goto_tb so we really do exit back to the main loop |
46 | if (rn != 14 || rd != 15) { | 54 | + * and don't chain to another TB. |
47 | goto illegal_op; | 55 | + */ |
48 | } | 56 | + gen_set_pc_im(s, dest); |
49 | - tmp = load_reg(s, rn); | 57 | + gen_goto_ptr(); |
50 | - tcg_gen_subi_i32(tmp, tmp, insn & 0xff); | 58 | + s->base.is_jmp = DISAS_NORETURN; |
51 | + if (s->current_el == 2) { | 59 | + break; |
52 | + /* ERET from Hyp uses ELR_Hyp, not LR */ | 60 | + default: |
53 | + if (insn & 0xff) { | 61 | + /* |
54 | + goto illegal_op; | 62 | + * We shouldn't be emitting code for a jump and also have |
55 | + } | 63 | + * is_jmp set to one of the special cases like DISAS_SWI. |
56 | + tmp = load_cpu_field(elr_el[2]); | 64 | + */ |
57 | + } else { | 65 | + g_assert_not_reached(); |
58 | + tmp = load_reg(s, rn); | 66 | } |
59 | + tcg_gen_subi_i32(tmp, tmp, insn & 0xff); | 67 | } |
60 | + } | 68 | |
61 | gen_exception_return(s, tmp); | ||
62 | break; | ||
63 | case 6: /* MRS */ | ||
64 | -- | 69 | -- |
65 | 2.18.0 | 70 | 2.20.1 |
66 | 71 | ||
1 | The AArch32 HSR is the equivalent of AArch64 ESR_EL2; | 1 | Architecturally, for an M-profile CPU with the LOB feature the |
---|---|---|---|
2 | we can implement it by marking our existing ESR_EL2 regdef | 2 | LTPSIZE field in FPDSCR is always constant 4. QEMU's implementation |
3 | as STATE_BOTH. It also needs to be "RES0 from EL3 if | 3 | enforces this everywhere, except that we don't check that it is true |
4 | EL2 not implemented", so add the missing stanza to | 4 | in incoming migration data. |
5 | el3_no_el2_cp_reginfo. | 5 | |
6 | We're going to add code in gen_update_fp_context() which relies on | ||
7 | the "always 4" property. Since this is TCG-only, we don't actually | ||
8 | need to be robust to bogus incoming migration data, and the effect of | ||
9 | it being wrong would be wrong code generation rather than a QEMU | ||
10 | crash; but if it did ever happen somehow it would be very difficult | ||
11 | to track down the cause. Add a check so that we fail the inbound | ||
12 | migration if the FPDSCR.LTPSIZE value is incorrect. | ||
6 | 13 | ||
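A minimal illustration of what the new post-load check accepts and rejects (not from the patch; the field position values FPCR_LTPSIZE_SHIFT == 16 and FPCR_LTPSIZE_LENGTH == 3 are assumptions here):

    /* Sketch: LTPSIZE must read back as 4 or the inbound migration fails */
    uint32_t good_fpdscr = 4u << 16;  /* LTPSIZE == 4, the only legal value */
    uint32_t bad_fpdscr  = 2u << 16;  /* bogus incoming migration data */

    assert(extract32(good_fpdscr, 16, 3) == 4);  /* cpu_post_load() -> 0 */
    assert(extract32(bad_fpdscr, 16, 3) != 4);   /* cpu_post_load() -> -1 */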
7 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 14 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
8 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 15 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
9 | Reviewed-by: Luc Michel <luc.michel@greensocs.com> | 16 | Message-id: 20210913095440.13462-3-peter.maydell@linaro.org |
10 | Message-id: 20180814124254.5229-8-peter.maydell@linaro.org | ||
11 | --- | 17 | --- |
12 | target/arm/helper.c | 6 +++++- | 18 | target/arm/machine.c | 13 +++++++++++++ |
13 | 1 file changed, 5 insertions(+), 1 deletion(-) | 19 | 1 file changed, 13 insertions(+) |
14 | 20 | ||
15 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 21 | diff --git a/target/arm/machine.c b/target/arm/machine.c |
16 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/target/arm/helper.c | 23 | --- a/target/arm/machine.c |
18 | +++ b/target/arm/helper.c | 24 | +++ b/target/arm/machine.c |
19 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { | 25 | @@ -XXX,XX +XXX,XX @@ static int cpu_post_load(void *opaque, int version_id) |
20 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, | 26 | hw_breakpoint_update_all(cpu); |
21 | .access = PL2_RW, | 27 | hw_watchpoint_update_all(cpu); |
22 | .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, | 28 | |
23 | + { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, | 29 | + /* |
24 | + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, | 30 | + * TCG gen_update_fp_context() relies on the invariant that |
25 | + .access = PL2_RW, | 31 | + * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension; |
26 | + .type = ARM_CP_CONST, .resetvalue = 0 }, | 32 | + * forbid bogus incoming data with some other value. |
27 | { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, | 33 | + */ |
28 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, | 34 | + if (arm_feature(env, ARM_FEATURE_M) && cpu_isar_feature(aa32_lob, cpu)) { |
29 | .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | 35 | + if (extract32(env->v7m.fpdscr[M_REG_NS], |
30 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | 36 | + FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4 || |
31 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, | 37 | + extract32(env->v7m.fpdscr[M_REG_S], |
32 | .access = PL2_RW, | 38 | + FPCR_LTPSIZE_SHIFT, FPCR_LTPSIZE_LENGTH) != 4) { |
33 | .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, | 39 | + return -1; |
34 | - { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64, | 40 | + } |
35 | + { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, | 41 | + } |
36 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, | 42 | if (!kvm_enabled()) { |
37 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, | 43 | pmu_op_finish(&cpu->env); |
38 | { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, | 44 | } |
39 | -- | 45 | -- |
40 | 2.18.0 | 46 | 2.20.1 |
41 | 47 | ||
1 | ARMCPRegInfo structs will default to .cp = 15 if they | 1 | Our current codegen for MVE always calls out to helper functions, |
---|---|---|---|
2 | are ARM_CP_STATE_BOTH, but not if they are ARM_CP_STATE_AA32 | 2 | because some byte lanes might be predicated. The common case is that |
3 | (because a coprocessor number of 0 is valid for AArch32). | 3 | in fact there is no predication active and all lanes should be |
4 | We forgot to explicitly set .cp = 15 for the HMAIR1 and | 4 | updated together, so we can produce better code by detecting that and |
5 | HAMAIR1 regdefs, which meant they would UNDEF when the guest | 5 | using the TCG generic vector infrastructure. |
6 | tried to access them under cp15. | 6 | |
7 | Add a TB flag that is set when we can guarantee that there is no | ||
8 | active MVE predication, and a bool in the DisasContext. Subsequent | ||
9 | patches will use this flag to generate improved code for some | ||
10 | instructions. | ||
11 | |||
12 | In most cases when the predication state changes we simply end the TB | ||
13 | after that instruction. For the code called from vfp_access_check() | ||
14 | that handles lazy state preservation and creating a new FP context, | ||
15 | we can usually avoid having to try to end the TB because luckily the | ||
16 | new value of the flag following the register changes in those | ||
17 | sequences doesn't depend on any runtime decisions. We do have to end | ||
18 | the TB if the guest has enabled lazy FP state preservation but not | ||
19 | automatic state preservation, but this is an odd corner case that is | ||
20 | not going to be common in real-world code. | ||
7 | 21 | ||
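To see why predication forces the out-of-line helpers in the first place, here is a plain-C sketch (an illustration only, not QEMU code): a predicated MVE operation must merge result lanes under the VPR/LTPSIZE mask, which a single flat TCG vector operation cannot express, so the vector fast path is only safe when this new flag guarantees that every lane is enabled.

    /* Scalar sketch of a predicated byte-wise AND across a 16-byte Q register */
    void predicated_and(uint8_t *d, const uint8_t *a, const uint8_t *b,
                        uint16_t mask /* one bit per byte lane */)
    {
        for (int i = 0; i < 16; i++) {
            if (mask & (1u << i)) {
                d[i] = a[i] & b[i];   /* enabled lane: write the result */
            }
            /* disabled lane: destination byte is left unchanged */
        }
        /* Only when mask == 0xffff can this collapse to one whole-vector AND. */
    }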
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 22 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 23 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
10 | Reviewed-by: Luc Michel <luc.michel@greensocs.com> | 24 | Message-id: 20210913095440.13462-4-peter.maydell@linaro.org |
11 | Message-id: 20180814124254.5229-3-peter.maydell@linaro.org | ||
12 | --- | 25 | --- |
13 | target/arm/helper.c | 8 ++++---- | 26 | target/arm/cpu.h | 4 +++- |
14 | 1 file changed, 4 insertions(+), 4 deletions(-) | 27 | target/arm/translate.h | 2 ++ |
15 | 28 | target/arm/helper.c | 33 +++++++++++++++++++++++++++++++++ | |
29 | target/arm/translate-m-nocp.c | 8 +++++++- | ||
30 | target/arm/translate-mve.c | 13 ++++++++++++- | ||
31 | target/arm/translate-vfp.c | 33 +++++++++++++++++++++++++++------ | ||
32 | target/arm/translate.c | 8 ++++++++ | ||
33 | 7 files changed, 92 insertions(+), 9 deletions(-) | ||
34 | |||
35 | diff --git a/target/arm/cpu.h b/target/arm/cpu.h | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/target/arm/cpu.h | ||
38 | +++ b/target/arm/cpu.h | ||
39 | @@ -XXX,XX +XXX,XX @@ typedef ARMCPU ArchCPU; | ||
40 | * | TBFLAG_AM32 | +-----+----------+ | ||
41 | * | | |TBFLAG_M32| | ||
42 | * +-------------+----------------+----------+ | ||
43 | - * 31 23 5 4 0 | ||
44 | + * 31 23 6 5 0 | ||
45 | * | ||
46 | * Unless otherwise noted, these bits are cached in env->hflags. | ||
47 | */ | ||
48 | @@ -XXX,XX +XXX,XX @@ FIELD(TBFLAG_M32, LSPACT, 2, 1) /* Not cached. */ | ||
49 | FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1) /* Not cached. */ | ||
50 | /* Set if FPCCR.S does not match current security state */ | ||
51 | FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1) /* Not cached. */ | ||
52 | +/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */ | ||
53 | +FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1) /* Not cached. */ | ||
54 | |||
55 | /* | ||
56 | * Bit usage when in AArch64 state | ||
57 | diff --git a/target/arm/translate.h b/target/arm/translate.h | ||
58 | index XXXXXXX..XXXXXXX 100644 | ||
59 | --- a/target/arm/translate.h | ||
60 | +++ b/target/arm/translate.h | ||
61 | @@ -XXX,XX +XXX,XX @@ typedef struct DisasContext { | ||
62 | bool align_mem; | ||
63 | /* True if PSTATE.IL is set */ | ||
64 | bool pstate_il; | ||
65 | + /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */ | ||
66 | + bool mve_no_pred; | ||
67 | /* | ||
68 | * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. | ||
69 | * < 0, set by the current instruction. | ||
16 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 70 | diff --git a/target/arm/helper.c b/target/arm/helper.c |
17 | index XXXXXXX..XXXXXXX 100644 | 71 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/target/arm/helper.c | 72 | --- a/target/arm/helper.c |
19 | +++ b/target/arm/helper.c | 73 | +++ b/target/arm/helper.c |
20 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { | 74 | @@ -XXX,XX +XXX,XX @@ static inline void assert_hflags_rebuild_correctly(CPUARMState *env) |
21 | .access = PL2_RW, .type = ARM_CP_CONST, | 75 | #endif |
22 | .resetvalue = 0 }, | 76 | } |
23 | { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, | 77 | |
24 | - .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, | 78 | +static bool mve_no_pred(CPUARMState *env) |
25 | + .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, | 79 | +{ |
26 | .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | 80 | + /* |
27 | { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, | 81 | + * Return true if there is definitely no predication of MVE |
28 | .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, | 82 | + * instructions by VPR or LTPSIZE. (Returning false even if there |
29 | .access = PL2_RW, .type = ARM_CP_CONST, | 83 | + * isn't any predication is OK; generated code will just be |
30 | .resetvalue = 0 }, | 84 | + * a little worse.) |
31 | { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, | 85 | + * If the CPU does not implement MVE then this TB flag is always 0. |
32 | - .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | 86 | + * |
33 | + .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | 87 | + * NOTE: if you change this logic, the "recalculate s->mve_no_pred" |
34 | .access = PL2_RW, .type = ARM_CP_CONST, | 88 | + * logic in gen_update_fp_context() needs to be updated to match. |
35 | .resetvalue = 0 }, | 89 | + * |
36 | { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, | 90 | + * We do not include the effect of the ECI bits here -- they are |
37 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | 91 | + * tracked in other TB flags. This simplifies the logic for |
38 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), | 92 | + * "when did we emit code that changes the MVE_NO_PRED TB flag |
39 | .resetvalue = 0 }, | 93 | + * and thus need to end the TB?". |
40 | { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, | 94 | + */ |
41 | - .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, | 95 | + if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) { |
42 | + .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, | 96 | + return false; |
43 | .access = PL2_RW, .type = ARM_CP_ALIAS, | 97 | + } |
44 | .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, | 98 | + if (env->v7m.vpr) { |
45 | { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, | 99 | + return false; |
46 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | 100 | + } |
47 | .resetvalue = 0 }, | 101 | + if (env->v7m.ltpsize < 4) { |
48 | /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ | 102 | + return false; |
49 | { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, | 103 | + } |
50 | - .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | 104 | + return true; |
51 | + .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | 105 | +} |
52 | .access = PL2_RW, .type = ARM_CP_CONST, | 106 | + |
53 | .resetvalue = 0 }, | 107 | void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, |
54 | { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, | 108 | target_ulong *cs_base, uint32_t *pflags) |
109 | { | ||
110 | @@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | ||
111 | if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { | ||
112 | DP_TBFLAG_M32(flags, LSPACT, 1); | ||
113 | } | ||
114 | + | ||
115 | + if (mve_no_pred(env)) { | ||
116 | + DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); | ||
117 | + } | ||
118 | } else { | ||
119 | /* | ||
120 | * Note that XSCALE_CPAR shares bits with VECSTRIDE. | ||
121 | diff --git a/target/arm/translate-m-nocp.c b/target/arm/translate-m-nocp.c | ||
122 | index XXXXXXX..XXXXXXX 100644 | ||
123 | --- a/target/arm/translate-m-nocp.c | ||
124 | +++ b/target/arm/translate-m-nocp.c | ||
125 | @@ -XXX,XX +XXX,XX @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) | ||
126 | |||
127 | clear_eci_state(s); | ||
128 | |||
129 | - /* End the TB, because we have updated FP control bits */ | ||
130 | + /* | ||
131 | + * End the TB, because we have updated FP control bits, | ||
132 | + * and possibly VPR or LTPSIZE. | ||
133 | + */ | ||
134 | s->base.is_jmp = DISAS_UPDATE_EXIT; | ||
135 | return true; | ||
136 | } | ||
137 | @@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, | ||
138 | store_cpu_field(control, v7m.control[M_REG_S]); | ||
139 | tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK); | ||
140 | gen_helper_vfp_set_fpscr(cpu_env, tmp); | ||
141 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
142 | tcg_temp_free_i32(tmp); | ||
143 | tcg_temp_free_i32(sfpa); | ||
144 | break; | ||
145 | @@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, | ||
146 | } | ||
147 | tmp = loadfn(s, opaque, true); | ||
148 | store_cpu_field(tmp, v7m.vpr); | ||
149 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
150 | break; | ||
151 | case ARM_VFP_P0: | ||
152 | { | ||
153 | @@ -XXX,XX +XXX,XX @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, | ||
154 | tcg_gen_deposit_i32(vpr, vpr, tmp, | ||
155 | R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH); | ||
156 | store_cpu_field(vpr, v7m.vpr); | ||
157 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
158 | tcg_temp_free_i32(tmp); | ||
159 | break; | ||
160 | } | ||
161 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c | ||
162 | index XXXXXXX..XXXXXXX 100644 | ||
163 | --- a/target/arm/translate-mve.c | ||
164 | +++ b/target/arm/translate-mve.c | ||
165 | @@ -XXX,XX +XXX,XX @@ DO_LOGIC(VORR, gen_helper_mve_vorr) | ||
166 | DO_LOGIC(VORN, gen_helper_mve_vorn) | ||
167 | DO_LOGIC(VEOR, gen_helper_mve_veor) | ||
168 | |||
169 | -DO_LOGIC(VPSEL, gen_helper_mve_vpsel) | ||
170 | +static bool trans_VPSEL(DisasContext *s, arg_2op *a) | ||
171 | +{ | ||
172 | + /* This insn updates predication bits */ | ||
173 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
174 | + return do_2op(s, a, gen_helper_mve_vpsel); | ||
175 | +} | ||
176 | |||
177 | #define DO_2OP(INSN, FN) \ | ||
178 | static bool trans_##INSN(DisasContext *s, arg_2op *a) \ | ||
179 | @@ -XXX,XX +XXX,XX @@ static bool trans_VPNOT(DisasContext *s, arg_VPNOT *a) | ||
180 | } | ||
181 | |||
182 | gen_helper_mve_vpnot(cpu_env); | ||
183 | + /* This insn updates predication bits */ | ||
184 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
185 | mve_update_eci(s); | ||
186 | return true; | ||
187 | } | ||
188 | @@ -XXX,XX +XXX,XX @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn) | ||
189 | /* VPT */ | ||
190 | gen_vpst(s, a->mask); | ||
191 | } | ||
192 | + /* This insn updates predication bits */ | ||
193 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
194 | mve_update_eci(s); | ||
195 | return true; | ||
196 | } | ||
197 | @@ -XXX,XX +XXX,XX @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a, | ||
198 | /* VPT */ | ||
199 | gen_vpst(s, a->mask); | ||
200 | } | ||
201 | + /* This insn updates predication bits */ | ||
202 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
203 | mve_update_eci(s); | ||
204 | return true; | ||
205 | } | ||
206 | diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c | ||
207 | index XXXXXXX..XXXXXXX 100644 | ||
208 | --- a/target/arm/translate-vfp.c | ||
209 | +++ b/target/arm/translate-vfp.c | ||
210 | @@ -XXX,XX +XXX,XX @@ static inline long vfp_f16_offset(unsigned reg, bool top) | ||
211 | * Generate code for M-profile lazy FP state preservation if needed; | ||
212 | * this corresponds to the pseudocode PreserveFPState() function. | ||
213 | */ | ||
214 | -static void gen_preserve_fp_state(DisasContext *s) | ||
215 | +static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update) | ||
216 | { | ||
217 | if (s->v7m_lspact) { | ||
218 | /* | ||
219 | @@ -XXX,XX +XXX,XX @@ static void gen_preserve_fp_state(DisasContext *s) | ||
220 | * any further FP insns in this TB. | ||
221 | */ | ||
222 | s->v7m_lspact = false; | ||
223 | + /* | ||
224 | + * The helper might have zeroed VPR, so we do not know the | ||
225 | + * correct value for the MVE_NO_PRED TB flag any more. | ||
226 | + * If we're about to create a new fp context then that | ||
227 | + * will precisely determine the MVE_NO_PRED value (see | ||
228 | + * gen_update_fp_context()). Otherwise, we must: | ||
229 | + * - set s->mve_no_pred to false, so this instruction | ||
230 | + * is generated to use helper functions | ||
231 | + * - end the TB now, without chaining to the next TB | ||
232 | + */ | ||
233 | + if (skip_context_update || !s->v7m_new_fp_ctxt_needed) { | ||
234 | + s->mve_no_pred = false; | ||
235 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
236 | + } | ||
237 | } | ||
238 | } | ||
239 | |||
240 | @@ -XXX,XX +XXX,XX @@ static void gen_update_fp_context(DisasContext *s) | ||
241 | TCGv_i32 z32 = tcg_const_i32(0); | ||
242 | store_cpu_field(z32, v7m.vpr); | ||
243 | } | ||
244 | - | ||
245 | /* | ||
246 | - * We don't need to arrange to end the TB, because the only | ||
247 | - * parts of FPSCR which we cache in the TB flags are the VECLEN | ||
248 | - * and VECSTRIDE, and those don't exist for M-profile. | ||
249 | + * We just updated the FPSCR and VPR. Some of this state is cached | ||
250 | + * in the MVE_NO_PRED TB flag. We want to avoid having to end the | ||
251 | + * TB here, which means we need the new value of the MVE_NO_PRED | ||
252 | + * flag to be exactly known here and the same for all executions. | ||
253 | + * Luckily FPDSCR.LTPSIZE is always constant 4 and the VPR is | ||
254 | + * always set to 0, so the new MVE_NO_PRED flag is always 1 | ||
255 | + * if and only if we have MVE. | ||
256 | + * | ||
257 | + * (The other FPSCR state cached in TB flags is VECLEN and VECSTRIDE, | ||
258 | + * but those do not exist for M-profile, so are not relevant here.) | ||
259 | */ | ||
260 | + s->mve_no_pred = dc_isar_feature(aa32_mve, s); | ||
261 | |||
262 | if (s->v8m_secure) { | ||
263 | bits |= R_V7M_CONTROL_SFPA_MASK; | ||
264 | @@ -XXX,XX +XXX,XX @@ bool vfp_access_check_m(DisasContext *s, bool skip_context_update) | ||
265 | /* Handle M-profile lazy FP state mechanics */ | ||
266 | |||
267 | /* Trigger lazy-state preservation if necessary */ | ||
268 | - gen_preserve_fp_state(s); | ||
269 | + gen_preserve_fp_state(s, skip_context_update); | ||
270 | |||
271 | if (!skip_context_update) { | ||
272 | /* Update ownership of FP context and create new FP context if needed */ | ||
273 | diff --git a/target/arm/translate.c b/target/arm/translate.c | ||
274 | index XXXXXXX..XXXXXXX 100644 | ||
275 | --- a/target/arm/translate.c | ||
276 | +++ b/target/arm/translate.c | ||
277 | @@ -XXX,XX +XXX,XX @@ static bool trans_DLS(DisasContext *s, arg_DLS *a) | ||
278 | /* DLSTP: set FPSCR.LTPSIZE */ | ||
279 | tmp = tcg_const_i32(a->size); | ||
280 | store_cpu_field(tmp, v7m.ltpsize); | ||
281 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
282 | } | ||
283 | return true; | ||
284 | } | ||
285 | @@ -XXX,XX +XXX,XX @@ static bool trans_WLS(DisasContext *s, arg_WLS *a) | ||
286 | assert(ok); | ||
287 | tmp = tcg_const_i32(a->size); | ||
288 | store_cpu_field(tmp, v7m.ltpsize); | ||
289 | + /* | ||
290 | + * LTPSIZE updated, but MVE_NO_PRED will always be the same thing (0) | ||
291 | + * when we take this upcoming exit from this TB, so gen_jmp_tb() is OK. | ||
292 | + */ | ||
293 | } | ||
294 | gen_jmp_tb(s, s->base.pc_next, 1); | ||
295 | |||
296 | @@ -XXX,XX +XXX,XX @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a) | ||
297 | gen_helper_mve_vctp(cpu_env, masklen); | ||
298 | tcg_temp_free_i32(masklen); | ||
299 | tcg_temp_free_i32(rn_shifted); | ||
300 | + /* This insn updates predication bits */ | ||
301 | + s->base.is_jmp = DISAS_UPDATE_NOCHAIN; | ||
302 | mve_update_eci(s); | ||
303 | return true; | ||
304 | } | ||
305 | @@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) | ||
306 | dc->v7m_new_fp_ctxt_needed = | ||
307 | EX_TBFLAG_M32(tb_flags, NEW_FP_CTXT_NEEDED); | ||
308 | dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT); | ||
309 | + dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED); | ||
310 | } else { | ||
311 | dc->debug_target_el = EX_TBFLAG_ANY(tb_flags, DEBUG_TARGET_EL); | ||
312 | dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B); | ||
55 | -- | 313 | -- |
56 | 2.18.0 | 314 | 2.20.1 |
57 | 315 | ||
1 | Move the m48t59 device away from using old_mmio MemoryRegionOps | 1 | When not predicating, implement the MVE bitwise logical insns |
---|---|---|---|
2 | accessors. | 2 | directly using TCG vector operations. |
3 | 3 | ||
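For reference, the per-lane semantics behind the generic-vector mappings used below, written out as plain C (an illustration, not part of the patch); operand order follows the Arm VBIC/VORN definitions:

    d = a & b;    /* VAND -> tcg_gen_gvec_and  */
    d = a & ~b;   /* VBIC -> tcg_gen_gvec_andc */
    d = a | b;    /* VORR -> tcg_gen_gvec_or   */
    d = a | ~b;   /* VORN -> tcg_gen_gvec_orc  */
    d = a ^ b;    /* VEOR -> tcg_gen_gvec_xor  */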
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
6 | Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> | 6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20180802180602.22047-1-peter.maydell@linaro.org | 7 | Message-id: 20210913095440.13462-5-peter.maydell@linaro.org |
8 | --- | 8 | --- |
9 | hw/timer/m48t59.c | 59 +++++++++-------------------------------------- | 9 | target/arm/translate-mve.c | 51 +++++++++++++++++++++++++++----------- |
10 | 1 file changed, 11 insertions(+), 48 deletions(-) | 10 | 1 file changed, 36 insertions(+), 15 deletions(-) |
11 | 11 | ||
12 | diff --git a/hw/timer/m48t59.c b/hw/timer/m48t59.c | 12 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
13 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/hw/timer/m48t59.c | 14 | --- a/target/arm/translate-mve.c |
15 | +++ b/hw/timer/m48t59.c | 15 | +++ b/target/arm/translate-mve.c |
16 | @@ -XXX,XX +XXX,XX @@ static uint64_t NVRAM_readb(void *opaque, hwaddr addr, unsigned size) | 16 | @@ -XXX,XX +XXX,XX @@ static TCGv_ptr mve_qreg_ptr(unsigned reg) |
17 | return retval; | 17 | return ret; |
18 | } | 18 | } |
19 | 19 | ||
20 | -static void nvram_writeb (void *opaque, hwaddr addr, uint32_t value) | 20 | +static bool mve_no_predication(DisasContext *s) |
21 | -{ | 21 | +{ |
22 | - M48t59State *NVRAM = opaque; | 22 | + /* |
23 | - | 23 | + * Return true if we are executing the entire MVE instruction |
24 | - m48t59_write(NVRAM, addr, value & 0xff); | 24 | + * with no predication or partial-execution, and so we can safely |
25 | -} | 25 | + * use an inline TCG vector implementation. |
26 | - | 26 | + */ |
27 | -static void nvram_writew (void *opaque, hwaddr addr, uint32_t value) | 27 | + return s->eci == 0 && s->mve_no_pred; |
28 | -{ | 28 | +} |
29 | - M48t59State *NVRAM = opaque; | 29 | + |
30 | - | 30 | static bool mve_check_qreg_bank(DisasContext *s, int qmask) |
31 | - m48t59_write(NVRAM, addr, (value >> 8) & 0xff); | ||
32 | - m48t59_write(NVRAM, addr + 1, value & 0xff); | ||
33 | -} | ||
34 | - | ||
35 | -static void nvram_writel (void *opaque, hwaddr addr, uint32_t value) | ||
36 | -{ | ||
37 | - M48t59State *NVRAM = opaque; | ||
38 | - | ||
39 | - m48t59_write(NVRAM, addr, (value >> 24) & 0xff); | ||
40 | - m48t59_write(NVRAM, addr + 1, (value >> 16) & 0xff); | ||
41 | - m48t59_write(NVRAM, addr + 2, (value >> 8) & 0xff); | ||
42 | - m48t59_write(NVRAM, addr + 3, value & 0xff); | ||
43 | -} | ||
44 | - | ||
45 | -static uint32_t nvram_readb (void *opaque, hwaddr addr) | ||
46 | +static uint64_t nvram_read(void *opaque, hwaddr addr, unsigned size) | ||
47 | { | 31 | { |
48 | M48t59State *NVRAM = opaque; | 32 | /* |
49 | 33 | @@ -XXX,XX +XXX,XX @@ static bool trans_VNEG_fp(DisasContext *s, arg_1op *a) | |
50 | return m48t59_read(NVRAM, addr); | 34 | return do_1op(s, a, fns[a->size]); |
51 | } | 35 | } |
52 | 36 | ||
53 | -static uint32_t nvram_readw (void *opaque, hwaddr addr) | 37 | -static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn) |
54 | +static void nvram_write(void *opaque, hwaddr addr, uint64_t value, | 38 | +static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn, |
55 | + unsigned size) | 39 | + GVecGen3Fn *vecfn) |
56 | { | 40 | { |
57 | M48t59State *NVRAM = opaque; | 41 | TCGv_ptr qd, qn, qm; |
58 | - uint32_t retval; | 42 | |
59 | 43 | @@ -XXX,XX +XXX,XX @@ static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn) | |
60 | - retval = m48t59_read(NVRAM, addr) << 8; | 44 | return true; |
61 | - retval |= m48t59_read(NVRAM, addr + 1); | 45 | } |
62 | - return retval; | 46 | |
63 | -} | 47 | - qd = mve_qreg_ptr(a->qd); |
64 | - | 48 | - qn = mve_qreg_ptr(a->qn); |
65 | -static uint32_t nvram_readl (void *opaque, hwaddr addr) | 49 | - qm = mve_qreg_ptr(a->qm); |
66 | -{ | 50 | - fn(cpu_env, qd, qn, qm); |
67 | - M48t59State *NVRAM = opaque; | 51 | - tcg_temp_free_ptr(qd); |
68 | - uint32_t retval; | 52 | - tcg_temp_free_ptr(qn); |
69 | - | 53 | - tcg_temp_free_ptr(qm); |
70 | - retval = m48t59_read(NVRAM, addr) << 24; | 54 | + if (vecfn && mve_no_predication(s)) { |
71 | - retval |= m48t59_read(NVRAM, addr + 1) << 16; | 55 | + vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qn), |
72 | - retval |= m48t59_read(NVRAM, addr + 2) << 8; | 56 | + mve_qreg_offset(a->qm), 16, 16); |
73 | - retval |= m48t59_read(NVRAM, addr + 3); | 57 | + } else { |
74 | - return retval; | 58 | + qd = mve_qreg_ptr(a->qd); |
75 | + return m48t59_write(NVRAM, addr, value); | 59 | + qn = mve_qreg_ptr(a->qn); |
60 | + qm = mve_qreg_ptr(a->qm); | ||
61 | + fn(cpu_env, qd, qn, qm); | ||
62 | + tcg_temp_free_ptr(qd); | ||
63 | + tcg_temp_free_ptr(qn); | ||
64 | + tcg_temp_free_ptr(qm); | ||
65 | + } | ||
66 | mve_update_eci(s); | ||
67 | return true; | ||
76 | } | 68 | } |
77 | 69 | ||
78 | static const MemoryRegionOps nvram_ops = { | 70 | -#define DO_LOGIC(INSN, HELPER) \ |
79 | - .old_mmio = { | 71 | +static bool do_2op(DisasContext *s, arg_2op *a, MVEGenTwoOpFn *fn) |
80 | - .read = { nvram_readb, nvram_readw, nvram_readl, }, | 72 | +{ |
81 | - .write = { nvram_writeb, nvram_writew, nvram_writel, }, | 73 | + return do_2op_vec(s, a, fn, NULL); |
82 | - }, | 74 | +} |
83 | - .endianness = DEVICE_NATIVE_ENDIAN, | 75 | + |
84 | + .read = nvram_read, | 76 | +#define DO_LOGIC(INSN, HELPER, VECFN) \ |
85 | + .write = nvram_write, | 77 | static bool trans_##INSN(DisasContext *s, arg_2op *a) \ |
86 | + .impl.min_access_size = 1, | 78 | { \ |
87 | + .impl.max_access_size = 1, | 79 | - return do_2op(s, a, HELPER); \ |
88 | + .valid.min_access_size = 1, | 80 | + return do_2op_vec(s, a, HELPER, VECFN); \ |
89 | + .valid.max_access_size = 4, | 81 | } |
90 | + .endianness = DEVICE_BIG_ENDIAN, | 82 | |
91 | }; | 83 | -DO_LOGIC(VAND, gen_helper_mve_vand) |
92 | 84 | -DO_LOGIC(VBIC, gen_helper_mve_vbic) | |
93 | static const VMStateDescription vmstate_m48t59 = { | 85 | -DO_LOGIC(VORR, gen_helper_mve_vorr) |
86 | -DO_LOGIC(VORN, gen_helper_mve_vorn) | ||
87 | -DO_LOGIC(VEOR, gen_helper_mve_veor) | ||
88 | +DO_LOGIC(VAND, gen_helper_mve_vand, tcg_gen_gvec_and) | ||
89 | +DO_LOGIC(VBIC, gen_helper_mve_vbic, tcg_gen_gvec_andc) | ||
90 | +DO_LOGIC(VORR, gen_helper_mve_vorr, tcg_gen_gvec_or) | ||
91 | +DO_LOGIC(VORN, gen_helper_mve_vorn, tcg_gen_gvec_orc) | ||
92 | +DO_LOGIC(VEOR, gen_helper_mve_veor, tcg_gen_gvec_xor) | ||
93 | |||
94 | static bool trans_VPSEL(DisasContext *s, arg_2op *a) | ||
95 | { | ||
94 | -- | 96 | -- |
95 | 2.18.0 | 97 | 2.20.1 |
96 | 98 | ||
1 | The PL08x model currently will unconditionally call hw_error() | 1 | Optimize MVE arithmetic ops when we have a TCG |
---|---|---|---|
2 | if the DMA engine is enabled by the guest. This has been | 2 | vector operation we can use. |
3 | present since the PL080 model was added in 2006, and is | 3 | |
4 | presumably either unintentional debug code left enabled, | ||
5 | or a guard against untested DMA engine code being used. | ||
6 | |||
7 | Remove the hw_error(), since we now have a guest which | ||
8 | will actually try to use the DMA engine (the self-test | ||
9 | binary for the AN505 MPS2 FPGA image). | ||
10 | 3 | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 20210913095440.13462-6-peter.maydell@linaro.org | ||
13 | --- | 8 | --- |
14 | hw/dma/pl080.c | 1 - | 9 | target/arm/translate-mve.c | 20 +++++++++++--------- |
15 | 1 file changed, 1 deletion(-) | 10 | 1 file changed, 11 insertions(+), 9 deletions(-) |
16 | 11 | ||
17 | diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c | 12 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
18 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/hw/dma/pl080.c | 14 | --- a/target/arm/translate-mve.c |
20 | +++ b/hw/dma/pl080.c | 15 | +++ b/target/arm/translate-mve.c |
21 | @@ -XXX,XX +XXX,XX @@ static void pl080_run(PL080State *s) | 16 | @@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a) |
22 | if ((s->conf & PL080_CONF_E) == 0) | 17 | return do_2op(s, a, gen_helper_mve_vpsel); |
23 | return; | 18 | } |
24 | 19 | ||
25 | -hw_error("DMA active\n"); | 20 | -#define DO_2OP(INSN, FN) \ |
26 | /* If we are already in the middle of a DMA operation then indicate that | 21 | +#define DO_2OP_VEC(INSN, FN, VECFN) \ |
27 | there may be new DMA requests and return immediately. */ | 22 | static bool trans_##INSN(DisasContext *s, arg_2op *a) \ |
28 | if (s->running) { | 23 | { \ |
24 | static MVEGenTwoOpFn * const fns[] = { \ | ||
25 | @@ -XXX,XX +XXX,XX @@ static bool trans_VPSEL(DisasContext *s, arg_2op *a) | ||
26 | gen_helper_mve_##FN##w, \ | ||
27 | NULL, \ | ||
28 | }; \ | ||
29 | - return do_2op(s, a, fns[a->size]); \ | ||
30 | + return do_2op_vec(s, a, fns[a->size], VECFN); \ | ||
31 | } | ||
32 | |||
33 | -DO_2OP(VADD, vadd) | ||
34 | -DO_2OP(VSUB, vsub) | ||
35 | -DO_2OP(VMUL, vmul) | ||
36 | +#define DO_2OP(INSN, FN) DO_2OP_VEC(INSN, FN, NULL) | ||
37 | + | ||
38 | +DO_2OP_VEC(VADD, vadd, tcg_gen_gvec_add) | ||
39 | +DO_2OP_VEC(VSUB, vsub, tcg_gen_gvec_sub) | ||
40 | +DO_2OP_VEC(VMUL, vmul, tcg_gen_gvec_mul) | ||
41 | DO_2OP(VMULH_S, vmulhs) | ||
42 | DO_2OP(VMULH_U, vmulhu) | ||
43 | DO_2OP(VRMULH_S, vrmulhs) | ||
44 | DO_2OP(VRMULH_U, vrmulhu) | ||
45 | -DO_2OP(VMAX_S, vmaxs) | ||
46 | -DO_2OP(VMAX_U, vmaxu) | ||
47 | -DO_2OP(VMIN_S, vmins) | ||
48 | -DO_2OP(VMIN_U, vminu) | ||
49 | +DO_2OP_VEC(VMAX_S, vmaxs, tcg_gen_gvec_smax) | ||
50 | +DO_2OP_VEC(VMAX_U, vmaxu, tcg_gen_gvec_umax) | ||
51 | +DO_2OP_VEC(VMIN_S, vmins, tcg_gen_gvec_smin) | ||
52 | +DO_2OP_VEC(VMIN_U, vminu, tcg_gen_gvec_umin) | ||
53 | DO_2OP(VABD_S, vabds) | ||
54 | DO_2OP(VABD_U, vabdu) | ||
55 | DO_2OP(VHADD_S, vhadds) | ||
29 | -- | 56 | -- |
30 | 2.18.0 | 57 | 2.20.1 |
31 | 58 | ||
1 | Currently our PL080/PL081 model uses a combination of the CPU's | 1 | Optimize the MVE VNEG and VABS insns by using TCG |
---|---|---|---|
2 | address space (via cpu_physical_memory_{read,write}()) and the | 2 | vector ops when possible. |
3 | system address space for performing DMA accesses. | ||
4 | |||
5 | For the PL081s in the MPS FPGA images, their DMA accesses | ||
6 | must go via Master Security Controllers. Switch the | ||
7 | PL080/PL081 model to take a MemoryRegion property which | ||
8 | defines its downstream for making DMA accesses. | ||
9 | |||
10 | Since the PL08x are only used in two board models, we | ||
11 | make provision of the 'downstream' link mandatory and convert | ||
12 | both users at once, rather than having it be optional with | ||
13 | a default to the system address space. | ||
14 | 3 | ||
15 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
16 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> |
6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> | ||
7 | Message-id: 20210913095440.13462-7-peter.maydell@linaro.org | ||
17 | --- | 8 | --- |
18 | include/hw/dma/pl080.h | 5 +++++ | 9 | target/arm/translate-mve.c | 32 ++++++++++++++++++++++---------- |
19 | hw/arm/realview.c | 8 +++++++- | 10 | 1 file changed, 22 insertions(+), 10 deletions(-) |
20 | hw/arm/versatilepb.c | 9 ++++++++- | ||
21 | hw/dma/pl080.c | 35 +++++++++++++++++++++++++++++------ | ||
22 | 4 files changed, 49 insertions(+), 8 deletions(-) | ||
23 | 11 | ||
24 | diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h | 12 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
25 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/hw/dma/pl080.h | 14 | --- a/target/arm/translate-mve.c |
27 | +++ b/include/hw/dma/pl080.h | 15 | +++ b/target/arm/translate-mve.c |
28 | @@ -XXX,XX +XXX,XX @@ | 16 | @@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) |
29 | * + sysbus IRQ 1: DMACINTERR error interrupt request | 17 | return true; |
30 | * + sysbus IRQ 2: DMACINTTC count interrupt request | ||
31 | * + sysbus MMIO region 0: MemoryRegion for the device's registers | ||
32 | + * + QOM property "downstream": MemoryRegion defining where DMA | ||
33 | + * bus master transactions are made | ||
34 | */ | ||
35 | |||
36 | #ifndef HW_DMA_PL080_H | ||
37 | @@ -XXX,XX +XXX,XX @@ typedef struct PL080State { | ||
38 | qemu_irq irq; | ||
39 | qemu_irq interr; | ||
40 | qemu_irq inttc; | ||
41 | + | ||
42 | + MemoryRegion *downstream; | ||
43 | + AddressSpace downstream_as; | ||
44 | } PL080State; | ||
45 | |||
46 | #endif | ||
47 | diff --git a/hw/arm/realview.c b/hw/arm/realview.c | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/hw/arm/realview.c | ||
50 | +++ b/hw/arm/realview.c | ||
51 | @@ -XXX,XX +XXX,XX @@ static void realview_init(MachineState *machine, | ||
52 | pl011_create(0x1000c000, pic[15], serial_hd(3)); | ||
53 | |||
54 | /* DMA controller is optional, apparently. */ | ||
55 | - sysbus_create_simple("pl081", 0x10030000, pic[24]); | ||
56 | + dev = qdev_create(NULL, "pl081"); | ||
57 | + object_property_set_link(OBJECT(dev), OBJECT(sysmem), "downstream", | ||
58 | + &error_fatal); | ||
59 | + qdev_init_nofail(dev); | ||
60 | + busdev = SYS_BUS_DEVICE(dev); | ||
61 | + sysbus_mmio_map(busdev, 0, 0x10030000); | ||
62 | + sysbus_connect_irq(busdev, 0, pic[24]); | ||
63 | |||
64 | sysbus_create_simple("sp804", 0x10011000, pic[4]); | ||
65 | sysbus_create_simple("sp804", 0x10012000, pic[5]); | ||
66 | diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/hw/arm/versatilepb.c | ||
69 | +++ b/hw/arm/versatilepb.c | ||
70 | @@ -XXX,XX +XXX,XX @@ static void versatile_init(MachineState *machine, int board_id) | ||
71 | pl011_create(0x101f3000, pic[14], serial_hd(2)); | ||
72 | pl011_create(0x10009000, sic[6], serial_hd(3)); | ||
73 | |||
74 | - sysbus_create_simple("pl080", 0x10130000, pic[17]); | ||
75 | + dev = qdev_create(NULL, "pl080"); | ||
76 | + object_property_set_link(OBJECT(dev), OBJECT(sysmem), "downstream", | ||
77 | + &error_fatal); | ||
78 | + qdev_init_nofail(dev); | ||
79 | + busdev = SYS_BUS_DEVICE(dev); | ||
80 | + sysbus_mmio_map(busdev, 0, 0x10130000); | ||
81 | + sysbus_connect_irq(busdev, 0, pic[17]); | ||
82 | + | ||
83 | sysbus_create_simple("sp804", 0x101e2000, pic[4]); | ||
84 | sysbus_create_simple("sp804", 0x101e3000, pic[5]); | ||
85 | |||
86 | diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c | ||
87 | index XXXXXXX..XXXXXXX 100644 | ||
88 | --- a/hw/dma/pl080.c | ||
89 | +++ b/hw/dma/pl080.c | ||
90 | @@ -XXX,XX +XXX,XX @@ | ||
91 | #include "exec/address-spaces.h" | ||
92 | #include "qemu/log.h" | ||
93 | #include "hw/dma/pl080.h" | ||
94 | +#include "qapi/error.h" | ||
95 | |||
96 | #define PL080_CONF_E 0x1 | ||
97 | #define PL080_CONF_M1 0x2 | ||
98 | @@ -XXX,XX +XXX,XX @@ again: | ||
99 | swidth = 1 << ((ch->ctrl >> 18) & 7); | ||
100 | dwidth = 1 << ((ch->ctrl >> 21) & 7); | ||
101 | for (n = 0; n < dwidth; n+= swidth) { | ||
102 | - cpu_physical_memory_read(ch->src, buff + n, swidth); | ||
103 | + address_space_read(&s->downstream_as, ch->src, | ||
104 | + MEMTXATTRS_UNSPECIFIED, buff + n, swidth); | ||
105 | if (ch->ctrl & PL080_CCTRL_SI) | ||
106 | ch->src += swidth; | ||
107 | } | ||
108 | xsize = (dwidth < swidth) ? swidth : dwidth; | ||
109 | /* ??? This may pad the value incorrectly for dwidth < 32. */ | ||
110 | for (n = 0; n < xsize; n += dwidth) { | ||
111 | - cpu_physical_memory_write(ch->dest + n, buff + n, dwidth); | ||
112 | + address_space_write(&s->downstream_as, ch->dest + n, | ||
113 | + MEMTXATTRS_UNSPECIFIED, buff + n, dwidth); | ||
114 | if (ch->ctrl & PL080_CCTRL_DI) | ||
115 | ch->dest += swidth; | ||
116 | } | ||
117 | @@ -XXX,XX +XXX,XX @@ again: | ||
118 | if (size == 0) { | ||
119 | /* Transfer complete. */ | ||
120 | if (ch->lli) { | ||
121 | - ch->src = address_space_ldl_le(&address_space_memory, | ||
122 | + ch->src = address_space_ldl_le(&s->downstream_as, | ||
123 | ch->lli, | ||
124 | MEMTXATTRS_UNSPECIFIED, | ||
125 | NULL); | ||
126 | - ch->dest = address_space_ldl_le(&address_space_memory, | ||
127 | + ch->dest = address_space_ldl_le(&s->downstream_as, | ||
128 | ch->lli + 4, | ||
129 | MEMTXATTRS_UNSPECIFIED, | ||
130 | NULL); | ||
131 | - ch->ctrl = address_space_ldl_le(&address_space_memory, | ||
132 | + ch->ctrl = address_space_ldl_le(&s->downstream_as, | ||
133 | ch->lli + 12, | ||
134 | MEMTXATTRS_UNSPECIFIED, | ||
135 | NULL); | ||
136 | - ch->lli = address_space_ldl_le(&address_space_memory, | ||
137 | + ch->lli = address_space_ldl_le(&s->downstream_as, | ||
138 | ch->lli + 8, | ||
139 | MEMTXATTRS_UNSPECIFIED, | ||
140 | NULL); | ||
141 | @@ -XXX,XX +XXX,XX @@ static void pl080_init(Object *obj) | ||
142 | s->nchannels = 8; | ||
143 | } | 18 | } |
144 | 19 | ||
145 | +static void pl080_realize(DeviceState *dev, Error **errp) | 20 | -static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) |
21 | +static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn, | ||
22 | + GVecGen2Fn vecfn) | ||
23 | { | ||
24 | TCGv_ptr qd, qm; | ||
25 | |||
26 | @@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) | ||
27 | return true; | ||
28 | } | ||
29 | |||
30 | - qd = mve_qreg_ptr(a->qd); | ||
31 | - qm = mve_qreg_ptr(a->qm); | ||
32 | - fn(cpu_env, qd, qm); | ||
33 | - tcg_temp_free_ptr(qd); | ||
34 | - tcg_temp_free_ptr(qm); | ||
35 | + if (vecfn && mve_no_predication(s)) { | ||
36 | + vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), 16, 16); | ||
37 | + } else { | ||
38 | + qd = mve_qreg_ptr(a->qd); | ||
39 | + qm = mve_qreg_ptr(a->qm); | ||
40 | + fn(cpu_env, qd, qm); | ||
41 | + tcg_temp_free_ptr(qd); | ||
42 | + tcg_temp_free_ptr(qm); | ||
43 | + } | ||
44 | mve_update_eci(s); | ||
45 | return true; | ||
46 | } | ||
47 | |||
48 | -#define DO_1OP(INSN, FN) \ | ||
49 | +static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) | ||
146 | +{ | 50 | +{ |
147 | + PL080State *s = PL080(dev); | 51 | + return do_1op_vec(s, a, fn, NULL); |
148 | + | ||
149 | + if (!s->downstream) { | ||
150 | + error_setg(errp, "PL080 'downstream' link not set"); | ||
151 | + return; | ||
152 | + } | ||
153 | + | ||
154 | + address_space_init(&s->downstream_as, s->downstream, "pl080-downstream"); | ||
155 | +} | 52 | +} |
156 | + | 53 | + |
157 | static void pl081_init(Object *obj) | 54 | +#define DO_1OP_VEC(INSN, FN, VECFN) \ |
158 | { | 55 | static bool trans_##INSN(DisasContext *s, arg_1op *a) \ |
159 | PL080State *s = PL080(obj); | 56 | { \ |
160 | @@ -XXX,XX +XXX,XX @@ static void pl081_init(Object *obj) | 57 | static MVEGenOneOpFn * const fns[] = { \ |
161 | s->nchannels = 2; | 58 | @@ -XXX,XX +XXX,XX @@ static bool do_1op(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn) |
162 | } | 59 | gen_helper_mve_##FN##w, \ |
163 | 60 | NULL, \ | |
164 | +static Property pl080_properties[] = { | 61 | }; \ |
165 | + DEFINE_PROP_LINK("downstream", PL080State, downstream, | 62 | - return do_1op(s, a, fns[a->size]); \ |
166 | + TYPE_MEMORY_REGION, MemoryRegion *), | 63 | + return do_1op_vec(s, a, fns[a->size], VECFN); \ |
167 | + DEFINE_PROP_END_OF_LIST(), | 64 | } |
168 | +}; | 65 | |
66 | +#define DO_1OP(INSN, FN) DO_1OP_VEC(INSN, FN, NULL) | ||
169 | + | 67 | + |
170 | static void pl080_class_init(ObjectClass *oc, void *data) | 68 | DO_1OP(VCLZ, vclz) |
171 | { | 69 | DO_1OP(VCLS, vcls) |
172 | DeviceClass *dc = DEVICE_CLASS(oc); | 70 | -DO_1OP(VABS, vabs) |
173 | 71 | -DO_1OP(VNEG, vneg) | |
174 | dc->vmsd = &vmstate_pl080; | 72 | +DO_1OP_VEC(VABS, vabs, tcg_gen_gvec_abs) |
175 | + dc->realize = pl080_realize; | 73 | +DO_1OP_VEC(VNEG, vneg, tcg_gen_gvec_neg) |
176 | + dc->props = pl080_properties; | 74 | DO_1OP(VQABS, vqabs) |
177 | } | 75 | DO_1OP(VQNEG, vqneg) |
178 | 76 | DO_1OP(VMAXA, vmaxa) | |
179 | static const TypeInfo pl080_info = { | ||
180 | -- | 77 | -- |
181 | 2.18.0 | 78 | 2.20.1 |
182 | 79 | ||
183 | 80 | diff view generated by jsdifflib |
1 | Implement the AArch32 HVBAR register; we can do this just by | 1 | Optimize the MVE VDUP insns by using TCG vector ops when possible. |
---|---|---|---|
2 | making the existing VBAR_EL2 regdefs be STATE_BOTH. | ||
3 | 2 | ||
4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
5 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 4 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Reviewed-by: Luc Michel <luc.michel@greensocs.com> | 5 | Message-id: 20210913095440.13462-8-peter.maydell@linaro.org |
7 | Message-id: 20180814124254.5229-5-peter.maydell@linaro.org | ||
8 | --- | 6 | --- |
9 | target/arm/helper.c | 4 ++-- | 7 | target/arm/translate-mve.c | 12 ++++++++---- |
10 | 1 file changed, 2 insertions(+), 2 deletions(-) | 8 | 1 file changed, 8 insertions(+), 4 deletions(-) |
11 | 9 | ||
12 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 10 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
13 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/target/arm/helper.c | 12 | --- a/target/arm/translate-mve.c |
15 | +++ b/target/arm/helper.c | 13 | +++ b/target/arm/translate-mve.c |
16 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo v8_cp_reginfo[] = { | 14 | @@ -XXX,XX +XXX,XX @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) |
17 | 15 | return true; | |
18 | /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ | 16 | } |
19 | static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { | 17 | |
20 | - { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, | 18 | - qd = mve_qreg_ptr(a->qd); |
21 | + { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, | 19 | rt = load_reg(s, a->rt); |
22 | .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, | 20 | - tcg_gen_dup_i32(a->size, rt, rt); |
23 | .access = PL2_RW, | 21 | - gen_helper_mve_vdup(cpu_env, qd, rt); |
24 | .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, | 22 | - tcg_temp_free_ptr(qd); |
25 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | 23 | + if (mve_no_predication(s)) { |
26 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, | 24 | + tcg_gen_gvec_dup_i32(a->size, mve_qreg_offset(a->qd), 16, 16, rt); |
27 | .access = PL2_RW, | 25 | + } else { |
28 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, | 26 | + qd = mve_qreg_ptr(a->qd); |
29 | - { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, | 27 | + tcg_gen_dup_i32(a->size, rt, rt); |
30 | + { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, | 28 | + gen_helper_mve_vdup(cpu_env, qd, rt); |
31 | .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, | 29 | + tcg_temp_free_ptr(qd); |
32 | .access = PL2_RW, .writefn = vbar_write, | 30 | + } |
33 | .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), | 31 | tcg_temp_free_i32(rt); |
32 | mve_update_eci(s); | ||
33 | return true; | ||
34 | -- | 34 | -- |
35 | 2.18.0 | 35 | 2.20.1 |
36 | 36 | ||
1 | The PL080 and PL081 have three outgoing interrupt lines: | 1 | Optimize the MVE VMVN insn by using TCG vector ops when possible. |
---|---|---|---|
2 | * DMACINTERR signals DMA errors | ||
3 | * DMACINTTC is the DMA count interrupt | ||
4 | * DMACINTR is a combined interrupt, the logical OR of the other two | ||
5 | |||
6 | We currently only implement DMACINTR, because that's all the | ||
7 | realview and versatile boards needed, but the instances of the | ||
8 | PL081 in the MPS2 firmware images use all three interrupt lines. | ||
9 | Implement the missing DMACINTERR and DMACINTTC. | ||
10 | 2 | ||
11 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 3 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
12 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 4 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
5 | Message-id: 20210913095440.13462-9-peter.maydell@linaro.org | ||
13 | --- | 6 | --- |
14 | include/hw/dma/pl080.h | 6 +++++- | 7 | target/arm/translate-mve.c | 2 +- |
15 | hw/dma/pl080.c | 13 ++++++++----- | 8 | 1 file changed, 1 insertion(+), 1 deletion(-) |
16 | 2 files changed, 13 insertions(+), 6 deletions(-) | ||
17 | 9 | ||
18 | diff --git a/include/hw/dma/pl080.h b/include/hw/dma/pl080.h | 10 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
19 | index XXXXXXX..XXXXXXX 100644 | 11 | index XXXXXXX..XXXXXXX 100644 |
20 | --- a/include/hw/dma/pl080.h | 12 | --- a/target/arm/translate-mve.c |
21 | +++ b/include/hw/dma/pl080.h | 13 | +++ b/target/arm/translate-mve.c |
22 | @@ -XXX,XX +XXX,XX @@ | 14 | @@ -XXX,XX +XXX,XX @@ static bool trans_VREV64(DisasContext *s, arg_1op *a) |
23 | * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0218e/DDI0218.pdf | 15 | |
24 | * | 16 | static bool trans_VMVN(DisasContext *s, arg_1op *a) |
25 | * QEMU interface: | ||
26 | - * + sysbus IRQ: DMACINTR combined interrupt line | ||
27 | + * + sysbus IRQ 0: DMACINTR combined interrupt line | ||
28 | + * + sysbus IRQ 1: DMACINTERR error interrupt request | ||
29 | + * + sysbus IRQ 2: DMACINTTC count interrupt request | ||
30 | * + sysbus MMIO region 0: MemoryRegion for the device's registers | ||
31 | */ | ||
32 | |||
33 | @@ -XXX,XX +XXX,XX @@ typedef struct PL080State { | ||
34 | /* Flag to avoid recursive DMA invocations. */ | ||
35 | int running; | ||
36 | qemu_irq irq; | ||
37 | + qemu_irq interr; | ||
38 | + qemu_irq inttc; | ||
39 | } PL080State; | ||
40 | |||
41 | #endif | ||
42 | diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/hw/dma/pl080.c | ||
45 | +++ b/hw/dma/pl080.c | ||
46 | @@ -XXX,XX +XXX,XX @@ static const unsigned char pl081_id[] = | ||
47 | |||
48 | static void pl080_update(PL080State *s) | ||
49 | { | 17 | { |
50 | - if ((s->tc_int & s->tc_mask) | 18 | - return do_1op(s, a, gen_helper_mve_vmvn); |
51 | - || (s->err_int & s->err_mask)) | 19 | + return do_1op_vec(s, a, gen_helper_mve_vmvn, tcg_gen_gvec_not); |
52 | - qemu_irq_raise(s->irq); | ||
53 | - else | ||
54 | - qemu_irq_lower(s->irq); | ||
55 | + bool tclevel = (s->tc_int & s->tc_mask); | ||
56 | + bool errlevel = (s->err_int & s->err_mask); | ||
57 | + | ||
58 | + qemu_set_irq(s->interr, errlevel); | ||
59 | + qemu_set_irq(s->inttc, tclevel); | ||
60 | + qemu_set_irq(s->irq, errlevel || tclevel); | ||
61 | } | 20 | } |
62 | 21 | ||
63 | static void pl080_run(PL080State *s) | 22 | static bool trans_VABS_fp(DisasContext *s, arg_1op *a) |
64 | @@ -XXX,XX +XXX,XX @@ static void pl080_init(Object *obj) | ||
65 | memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000); | ||
66 | sysbus_init_mmio(sbd, &s->iomem); | ||
67 | sysbus_init_irq(sbd, &s->irq); | ||
68 | + sysbus_init_irq(sbd, &s->interr); | ||
69 | + sysbus_init_irq(sbd, &s->inttc); | ||
70 | s->nchannels = 8; | ||
71 | } | ||
72 | |||
73 | -- | 23 | -- |
74 | 2.18.0 | 24 | 2.20.1 |
75 | 25 | ||
1 | On real v7M hardware, the NMI line is an externally visible signal | 1 | Optimize the MVE VSHL and VSHR immediate forms by using TCG vector |
---|---|---|---|
2 | that an SoC or board can toggle to assert an NMI. Expose it in | 2 | ops when possible. |
3 | our QEMU NVIC and armv7m container objects so that a board model | ||
4 | can wire it up if it needs to. | ||
5 | |||
6 | In particular, the MPS2 watchdog is wired to NMI. | ||
7 | 3 | ||
8 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
9 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
6 | Message-id: 20210913095440.13462-10-peter.maydell@linaro.org | ||
10 | --- | 7 | --- |
11 | hw/arm/armv7m.c | 1 + | 8 | target/arm/translate-mve.c | 83 +++++++++++++++++++++++++++++--------- |
12 | hw/intc/armv7m_nvic.c | 19 +++++++++++++++++++ | 9 | 1 file changed, 63 insertions(+), 20 deletions(-) |
13 | hw/intc/trace-events | 1 + | ||
14 | 3 files changed, 21 insertions(+) | ||
15 | 10 | ||
16 | diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c | 11 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
17 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/hw/arm/armv7m.c | 13 | --- a/target/arm/translate-mve.c |
19 | +++ b/hw/arm/armv7m.c | 14 | +++ b/target/arm/translate-mve.c |
20 | @@ -XXX,XX +XXX,XX @@ static void armv7m_realize(DeviceState *dev, Error **errp) | 15 | @@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) |
21 | */ | 16 | return do_1imm(s, a, fn); |
22 | qdev_pass_gpios(DEVICE(&s->nvic), dev, NULL); | 17 | } |
23 | qdev_pass_gpios(DEVICE(&s->nvic), dev, "SYSRESETREQ"); | 18 | |
24 | + qdev_pass_gpios(DEVICE(&s->nvic), dev, "NMI"); | 19 | -static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, |
25 | 20 | - bool negateshift) | |
26 | /* Wire the NVIC up to the CPU */ | 21 | +static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, |
27 | sbd = SYS_BUS_DEVICE(&s->nvic); | 22 | + bool negateshift, GVecGen2iFn vecfn) |
28 | diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c | 23 | { |
29 | index XXXXXXX..XXXXXXX 100644 | 24 | TCGv_ptr qd, qm; |
30 | --- a/hw/intc/armv7m_nvic.c | 25 | int shift = a->shift; |
31 | +++ b/hw/intc/armv7m_nvic.c | 26 | @@ -XXX,XX +XXX,XX @@ static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, |
32 | @@ -XXX,XX +XXX,XX @@ static void set_irq_level(void *opaque, int n, int level) | 27 | shift = -shift; |
33 | } | 28 | } |
29 | |||
30 | - qd = mve_qreg_ptr(a->qd); | ||
31 | - qm = mve_qreg_ptr(a->qm); | ||
32 | - fn(cpu_env, qd, qm, tcg_constant_i32(shift)); | ||
33 | - tcg_temp_free_ptr(qd); | ||
34 | - tcg_temp_free_ptr(qm); | ||
35 | + if (vecfn && mve_no_predication(s)) { | ||
36 | + vecfn(a->size, mve_qreg_offset(a->qd), mve_qreg_offset(a->qm), | ||
37 | + shift, 16, 16); | ||
38 | + } else { | ||
39 | + qd = mve_qreg_ptr(a->qd); | ||
40 | + qm = mve_qreg_ptr(a->qm); | ||
41 | + fn(cpu_env, qd, qm, tcg_constant_i32(shift)); | ||
42 | + tcg_temp_free_ptr(qd); | ||
43 | + tcg_temp_free_ptr(qm); | ||
44 | + } | ||
45 | mve_update_eci(s); | ||
46 | return true; | ||
34 | } | 47 | } |
35 | 48 | ||
36 | +/* callback when external NMI line is changed */ | 49 | -#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \ |
37 | +static void nvic_nmi_trigger(void *opaque, int n, int level) | 50 | - static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ |
51 | - { \ | ||
52 | - static MVEGenTwoOpShiftFn * const fns[] = { \ | ||
53 | - gen_helper_mve_##FN##b, \ | ||
54 | - gen_helper_mve_##FN##h, \ | ||
55 | - gen_helper_mve_##FN##w, \ | ||
56 | - NULL, \ | ||
57 | - }; \ | ||
58 | - return do_2shift(s, a, fns[a->size], NEGATESHIFT); \ | ||
59 | +static bool do_2shift(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, | ||
60 | + bool negateshift) | ||
38 | +{ | 61 | +{ |
39 | + NVICState *s = opaque; | 62 | + return do_2shift_vec(s, a, fn, negateshift, NULL); |
63 | +} | ||
40 | + | 64 | + |
41 | + trace_nvic_set_nmi_level(level); | 65 | +#define DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, VECFN) \ |
66 | + static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ | ||
67 | + { \ | ||
68 | + static MVEGenTwoOpShiftFn * const fns[] = { \ | ||
69 | + gen_helper_mve_##FN##b, \ | ||
70 | + gen_helper_mve_##FN##h, \ | ||
71 | + gen_helper_mve_##FN##w, \ | ||
72 | + NULL, \ | ||
73 | + }; \ | ||
74 | + return do_2shift_vec(s, a, fns[a->size], NEGATESHIFT, VECFN); \ | ||
75 | } | ||
76 | |||
77 | -DO_2SHIFT(VSHLI, vshli_u, false) | ||
78 | +#define DO_2SHIFT(INSN, FN, NEGATESHIFT) \ | ||
79 | + DO_2SHIFT_VEC(INSN, FN, NEGATESHIFT, NULL) | ||
42 | + | 80 | + |
81 | +static void do_gvec_shri_s(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
82 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) | ||
83 | +{ | ||
43 | + /* | 84 | + /* |
44 | + * The architecture doesn't specify whether NMI should share | 85 | + * We get here with a negated shift count, and we must handle |
45 | + * the normal-interrupt behaviour of being resampled on | 86 | + * shifts by the element size, which tcg_gen_gvec_sari() does not do. |
46 | + * exception handler return. We choose not to, so just | ||
47 | + * set NMI pending here and don't track the current level. | ||
48 | + */ | 87 | + */ |
49 | + if (level) { | 88 | + shift = -shift; |
50 | + armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false); | 89 | + if (shift == (8 << vece)) { |
90 | + shift--; | ||
91 | + } | ||
92 | + tcg_gen_gvec_sari(vece, dofs, aofs, shift, oprsz, maxsz); | ||
93 | +} | ||
94 | + | ||
95 | +static void do_gvec_shri_u(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
96 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) | ||
97 | +{ | ||
98 | + /* | ||
99 | + * We get here with a negated shift count, and we must handle | ||
100 | + * shifts by the element size, which tcg_gen_gvec_shri() does not do. | ||
101 | + */ | ||
102 | + shift = -shift; | ||
103 | + if (shift == (8 << vece)) { | ||
104 | + tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, 0); | ||
105 | + } else { | ||
106 | + tcg_gen_gvec_shri(vece, dofs, aofs, shift, oprsz, maxsz); | ||
51 | + } | 107 | + } |
52 | +} | 108 | +} |
53 | + | 109 | + |
54 | static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) | 110 | +DO_2SHIFT_VEC(VSHLI, vshli_u, false, tcg_gen_gvec_shli) |
55 | { | 111 | DO_2SHIFT(VQSHLI_S, vqshli_s, false) |
56 | ARMCPU *cpu = s->cpu; | 112 | DO_2SHIFT(VQSHLI_U, vqshli_u, false) |
57 | @@ -XXX,XX +XXX,XX @@ static void armv7m_nvic_instance_init(Object *obj) | 113 | DO_2SHIFT(VQSHLUI, vqshlui_s, false) |
58 | qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1); | 114 | /* These right shifts use a left-shift helper with negated shift count */ |
59 | qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", | 115 | -DO_2SHIFT(VSHRI_S, vshli_s, true) |
60 | M_REG_NUM_BANKS); | 116 | -DO_2SHIFT(VSHRI_U, vshli_u, true) |
61 | + qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1); | 117 | +DO_2SHIFT_VEC(VSHRI_S, vshli_s, true, do_gvec_shri_s) |
62 | } | 118 | +DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u) |
63 | 119 | DO_2SHIFT(VRSHRI_S, vrshli_s, true) | |
64 | static void armv7m_nvic_class_init(ObjectClass *klass, void *data) | 120 | DO_2SHIFT(VRSHRI_U, vrshli_u, true) |
65 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events | ||
66 | index XXXXXXX..XXXXXXX 100644 | ||
67 | --- a/hw/intc/trace-events | ||
68 | +++ b/hw/intc/trace-events | ||
69 | @@ -XXX,XX +XXX,XX @@ nvic_acknowledge_irq(int irq, int prio) "NVIC acknowledge IRQ: %d now active (pr | ||
70 | nvic_get_pending_irq_info(int irq, bool secure) "NVIC next IRQ %d: targets_secure: %d" | ||
71 | nvic_complete_irq(int irq, bool secure) "NVIC complete IRQ %d (secure %d)" | ||
72 | nvic_set_irq_level(int irq, int level) "NVIC external irq %d level set to %d" | ||
73 | +nvic_set_nmi_level(int level) "NVIC external NMI level set to %d" | ||
74 | nvic_sysreg_read(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" | ||
75 | nvic_sysreg_write(uint64_t addr, uint32_t value, unsigned size) "NVIC sysreg write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" | ||
76 | 121 | ||
77 | -- | 122 | -- |
78 | 2.18.0 | 123 | 2.20.1 |
79 | 124 | ||
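As a rough illustration of the shift-count special case that do_gvec_shri_s() and
do_gvec_shri_u() above handle: MVE permits an immediate right shift by the full
element size, while the generic TCG vector shifts only accept counts up to
size-1, so the arithmetic form is clamped and the logical form turns into a zero
fill. The per-lane sketch below (made-up lane_* names, not QEMU code) shows the
equivalent scalar computation for 8-bit elements, assuming the usual arithmetic
right shift of negative values:

    #include <stdint.h>

    /* Arithmetic right shift of one byte lane; MVE allows counts 1..8. */
    static int8_t lane_vshri_s8(int8_t x, unsigned shift)
    {
        if (shift == 8) {
            shift = 7;      /* shifting by 8 gives the same bits as by 7 */
        }
        return x >> shift;
    }

    /* Logical right shift of one byte lane; a shift by 8 clears the lane,
     * matching the tcg_gen_gvec_dup_imm(..., 0) path above. */
    static uint8_t lane_vshri_u8(uint8_t x, unsigned shift)
    {
        if (shift == 8) {
            return 0;
        }
        return x >> shift;
    }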
1 | The PL080/PL081 model is missing a reset function; implement it. | 1 | Optimize the MVE VSHLL insns by using TCG vector ops when possible. |
---|---|---|---|
2 | This includes the VMOVL insn, which we handle in mve.decode as "VSHLL | ||
3 | with zero shift count". | ||
2 | 4 | ||
3 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org> | 6 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Message-id: 20210913095440.13462-11-peter.maydell@linaro.org | ||
5 | --- | 8 | --- |
6 | hw/dma/pl080.c | 25 +++++++++++++++++++++++++ | 9 | target/arm/translate-mve.c | 67 +++++++++++++++++++++++++++++++++----- |
7 | 1 file changed, 25 insertions(+) | 10 | 1 file changed, 59 insertions(+), 8 deletions(-) |
8 | 11 | ||
9 | diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c | 12 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
10 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
11 | --- a/hw/dma/pl080.c | 14 | --- a/target/arm/translate-mve.c |
12 | +++ b/hw/dma/pl080.c | 15 | +++ b/target/arm/translate-mve.c |
13 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pl080_ops = { | 16 | @@ -XXX,XX +XXX,XX @@ DO_2SHIFT_SCALAR(VQSHL_U_scalar, vqshli_u) |
14 | .endianness = DEVICE_NATIVE_ENDIAN, | 17 | DO_2SHIFT_SCALAR(VQRSHL_S_scalar, vqrshli_s) |
15 | }; | 18 | DO_2SHIFT_SCALAR(VQRSHL_U_scalar, vqrshli_u) |
16 | 19 | ||
17 | +static void pl080_reset(DeviceState *dev) | 20 | -#define DO_VSHLL(INSN, FN) \ |
21 | - static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ | ||
22 | - { \ | ||
23 | - static MVEGenTwoOpShiftFn * const fns[] = { \ | ||
24 | - gen_helper_mve_##FN##b, \ | ||
25 | - gen_helper_mve_##FN##h, \ | ||
26 | - }; \ | ||
27 | - return do_2shift(s, a, fns[a->size], false); \ | ||
28 | +#define DO_VSHLL(INSN, FN) \ | ||
29 | + static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ | ||
30 | + { \ | ||
31 | + static MVEGenTwoOpShiftFn * const fns[] = { \ | ||
32 | + gen_helper_mve_##FN##b, \ | ||
33 | + gen_helper_mve_##FN##h, \ | ||
34 | + }; \ | ||
35 | + return do_2shift_vec(s, a, fns[a->size], false, do_gvec_##FN); \ | ||
36 | } | ||
37 | |||
38 | +/* | ||
39 | + * For the VSHLL vector helpers, the vece is the size of the input | ||
40 | + * (ie MO_8 or MO_16); the helpers want to work in the output size. | ||
41 | + * The shift count can be 0..<input size>, inclusive. (0 is VMOVL.) | ||
42 | + */ | ||
43 | +static void do_gvec_vshllbs(unsigned vece, uint32_t dofs, uint32_t aofs, | ||
44 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) | ||
18 | +{ | 45 | +{ |
19 | + PL080State *s = PL080(dev); | 46 | + unsigned ovece = vece + 1; |
20 | + int i; | 47 | + unsigned ibits = vece == MO_8 ? 8 : 16; |
48 | + tcg_gen_gvec_shli(ovece, dofs, aofs, ibits, oprsz, maxsz); | ||
49 | + tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz); | ||
50 | +} | ||
21 | + | 51 | + |
22 | + s->tc_int = 0; | 52 | +static void do_gvec_vshllbu(unsigned vece, uint32_t dofs, uint32_t aofs, |
23 | + s->tc_mask = 0; | 53 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) |
24 | + s->err_int = 0; | 54 | +{ |
25 | + s->err_mask = 0; | 55 | + unsigned ovece = vece + 1; |
26 | + s->conf = 0; | 56 | + tcg_gen_gvec_andi(ovece, dofs, aofs, |
27 | + s->sync = 0; | 57 | + ovece == MO_16 ? 0xff : 0xffff, oprsz, maxsz); |
28 | + s->req_single = 0; | 58 | + tcg_gen_gvec_shli(ovece, dofs, dofs, shift, oprsz, maxsz); |
29 | + s->req_burst = 0; | 59 | +} |
30 | + s->running = 0; | ||
31 | + | 60 | + |
32 | + for (i = 0; i < s->nchannels; i++) { | 61 | +static void do_gvec_vshllts(unsigned vece, uint32_t dofs, uint32_t aofs, |
33 | + s->chan[i].src = 0; | 62 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) |
34 | + s->chan[i].dest = 0; | 63 | +{ |
35 | + s->chan[i].lli = 0; | 64 | + unsigned ovece = vece + 1; |
36 | + s->chan[i].ctrl = 0; | 65 | + unsigned ibits = vece == MO_8 ? 8 : 16; |
37 | + s->chan[i].conf = 0; | 66 | + if (shift == 0) { |
67 | + tcg_gen_gvec_sari(ovece, dofs, aofs, ibits, oprsz, maxsz); | ||
68 | + } else { | ||
69 | + tcg_gen_gvec_andi(ovece, dofs, aofs, | ||
70 | + ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz); | ||
71 | + tcg_gen_gvec_sari(ovece, dofs, dofs, ibits - shift, oprsz, maxsz); | ||
38 | + } | 72 | + } |
39 | +} | 73 | +} |
40 | + | 74 | + |
41 | static void pl080_init(Object *obj) | 75 | +static void do_gvec_vshlltu(unsigned vece, uint32_t dofs, uint32_t aofs, |
42 | { | 76 | + int64_t shift, uint32_t oprsz, uint32_t maxsz) |
43 | SysBusDevice *sbd = SYS_BUS_DEVICE(obj); | 77 | +{ |
44 | @@ -XXX,XX +XXX,XX @@ static void pl080_class_init(ObjectClass *oc, void *data) | 78 | + unsigned ovece = vece + 1; |
45 | dc->vmsd = &vmstate_pl080; | 79 | + unsigned ibits = vece == MO_8 ? 8 : 16; |
46 | dc->realize = pl080_realize; | 80 | + if (shift == 0) { |
47 | dc->props = pl080_properties; | 81 | + tcg_gen_gvec_shri(ovece, dofs, aofs, ibits, oprsz, maxsz); |
48 | + dc->reset = pl080_reset; | 82 | + } else { |
49 | } | 83 | + tcg_gen_gvec_andi(ovece, dofs, aofs, |
50 | 84 | + ovece == MO_16 ? 0xff00 : 0xffff0000, oprsz, maxsz); | |
51 | static const TypeInfo pl080_info = { | 85 | + tcg_gen_gvec_shri(ovece, dofs, dofs, ibits - shift, oprsz, maxsz); |
86 | + } | ||
87 | +} | ||
88 | + | ||
89 | DO_VSHLL(VSHLL_BS, vshllbs) | ||
90 | DO_VSHLL(VSHLL_BU, vshllbu) | ||
91 | DO_VSHLL(VSHLL_TS, vshllts) | ||
52 | -- | 92 | -- |
53 | 2.18.0 | 93 | 2.20.1 |
54 | 94 | ||
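For the widening shifts, the trick in do_gvec_vshllbs() above is worth spelling
out: rather than unpacking each narrow element, the bottom-half input byte is
shifted to the top of the double-width lane and then arithmetically shifted back
down, which sign-extends it and applies the left shift in one pass. A per-lane
sketch for VSHLL.S8 bottom-half (illustrative name, not QEMU code, assuming
two's-complement conversions):

    #include <stdint.h>

    /*
     * One 16-bit output lane of VSHLL.S8 (bottom half), done the way the
     * generated code does it.  The input byte is in the low 8 bits of
     * 'lane'; the neighbouring top byte in the high 8 bits is discarded.
     * 'shift' is 0..8, with 0 being the VMOVL case.
     *
     * Example: bottom byte 0xf0 (-16), shift 2:
     *   lane << 8   -> 0xf000 (-4096 as int16_t)
     *   >> (8 - 2)  -> 0xffc0 (-64), i.e. -16 shifted left by 2, sign-extended.
     */
    static int16_t lane_vshllb_s8(uint16_t lane, unsigned shift)
    {
        int16_t t = (int16_t)(uint16_t)(lane << 8); /* the tcg_gen_gvec_shli step */
        return t >> (8 - shift);                    /* the tcg_gen_gvec_sari step */
    }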
1 | We implement the HAMAIR1 register as RAZ/WI; we had a typo in the | 1 | Optimize the MVE shift-and-insert insns by using TCG |
---|---|---|---|
2 | regdef, though, and were incorrectly naming it HMAIR1 (which is | 2 | vector ops when possible. |
3 | a different register which we also implement as RAZ/WI). | ||
4 | 3 | ||
5 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
6 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
7 | Reviewed-by: Luc Michel <luc.michel@greensocs.com> | 6 | Message-id: 20210913095440.13462-12-peter.maydell@linaro.org |
8 | Message-id: 20180814124254.5229-2-peter.maydell@linaro.org | ||
9 | --- | 7 | --- |
10 | target/arm/helper.c | 4 ++-- | 8 | target/arm/translate-mve.c | 4 ++-- |
11 | 1 file changed, 2 insertions(+), 2 deletions(-) | 9 | 1 file changed, 2 insertions(+), 2 deletions(-) |
12 | 10 | ||
13 | diff --git a/target/arm/helper.c b/target/arm/helper.c | 11 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
14 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/target/arm/helper.c | 13 | --- a/target/arm/translate-mve.c |
16 | +++ b/target/arm/helper.c | 14 | +++ b/target/arm/translate-mve.c |
17 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { | 15 | @@ -XXX,XX +XXX,XX @@ DO_2SHIFT_VEC(VSHRI_U, vshli_u, true, do_gvec_shri_u) |
18 | .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, | 16 | DO_2SHIFT(VRSHRI_S, vrshli_s, true) |
19 | .access = PL2_RW, .type = ARM_CP_CONST, | 17 | DO_2SHIFT(VRSHRI_U, vrshli_u, true) |
20 | .resetvalue = 0 }, | 18 | |
21 | - { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, | 19 | -DO_2SHIFT(VSRI, vsri, false) |
22 | + { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, | 20 | -DO_2SHIFT(VSLI, vsli, false) |
23 | .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | 21 | +DO_2SHIFT_VEC(VSRI, vsri, false, gen_gvec_sri) |
24 | .access = PL2_RW, .type = ARM_CP_CONST, | 22 | +DO_2SHIFT_VEC(VSLI, vsli, false, gen_gvec_sli) |
25 | .resetvalue = 0 }, | 23 | |
26 | @@ -XXX,XX +XXX,XX @@ static const ARMCPRegInfo el2_cp_reginfo[] = { | 24 | #define DO_2SHIFT_FP(INSN, FN) \ |
27 | .access = PL2_RW, .type = ARM_CP_CONST, | 25 | static bool trans_##INSN(DisasContext *s, arg_2shift *a) \ |
28 | .resetvalue = 0 }, | ||
29 | /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ | ||
30 | - { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, | ||
31 | + { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, | ||
32 | .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | ||
33 | .access = PL2_RW, .type = ARM_CP_CONST, | ||
34 | .resetvalue = 0 }, | ||
35 | -- | 26 | -- |
36 | 2.18.0 | 27 | 2.20.1 |
37 | 28 | ||
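The reason VSRI and VSLI go through the gen_gvec_sri/gen_gvec_sli expanders
rather than a plain vector shift is that the destination bits not covered by the
shifted-in value are preserved. Per lane, for 8-bit elements, that is roughly
the following (sketch only, made-up names, not QEMU code):

    #include <stdint.h>

    /* VSRI: shift m right, insert into d; the top 'shift' bits of d survive. */
    static uint8_t lane_vsri_u8(uint8_t d, uint8_t m, unsigned shift) /* 1..8 */
    {
        uint8_t mask = 0xffu >> shift;           /* bits written by the insert */
        return (d & ~mask) | ((m >> shift) & mask);
    }

    /* VSLI: shift m left, insert into d; the bottom 'shift' bits of d survive. */
    static uint8_t lane_vsli_u8(uint8_t d, uint8_t m, unsigned shift) /* 0..7 */
    {
        uint8_t mask = (uint8_t)(0xffu << shift);
        return (d & ~mask) | ((uint8_t)(m << shift) & mask);
    }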
1 | We now support direct execution from MMIO regions in the | 1 | Optimize the MVE 1op-immediate insns (VORR, VBIC, VMOV) to |
---|---|---|---|
2 | core memory subsystem. This means that we don't need to | 2 | use TCG vector ops when possible. |
3 | have device-specific support for it, and we can remove | ||
4 | the request_ptr handling from the Xilinx SPIPS device. | ||
5 | (It was broken anyway due to race conditions, and disabled | ||
6 | by default.) | ||
7 | |||
8 | This device is the only in-tree user of this API. | ||
9 | 3 | ||
10 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> | 4 | Signed-off-by: Peter Maydell <peter.maydell@linaro.org> |
11 | Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 5 | Reviewed-by: Richard Henderson <richard.henderson@linaro.org> |
12 | Reviewed-by: Alistair Francis <alistair.francis@wdc.com> | 6 | Message-id: 20210913095440.13462-13-peter.maydell@linaro.org |
13 | Reviewed-by: KONRAD Frederic <frederic.konrad@adacore.com> | ||
14 | Message-id: 20180817114619.22354-2-peter.maydell@linaro.org | ||
15 | --- | 7 | --- |
16 | hw/ssi/xilinx_spips.c | 46 ------------------------------------------- | 8 | target/arm/translate-mve.c | 26 +++++++++++++++++++++----- |
17 | 1 file changed, 46 deletions(-) | 9 | 1 file changed, 21 insertions(+), 5 deletions(-) |
18 | 10 | ||
19 | diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c | 11 | diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c |
20 | index XXXXXXX..XXXXXXX 100644 | 12 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/hw/ssi/xilinx_spips.c | 13 | --- a/target/arm/translate-mve.c |
22 | +++ b/hw/ssi/xilinx_spips.c | 14 | +++ b/target/arm/translate-mve.c |
23 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps spips_ops = { | 15 | @@ -XXX,XX +XXX,XX @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) |
24 | 16 | return true; | |
25 | static void xilinx_qspips_invalidate_mmio_ptr(XilinxQSPIPS *q) | 17 | } |
18 | |||
19 | -static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) | ||
20 | +static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn, | ||
21 | + GVecGen2iFn *vecfn) | ||
26 | { | 22 | { |
27 | - XilinxSPIPS *s = &q->parent_obj; | 23 | TCGv_ptr qd; |
28 | - | 24 | uint64_t imm; |
29 | - if ((q->mmio_execution_enabled) && (q->lqspi_cached_addr != ~0ULL)) { | 25 | @@ -XXX,XX +XXX,XX @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn) |
30 | - /* Invalidate the current mapped mmio */ | 26 | |
31 | - memory_region_invalidate_mmio_ptr(&s->mmlqspi, q->lqspi_cached_addr, | 27 | imm = asimd_imm_const(a->imm, a->cmode, a->op); |
32 | - LQSPI_CACHE_SIZE); | 28 | |
33 | - } | 29 | - qd = mve_qreg_ptr(a->qd); |
34 | - | 30 | - fn(cpu_env, qd, tcg_constant_i64(imm)); |
35 | q->lqspi_cached_addr = ~0ULL; | 31 | - tcg_temp_free_ptr(qd); |
32 | + if (vecfn && mve_no_predication(s)) { | ||
33 | + vecfn(MO_64, mve_qreg_offset(a->qd), mve_qreg_offset(a->qd), | ||
34 | + imm, 16, 16); | ||
35 | + } else { | ||
36 | + qd = mve_qreg_ptr(a->qd); | ||
37 | + fn(cpu_env, qd, tcg_constant_i64(imm)); | ||
38 | + tcg_temp_free_ptr(qd); | ||
39 | + } | ||
40 | mve_update_eci(s); | ||
41 | return true; | ||
36 | } | 42 | } |
37 | 43 | ||
38 | @@ -XXX,XX +XXX,XX @@ static void lqspi_load_cache(void *opaque, hwaddr addr) | 44 | +static void gen_gvec_vmovi(unsigned vece, uint32_t dofs, uint32_t aofs, |
45 | + int64_t c, uint32_t oprsz, uint32_t maxsz) | ||
46 | +{ | ||
47 | + tcg_gen_gvec_dup_imm(vece, dofs, oprsz, maxsz, c); | ||
48 | +} | ||
49 | + | ||
50 | static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) | ||
51 | { | ||
52 | /* Handle decode of cmode/op here between VORR/VBIC/VMOV */ | ||
53 | MVEGenOneOpImmFn *fn; | ||
54 | + GVecGen2iFn *vecfn; | ||
55 | |||
56 | if ((a->cmode & 1) && a->cmode < 12) { | ||
57 | if (a->op) { | ||
58 | @@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) | ||
59 | * so the VBIC becomes a logical AND operation. | ||
60 | */ | ||
61 | fn = gen_helper_mve_vandi; | ||
62 | + vecfn = tcg_gen_gvec_andi; | ||
63 | } else { | ||
64 | fn = gen_helper_mve_vorri; | ||
65 | + vecfn = tcg_gen_gvec_ori; | ||
66 | } | ||
67 | } else { | ||
68 | /* There is one unallocated cmode/op combination in this space */ | ||
69 | @@ -XXX,XX +XXX,XX @@ static bool trans_Vimm_1r(DisasContext *s, arg_1imm *a) | ||
70 | } | ||
71 | /* asimd_imm_const() sorts out VMVNI vs VMOVI for us */ | ||
72 | fn = gen_helper_mve_vmovi; | ||
73 | + vecfn = gen_gvec_vmovi; | ||
39 | } | 74 | } |
75 | - return do_1imm(s, a, fn); | ||
76 | + return do_1imm(s, a, fn, vecfn); | ||
40 | } | 77 | } |
41 | 78 | ||
42 | -static void *lqspi_request_mmio_ptr(void *opaque, hwaddr addr, unsigned *size, | 79 | static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, |
43 | - unsigned *offset) | ||
44 | -{ | ||
45 | - XilinxQSPIPS *q = opaque; | ||
46 | - hwaddr offset_within_the_region; | ||
47 | - | ||
48 | - if (!q->mmio_execution_enabled) { | ||
49 | - return NULL; | ||
50 | - } | ||
51 | - | ||
52 | - offset_within_the_region = addr & ~(LQSPI_CACHE_SIZE - 1); | ||
53 | - lqspi_load_cache(opaque, offset_within_the_region); | ||
54 | - *size = LQSPI_CACHE_SIZE; | ||
55 | - *offset = offset_within_the_region; | ||
56 | - return q->lqspi_buf; | ||
57 | -} | ||
58 | - | ||
59 | static uint64_t | ||
60 | lqspi_read(void *opaque, hwaddr addr, unsigned int size) | ||
61 | { | ||
62 | @@ -XXX,XX +XXX,XX @@ lqspi_read(void *opaque, hwaddr addr, unsigned int size) | ||
63 | |||
64 | static const MemoryRegionOps lqspi_ops = { | ||
65 | .read = lqspi_read, | ||
66 | - .request_ptr = lqspi_request_mmio_ptr, | ||
67 | .endianness = DEVICE_NATIVE_ENDIAN, | ||
68 | .valid = { | ||
69 | .min_access_size = 1, | ||
70 | @@ -XXX,XX +XXX,XX @@ static void xilinx_qspips_realize(DeviceState *dev, Error **errp) | ||
71 | sysbus_init_mmio(sbd, &s->mmlqspi); | ||
72 | |||
73 | q->lqspi_cached_addr = ~0ULL; | ||
74 | - | ||
75 | - /* mmio_execution breaks migration better aborting than having strange | ||
76 | - * bugs. | ||
77 | - */ | ||
78 | - if (q->mmio_execution_enabled) { | ||
79 | - error_setg(&q->migration_blocker, | ||
80 | - "enabling mmio_execution breaks migration"); | ||
81 | - migrate_add_blocker(q->migration_blocker, &error_fatal); | ||
82 | - } | ||
83 | } | ||
84 | |||
85 | static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp) | ||
86 | @@ -XXX,XX +XXX,XX @@ static Property xilinx_zynqmp_qspips_properties[] = { | ||
87 | DEFINE_PROP_END_OF_LIST(), | ||
88 | }; | ||
89 | |||
90 | -static Property xilinx_qspips_properties[] = { | ||
91 | - /* We had to turn this off for 2.10 as it is not compatible with migration. | ||
92 | - * It can be enabled but will prevent the device to be migrated. | ||
93 | - * This will go aways when a fix will be released. | ||
94 | - */ | ||
95 | - DEFINE_PROP_BOOL("x-mmio-exec", XilinxQSPIPS, mmio_execution_enabled, | ||
96 | - false), | ||
97 | - DEFINE_PROP_END_OF_LIST(), | ||
98 | -}; | ||
99 | - | ||
100 | static Property xilinx_spips_properties[] = { | ||
101 | DEFINE_PROP_UINT8("num-busses", XilinxSPIPS, num_busses, 1), | ||
102 | DEFINE_PROP_UINT8("num-ss-bits", XilinxSPIPS, num_cs, 4), | ||
103 | @@ -XXX,XX +XXX,XX @@ static void xilinx_qspips_class_init(ObjectClass *klass, void * data) | ||
104 | XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass); | ||
105 | |||
106 | dc->realize = xilinx_qspips_realize; | ||
107 | - dc->props = xilinx_qspips_properties; | ||
108 | xsc->reg_ops = &qspips_ops; | ||
109 | xsc->rx_fifo_size = RXFF_A_Q; | ||
110 | xsc->tx_fifo_size = TXFF_A_Q; | ||
111 | -- | 80 | -- |
112 | 2.18.0 | 81 | 2.20.1 |
113 | 82 | ||
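For the 1op-immediate forms, once asimd_imm_const() has expanded the cmode/op
encoding into a 64-bit pattern, the unpredicated case becomes a single
whole-vector operation at MO_64 on each half of the Q register. Roughly, per
64-bit chunk (sketch only, made-up chunk_* names; the pre-complemented VBIC
immediate follows the comment in the code above):

    #include <stdint.h>

    /* VMOV (immediate): every 64-bit chunk becomes 'imm' (tcg_gen_gvec_dup_imm). */
    static uint64_t chunk_vmov_imm(uint64_t imm)             { return imm; }

    /* VORR (immediate): OR the expanded immediate in (tcg_gen_gvec_ori). */
    static uint64_t chunk_vorr_imm(uint64_t d, uint64_t imm) { return d | imm; }

    /* VBIC (immediate): the expansion already complements the immediate,
     * so the bit-clear is a plain AND (tcg_gen_gvec_andi). */
    static uint64_t chunk_vbic_imm(uint64_t d, uint64_t imm) { return d & imm; }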