A last small test of bug fixes before rc1.

thanks
-- PMM

The following changes since commit ed8ad9728a9c0eec34db9dff61dfa2f1dd625637:

  Merge tag 'pull-tpm-2023-07-14-1' of https://github.com/stefanberger/qemu-tpm into staging (2023-07-15 14:54:04 +0100)

are available in the Git repository at:

  https://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20230717

for you to fetch changes up to c2c1c4a35c7c2b1a4140b0942b9797c857e476a4:

  hw/nvram: Avoid unnecessary Xilinx eFuse backstore write (2023-07-17 11:05:52 +0100)

----------------------------------------------------------------
target-arm queue:
 * hw/arm/sbsa-ref: set 'slots' property of xhci
 * linux-user: Remove pointless NULL check in clock_adjtime handling
 * ptw: Fix S1_ptw_translate() debug path
 * ptw: Account for FEAT_RME when applying {N}SW, SA bits
 * accel/tcg: Zero-pad PC in TCG CPU exec trace lines
 * hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

----------------------------------------------------------------
Peter Maydell (5):
      linux-user: Remove pointless NULL check in clock_adjtime handling
      target/arm/ptw.c: Add comments to S1Translate struct fields
      target/arm: Fix S1_ptw_translate() debug path
      target/arm/ptw.c: Account for FEAT_RME when applying {N}SW, SA bits
      accel/tcg: Zero-pad PC in TCG CPU exec trace lines

Tong Ho (1):
      hw/nvram: Avoid unnecessary Xilinx eFuse backstore write

Yuquan Wang (1):
      hw/arm/sbsa-ref: set 'slots' property of xhci

 accel/tcg/cpu-exec.c      |  4 +--
 accel/tcg/translate-all.c |  2 +-
 hw/arm/sbsa-ref.c         |  1 +
 hw/nvram/xlnx-efuse.c     | 11 ++++--
 linux-user/syscall.c      | 12 +++----
 target/arm/ptw.c          | 90 +++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 98 insertions(+), 22 deletions(-)

From: Yuquan Wang <wangyuquan1236@phytium.com.cn>

This extends the slots of xhci to 64, since the default xhci_sysbus
just supports one slot.

Signed-off-by: Wang Yuquan <wangyuquan1236@phytium.com.cn>
Signed-off-by: Chen Baozi <chenbaozi@phytium.com.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Tested-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
Message-id: 20230710063750.473510-2-wangyuquan1236@phytium.com.cn
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/arm/sbsa-ref.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/arm/sbsa-ref.c
+++ b/hw/arm/sbsa-ref.c
@@ -XXX,XX +XXX,XX @@ static void create_xhci(const SBSAMachineState *sms)
     hwaddr base = sbsa_ref_memmap[SBSA_XHCI].base;
     int irq = sbsa_ref_irqmap[SBSA_XHCI];
     DeviceState *dev = qdev_new(TYPE_XHCI_SYSBUS);
+    qdev_prop_set_uint32(dev, "slots", XHCI_MAXSLOTS);

     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
--
2.34.1

In the code for TARGET_NR_clock_adjtime, we set the pointer phtx to
the address of the local variable htx. This means it can never be
NULL, but later in the code we check it for NULL anyway. Coverity
complains about this (CID 1507683) because the NULL check comes after
a call to clock_adjtime() that assumes it is non-NULL.

Since phtx is always &htx, and is used only in three places, it's not
really necessary. Remove it, bringing the code structure in to line
with that for TARGET_NR_clock_adjtime64, which already uses a simple
'&htx' when it wants a pointer to 'htx'.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230623144410.1837261-1-peter.maydell@linaro.org
---
 linux-user/syscall.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
     case TARGET_NR_clock_adjtime:
         {
-            struct timex htx, *phtx = &htx;
+            struct timex htx;

-            if (target_to_host_timex(phtx, arg2) != 0) {
+            if (target_to_host_timex(&htx, arg2) != 0) {
                 return -TARGET_EFAULT;
             }
-            ret = get_errno(clock_adjtime(arg1, phtx));
-            if (!is_error(ret) && phtx) {
-                if (host_to_target_timex(arg2, phtx) != 0) {
-                    return -TARGET_EFAULT;
-                }
+            ret = get_errno(clock_adjtime(arg1, &htx));
+            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
+                return -TARGET_EFAULT;
             }
         }
         return ret;
--
2.34.1

Add comments to the in_* fields in the S1Translate struct
that explain what they're doing.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-2-peter.maydell@linaro.org
---
 target/arm/ptw.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
 #endif

 typedef struct S1Translate {
+    /*
+     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
+     * Together with in_space, specifies the architectural translation regime.
+     */
     ARMMMUIdx in_mmu_idx;
+    /*
+     * in_ptw_idx: specifies which mmuidx to use for the actual
+     * page table descriptor load operations. This will be one of the
+     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     ARMMMUIdx in_ptw_idx;
+    /*
+     * in_space: the security space for this walk. This plus
+     * the in_mmu_idx specify the architectural translation regime.
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     *
+     * Note that the security space for the in_ptw_idx may be different
+     * from that for the in_mmu_idx. We do not need to explicitly track
+     * the in_ptw_idx security space because:
+     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
+     *    itself specifies the security space
+     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
+     *    space used for ptw reads is the same as that of the security
+     *    space of the stage 1 translation for all cases except where
+     *    stage 1 is Secure; in that case the only possibilities for
+     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
+     *    value being Stage2 vs Stage2_S distinguishes those.
+     */
     ARMSecuritySpace in_space;
+    /*
+     * in_secure: whether the translation regime is a Secure one.
+     * This is always equal to arm_space_is_secure(in_space).
+     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
+     * this field is updated accordingly.
+     */
     bool in_secure;
+    /*
+     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
+     * accesses will not update the guest page table access flags
+     * and will not change the state of the softmmu TLBs.
+     */
     bool in_debug;
     /*
      * If this is stage 2 of a stage 1+2 page table walk, then this must
--
2.34.1

In commit fe4a5472ccd6 we rearranged the logic in S1_ptw_translate()
so that the debug-access "call get_phys_addr_*" codepath is used both
when S1 is doing ptw reads from stage 2 and when it is doing ptw
reads from physical memory. However, we didn't update the
calculation of s2ptw->in_space and s2ptw->in_secure to account for
the "ptw reads from physical memory" case. This meant that debug
accesses when in Secure state broke.

Create a new function S2_security_space() which returns the
correct security space to use for the ptw load, and use it to
determine the correct .in_secure and .in_space fields for the
stage 2 lookup for the ptw load.

Reported-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-3-peter.maydell@linaro.org
Fixes: fe4a5472ccd6 ("target/arm: Use get_phys_addr_with_struct in S1_ptw_translate")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/ptw.c | 37 ++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
     }
 }

+static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
+                                          ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * Return the security space to use for stage 2 when doing
+     * the S1 page table descriptor load.
+     */
+    if (regime_is_stage2(s2_mmu_idx)) {
+        /*
+         * The security space for ptw reads is almost always the same
+         * as that of the security space of the stage 1 translation.
+         * The only exception is when stage 1 is Secure; in that case
+         * the ptw read might be to the Secure or the NonSecure space
+         * (but never Realm or Root), and the s2_mmu_idx tells us which.
+         * Root translations are always single-stage.
+         */
+        if (s1_space == ARMSS_Secure) {
+            return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
+        } else {
+            assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
+            assert(s1_space != ARMSS_Root);
+            return s1_space;
+        }
+    } else {
+        /* ptw loads are from phys: the mmu idx itself says which space */
+        return arm_phys_to_space(s2_mmu_idx);
+    }
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    ARMSecuritySpace space = ptw->in_space;
     bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
@@ -XXX,XX +XXX,XX @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
          * From gdbstub, do not use softmmu so that we don't modify the
          * state of the cpu at all, including softmmu tlb contents.
          */
+        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = s2_mmu_idx == ARMMMUIdx_Stage2_S,
-            .in_space = (s2_mmu_idx == ARMMMUIdx_Stage2_S ? ARMSS_Secure
-                         : space == ARMSS_Realm ? ARMSS_Realm
-                         : ARMSS_NonSecure),
+            .in_secure = arm_space_is_secure(s2_space),
+            .in_space = s2_space,
             .in_debug = true,
         };
         GetPhysAddrResult s2 = { };
--
2.34.1

In get_phys_addr_twostage() the code that applies the effects of
VSTCR.{SA,SW} and VTCR.{NSA,NSW} only updates result->f.attrs.secure.
Now we also have f.attrs.space for FEAT_RME, we need to keep the two
in sync.

These bits only have an effect for Secure space translations, not
for Root, so use the input in_space field to determine whether to
apply them rather than the input is_secure. This doesn't actually
make a difference because Root translations are never two-stage,
but it's a little clearer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230710152130.3928330-4-peter.maydell@linaro.org
---
 target/arm/ptw.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
     bool is_secure = ptw->in_secure;
+    ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
     ARMSecuritySpace ipa_space;
@@ -XXX,XX +XXX,XX @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
      * Check if IPA translates to secure or non-secure PA space.
      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
      */
-    result->f.attrs.secure =
-        (is_secure
-         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
-         && (ipa_secure
-             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
+    if (in_space == ARMSS_Secure) {
+        result->f.attrs.secure =
+            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+            && (ipa_secure
+                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
+    }

     return false;
 }
--
2.34.1

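For context, the arm_secure_to_space() helper used in the hunk above maps a
boolean secure flag back to a security space; a rough sketch (simplified, not
the exact QEMU definitions in target/arm/cpu.h) looks like this:

    typedef enum ARMSecuritySpace {
        ARMSS_Secure,
        ARMSS_NonSecure,
        ARMSS_Root,
        ARMSS_Realm,
    } ARMSecuritySpace;

    /* Sketch: map a legacy secure/non-secure flag back to a security space */
    static inline ARMSecuritySpace arm_secure_to_space(bool secure)
    {
        return secure ? ARMSS_Secure : ARMSS_NonSecure;
    }

Recomputing result->f.attrs.space from the freshly updated attrs.secure, as the
patch does, therefore keeps the two fields consistent whenever VSTCR.{SA,SW} or
VTCR.{NSA,NSW} downgrade a Secure IPA to NonSecure.
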
In commit f0a08b0913befbd we changed the type of the PC from
target_ulong to vaddr. In doing so we inadvertently dropped the
zero-padding on the PC in trace lines (the second item inside the []
in these lines). They used to look like this on AArch64, for
instance:

Trace 0: 0x7f2260000100 [00000000/0000000040000000/00000061/ff200000]

and now they look like this:
Trace 0: 0x7f4f50000100 [00000000/40000000/00000061/ff200000]

and if the PC happens to be somewhere low like 0x5000
then the field is shown as /5000/.

This is because TARGET_FMT_lx is a "%08x" or "%016x" specifier,
depending on TARGET_LONG_SIZE, whereas VADDR_PRIx is just PRIx64
with no width specifier.

Restore the zero-padding by adding an 016 width specifier to
this tracing and a couple of others that were similarly recently
changed to use VADDR_PRIx without a width specifier.

We can't unfortunately restore the "32-bit guests are padded to
8 hex digits and 64-bit guests to 16 hex digits" behaviour so
easily.

Fixes: f0a08b0913befbd ("accel/tcg/cpu-exec.c: Widen pc to vaddr")
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-id: 20230711165434.4123674-1-peter.maydell@linaro.org
---
 accel/tcg/cpu-exec.c      | 4 ++--
 accel/tcg/translate-all.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));

@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, last_tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("Stopped execution of TB chain before %p [%"
+            qemu_log("Stopped execution of TB chain before %p [%016"
                      VADDR_PRIx "] %s\n",
                      last_tb->tc.ptr, pc, lookup_symbol(pc));
         }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
         vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                      VADDR_PRIx "\n", pc);
         }
     }
--
2.34.1

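To see the width-specifier difference described above in isolation, a small
standalone C snippet (illustration only, not part of the patch) prints both
forms:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pc = 0x40000000;

        /* No width: prints [40000000], like the regressed trace lines */
        printf("[%" PRIx64 "]\n", pc);
        /* 016 width, as the patch adds: prints [0000000040000000] */
        printf("[%016" PRIx64 "]\n", pc);
        return 0;
    }

The 016 width pads every PC field to 16 hex digits, which restores the old
look for 64-bit guests; as the commit message notes, 32-bit guests now get 16
digits of padding rather than 8.
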
From: Tong Ho <tong.ho@amd.com>

Add a check in the bit-set operation to write the backstore
only if the affected bit is 0 before.

With this in place, there will be no need for callers to
do the checking in order to avoid unnecessary writes.

Signed-off-by: Tong Ho <tong.ho@amd.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Francisco Iglesias <frasse.iglesias@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 hw/nvram/xlnx-efuse.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/nvram/xlnx-efuse.c
+++ b/hw/nvram/xlnx-efuse.c
@@ -XXX,XX +XXX,XX @@ static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k)

 bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
 {
+    uint32_t set, *row;
+
     if (efuse_ro_bits_find(s, bit)) {
         g_autofree char *path = object_get_canonical_path(OBJECT(s));

@@ -XXX,XX +XXX,XX @@ bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit)
         return false;
     }

-    s->fuse32[bit / 32] |= 1 << (bit % 32);
-    efuse_bdrv_sync(s, bit);
+    /* Avoid back-end write unless there is a real update */
+    row = &s->fuse32[bit / 32];
+    set = 1 << (bit % 32);
+    if (!(set & *row)) {
+        *row |= set;
+        efuse_bdrv_sync(s, bit);
+    }
     return true;
 }

--
2.34.1
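The same "skip the write when nothing changes" idea, reduced to a
self-contained sketch (the sync callback here is a hypothetical stand-in for
efuse_bdrv_sync(), not a QEMU API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Set bit 'bit' in a packed word array; sync only on a real change. */
    static bool set_bit_and_sync(uint32_t *words, unsigned int bit,
                                 void (*sync_fn)(unsigned int bit))
    {
        uint32_t *row = &words[bit / 32];
        uint32_t mask = 1u << (bit % 32);

        if (!(*row & mask)) {      /* bit was 0: this is a real update */
            *row |= mask;
            sync_fn(bit);          /* write-through to the backing store */
        }
        return true;
    }

With this shape, callers can set an already-programmed bit without triggering
another backing-store write, which is what the patch achieves for
xlnx_efuse_set_bit().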