target/arm/ptw.c | 65 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 17 deletions(-)
It was reported that QEMU monitor command gva2gpa was reporting unmapped
memory for a valid access (qemu-system-aarch64), during a copy from
kernel to user space (__arch_copy_to_user symbol in Linux) [1].
This was affecting cpu_memory_rw_debug also, which
is used in numerous places in our codebase. After investigating, the
problem was specific to arm_cpu_get_phys_page_attrs_debug.
[1] https://lists.nongnu.org/archive/html/qemu-discuss/2025-04/msg00013.html
When performing user access from a privileged space, we need to do a
second lookup for user mmu idx, following what get_a64_user_mem_index is
doing at translation time.
This series first extracts some functions, and then performs the expected
second lookup using the extracted functions.
Besides running all QEMU tests, it was explicitly checked that during a Linux
boot sequence, accesses now report a valid physical address unconditionally
using this (non sent) patch:
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -997,9 +997,7 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
if (enable) {
address |= flags & TLB_FLAGS_MASK;
flags &= TLB_SLOW_FLAGS_MASK;
- if (flags) {
address |= TLB_FORCE_SLOW;
- }
} else {
address = -1;
flags = 0;
@@ -1658,6 +1656,10 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
+ vaddr page = addr & TARGET_PAGE_MASK;
+ hwaddr physaddr = cpu_get_phys_page_debug(cpu, page);
+ g_assert(physaddr != -1);
+
full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
v2:
- fix style in first commit (philmd)
Pierrick Bouvier (4):
target/arm/ptw: extract arm_mmu_idx_to_security_space
target/arm/ptw: get current security_space for current mmu_idx
target/arm/ptw: extract arm_cpu_get_phys_page
target/arm/ptw: fix arm_cpu_get_phys_page_attrs_debug
target/arm/ptw.c | 65 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 48 insertions(+), 17 deletions(-)
--
2.39.5
On 4/14/25 8:30 AM, Pierrick Bouvier wrote: > It was reported that QEMU monitor command gva2gpa was reporting unmapped > memory for a valid access (qemu-system-aarch64), during a copy from > kernel to user space (__arch_copy_to_user symbol in Linux) [1]. > This was affecting cpu_memory_rw_debug also, which > is used in numerous places in our codebase. After investigating, the > problem was specific to arm_cpu_get_phys_page_attrs_debug. > > [1] https://lists.nongnu.org/archive/html/qemu-discuss/2025-04/msg00013.html > > When performing user access from a privileged space, we need to do a > second lookup for user mmu idx, following what get_a64_user_mem_index is > doing at translation time. > > This series first extract some functions, and then perform the second lookup > expected using extracted functions. > > Besides running all QEMU tests, it was explicitely checked that during a linux > boot sequence, accesses now report a valid physical address inconditionnally > using this (non sent) patch: > > --- a/accel/tcg/cputlb.c > +++ b/accel/tcg/cputlb.c > @@ -997,9 +997,7 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, > if (enable) { > address |= flags & TLB_FLAGS_MASK; > flags &= TLB_SLOW_FLAGS_MASK; > - if (flags) { > address |= TLB_FORCE_SLOW; > - } > } else { > address = -1; > flags = 0; > @@ -1658,6 +1656,10 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop, > tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; > } > > + vaddr page = addr & TARGET_PAGE_MASK; > + hwaddr physaddr = cpu_get_phys_page_debug(cpu, page); > + g_assert(physaddr != -1); > + > full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; > flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); > flags |= full->slow_flags[access_type]; > > v2: > - fix style in first commit (philmd) > > Pierrick Bouvier (4): > target/arm/ptw: extract arm_mmu_idx_to_security_space > target/arm/ptw: get current security_space for current mmu_idx > 
target/arm/ptw: extract arm_cpu_get_phys_page > target/arm/ptw: fix arm_cpu_get_phys_page_attrs_debug > > target/arm/ptw.c | 65 +++++++++++++++++++++++++++++++++++------------- > 1 file changed, 48 insertions(+), 17 deletions(-) > Gentle ping on this series. Any plan to queue it to tcg-next @Richard? Regards, Pierrick
On Mon, 28 Apr 2025 at 20:34, Pierrick Bouvier <pierrick.bouvier@linaro.org> wrote: > > On 4/14/25 8:30 AM, Pierrick Bouvier wrote: > > It was reported that QEMU monitor command gva2gpa was reporting unmapped > > memory for a valid access (qemu-system-aarch64), during a copy from > > kernel to user space (__arch_copy_to_user symbol in Linux) [1]. > > This was affecting cpu_memory_rw_debug also, which > > is used in numerous places in our codebase. After investigating, the > > problem was specific to arm_cpu_get_phys_page_attrs_debug. > > > > [1] https://lists.nongnu.org/archive/html/qemu-discuss/2025-04/msg00013.html > > > > When performing user access from a privileged space, we need to do a > > second lookup for user mmu idx, following what get_a64_user_mem_index is > > doing at translation time. > > > > This series first extract some functions, and then perform the second lookup > > expected using extracted functions. > > > > Besides running all QEMU tests, it was explicitely checked that during a linux > > boot sequence, accesses now report a valid physical address inconditionnally > > using this (non sent) patch: > > > > --- a/accel/tcg/cputlb.c > > +++ b/accel/tcg/cputlb.c > > @@ -997,9 +997,7 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, > > if (enable) { > > address |= flags & TLB_FLAGS_MASK; > > flags &= TLB_SLOW_FLAGS_MASK; > > - if (flags) { > > address |= TLB_FORCE_SLOW; > > - } > > } else { > > address = -1; > > flags = 0; > > @@ -1658,6 +1656,10 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop, > > tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; > > } > > > > + vaddr page = addr & TARGET_PAGE_MASK; > > + hwaddr physaddr = cpu_get_phys_page_debug(cpu, page); > > + g_assert(physaddr != -1); > > + > > full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; > > flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); > > flags |= full->slow_flags[access_type]; > > > > v2: > > - fix 
style in first commit (philmd) > > > > Pierrick Bouvier (4): > > target/arm/ptw: extract arm_mmu_idx_to_security_space > > target/arm/ptw: get current security_space for current mmu_idx > > target/arm/ptw: extract arm_cpu_get_phys_page > > target/arm/ptw: fix arm_cpu_get_phys_page_attrs_debug > > > > target/arm/ptw.c | 65 +++++++++++++++++++++++++++++++++++------------- > > 1 file changed, 48 insertions(+), 17 deletions(-) > > > > Gentle ping on this series. > Any plan to queue it to tcg-next @Richard? I've queued this series to target-arm.next; thanks. -- PMM
On 5/2/25 6:05 AM, Peter Maydell wrote: > > I've queued this series to target-arm.next; thanks. > > -- PMM Thank you Peter :)
We'll reuse this function later.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/arm/ptw.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 8d4e9e07a94..fdc575ec8c7 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3550,13 +3550,9 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
memop, result, fi);
}
-bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- S1Translate ptw = {
- .in_mmu_idx = mmu_idx,
- };
ARMSecuritySpace ss;
switch (mmu_idx) {
@@ -3617,7 +3613,18 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
g_assert_not_reached();
}
- ptw.in_space = ss;
+ return ss;
+}
+
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+{
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+ };
+
return get_phys_addr_gpc(env, &ptw, address, access_type,
memop, result, fi);
}
--
2.39.5
It should be equivalent to the previous code.
Allows calling a common function to get a page address later.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/arm/ptw.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index fdc575ec8c7..c3e4390175e 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3635,7 +3635,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_security_space(env);
+ ARMSecuritySpace ss = arm_mmu_idx_to_security_space(env, mmu_idx);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_space = ss,
--
2.39.5
Allows calling that function easily several times in the next commit.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/arm/ptw.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index c3e4390175e..bf92c165175 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3629,23 +3629,17 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
memop, result, fi);
}
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+ MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_mmu_idx_to_security_space(env, mmu_idx);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_space = ss,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
- bool ret;
-
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
+ bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
@@ -3653,3 +3647,13 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
}
return res.f.phys_addr;
}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+ return arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+}
--
2.39.5
It was reported that QEMU monitor command gva2gpa was reporting unmapped
memory for a valid access (qemu-system-aarch64), during a copy from
kernel to user space (__arch_copy_to_user symbol in Linux) [1].
This was affecting cpu_memory_rw_debug also, which
is used in numerous places in our codebase. After investigating, the
problem was specific to arm_cpu_get_phys_page_attrs_debug.
When performing user access from a privileged space, we need to do a
second lookup for user mmu idx, following what get_a64_user_mem_index is
doing at translation time.
[1] https://lists.nongnu.org/archive/html/qemu-discuss/2025-04/msg00013.html
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/arm/ptw.c | 22 +++++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index bf92c165175..cdcb6a49fa5 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -3655,5 +3655,25 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
CPUARMState *env = &cpu->env;
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- return arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+ hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+ if (res != -1) {
+ return res;
+ }
+
+ /*
+ * Memory may be accessible for an "unprivileged load/store" variant.
+ * In this case, get_a64_user_mem_index function generates an op using an
+ * unprivileged mmu idx, so we need to try with it.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+ default:
+ return -1;
+ }
}
--
2.39.5
© 2016 - 2025 Red Hat, Inc.