The ((GUEST_PAGING_LEVELS - 2) << 8) expression in the event field is common
to all shadow trace events, so introduce sh_trace() as a very thin wrapper
around trace().
Then, rename trace_shadow_gen() to sh_trace_va() to better describe what it is
doing, and to be more consistent with later cleanup.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: George Dunlap <george.dunlap@citrix.com>
v2:
* New
---
xen/arch/x86/mm/shadow/multi.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index bcd02b2d0037..1775952d7e18 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1974,13 +1974,17 @@ typedef u32 guest_va_t;
 typedef u32 guest_pa_t;
 #endif
 
-static inline void trace_shadow_gen(u32 event, guest_va_t va)
+/* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
+static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
+{
+    trace(event | ((GUEST_PAGING_LEVELS - 2) << 8), extra, extra_data);
+}
+
+/* Shadow trace event with the guest's linear address. */
+static void sh_trace_va(uint32_t event, guest_va_t va)
 {
     if ( tb_init_done )
-    {
-        event |= (GUEST_PAGING_LEVELS-2)<<8;
-        trace(event, sizeof(va), &va);
-    }
+        sh_trace(event, sizeof(va), &va);
 }
 
 static inline void trace_shadow_fixup(guest_l1e_t gl1e,
@@ -2239,7 +2243,7 @@ static int cf_check sh_page_fault(
sh_reset_early_unshadow(v);
perfc_incr(shadow_fault_fast_gnp);
SHADOW_PRINTK("fast path not-present\n");
- trace_shadow_gen(TRC_SHADOW_FAST_PROPAGATE, va);
+ sh_trace_va(TRC_SHADOW_FAST_PROPAGATE, va);
return 0;
}
#ifdef CONFIG_HVM
@@ -2250,7 +2254,7 @@ static int cf_check sh_page_fault(
perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
sh_reset_early_unshadow(v);
- trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
+ sh_trace_va(TRC_SHADOW_FAST_MMIO, va);
return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0;
#else
@@ -2265,7 +2269,7 @@ static int cf_check sh_page_fault(
* Retry and let the hardware give us the right fault next time. */
perfc_incr(shadow_fault_fast_fail);
SHADOW_PRINTK("fast path false alarm!\n");
- trace_shadow_gen(TRC_SHADOW_FALSE_FAST_PATH, va);
+ sh_trace_va(TRC_SHADOW_FALSE_FAST_PATH, va);
return EXCRET_fault_fixed;
}
}
@@ -2481,7 +2485,7 @@ static int cf_check sh_page_fault(
#endif
paging_unlock(d);
put_gfn(d, gfn_x(gfn));
- trace_shadow_gen(TRC_SHADOW_DOMF_DYING, va);
+ sh_trace_va(TRC_SHADOW_DOMF_DYING, va);
return 0;
}
@@ -2569,7 +2573,7 @@ static int cf_check sh_page_fault(
put_gfn(d, gfn_x(gfn));
perfc_incr(shadow_fault_mmio);
- trace_shadow_gen(TRC_SHADOW_MMIO, va);
+ sh_trace_va(TRC_SHADOW_MMIO, va);
return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0;
--
2.30.2
On 22.05.2024 15:17, Andrew Cooper wrote:
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -1974,13 +1974,17 @@ typedef u32 guest_va_t;
>  typedef u32 guest_pa_t;
>  #endif
>
> -static inline void trace_shadow_gen(u32 event, guest_va_t va)
> +/* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
> +static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
> +{
> +    trace(event | ((GUEST_PAGING_LEVELS - 2) << 8), extra, extra_data);
> +}
> +
> +/* Shadow trace event with the guest's linear address. */
> +static void sh_trace_va(uint32_t event, guest_va_t va)
>  {
>      if ( tb_init_done )
> -    {
> -        event |= (GUEST_PAGING_LEVELS-2)<<8;
> -        trace(event, sizeof(va), &va);
> -    }
> +        sh_trace(event, sizeof(va), &va);
>  }

If any tb_init_done check, then perhaps rather in sh_trace()? With that
(and provided you agree)
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
On 22/05/2024 2:40 pm, Jan Beulich wrote:
> On 22.05.2024 15:17, Andrew Cooper wrote:
>> --- a/xen/arch/x86/mm/shadow/multi.c
>> +++ b/xen/arch/x86/mm/shadow/multi.c
>> @@ -1974,13 +1974,17 @@ typedef u32 guest_va_t;
>>  typedef u32 guest_pa_t;
>>  #endif
>>
>> -static inline void trace_shadow_gen(u32 event, guest_va_t va)
>> +/* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
>> +static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
>> +{
>> +    trace(event | ((GUEST_PAGING_LEVELS - 2) << 8), extra, extra_data);
>> +}
>> +
>> +/* Shadow trace event with the guest's linear address. */
>> +static void sh_trace_va(uint32_t event, guest_va_t va)
>>  {
>>      if ( tb_init_done )
>> -    {
>> -        event |= (GUEST_PAGING_LEVELS-2)<<8;
>> -        trace(event, sizeof(va), &va);
>> -    }
>> +        sh_trace(event, sizeof(va), &va);
>>  }
> If any tb_init_done check, then perhaps rather in sh_trace()? With that
> (and provided you agree)
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Sadly not.  That leads to double reads of tb_init_done when tracing is
compiled in.

When GCC can't fully inline the structure initialisation, it can't prove
that a function call modified tb_init_done.  This is why I arranged all
the trace cleanup in this way.

~Andrew
On 22.05.2024 15:47, Andrew Cooper wrote:
> On 22/05/2024 2:40 pm, Jan Beulich wrote:
>> On 22.05.2024 15:17, Andrew Cooper wrote:
>>> --- a/xen/arch/x86/mm/shadow/multi.c
>>> +++ b/xen/arch/x86/mm/shadow/multi.c
>>> @@ -1974,13 +1974,17 @@ typedef u32 guest_va_t;
>>>  typedef u32 guest_pa_t;
>>>  #endif
>>>
>>> -static inline void trace_shadow_gen(u32 event, guest_va_t va)
>>> +/* Shadow trace event with GUEST_PAGING_LEVELS folded into the event field. */
>>> +static void sh_trace(uint32_t event, unsigned int extra, const void *extra_data)
>>> +{
>>> +    trace(event | ((GUEST_PAGING_LEVELS - 2) << 8), extra, extra_data);
>>> +}
>>> +
>>> +/* Shadow trace event with the guest's linear address. */
>>> +static void sh_trace_va(uint32_t event, guest_va_t va)
>>>  {
>>>      if ( tb_init_done )
>>> -    {
>>> -        event |= (GUEST_PAGING_LEVELS-2)<<8;
>>> -        trace(event, sizeof(va), &va);
>>> -    }
>>> +        sh_trace(event, sizeof(va), &va);
>>>  }
>> If any tb_init_done check, then perhaps rather in sh_trace()? With that
>> (and provided you agree)
>> Reviewed-by: Jan Beulich <jbeulich@suse.com>
>
> Sadly not.  That leads to double reads of tb_init_done when tracing is
> compiled in.

Not here, but I can see how that could happen in principle, when ...

> When GCC can't fully inline the structure initialisation, it can't prove
> that a function call modified tb_init_done.  This is why I arranged all
> the trace cleanup in this way.

... inlining indeed doesn't happen. Patch 2 fits the one here in this
regard (no function calls); I have yet to look at patch 3, though. But
anyway, the present placement, while likely a little redundant, is not
the end of the world, so my R-b holds either way.

Jan
© 2016 - 2024 Red Hat, Inc.