On Tue, Sep 17, 2024 at 01:14:18PM +0200, Antonino Maniscalco wrote:
> Add trace points for when preemption is triggered and when it completes,
> so that preemption latency can be measured.
>
> Signed-off-by: Antonino Maniscalco <antomani103@gmail.com>
> Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8650-QRD
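
A side note for anyone who wants to consume these events: pairing each
msm_gpu_preemption_trigger with the msm_gpu_preemption_irq that follows it
gives the preemption latency directly. A rough userspace sketch is below; it
assumes the events end up under a drm_msm_gpu group in tracefs and that
trace_pipe uses the default line format, so treat it as an illustration
rather than a tool that ships with this patch.

/*
 * Rough sketch: pair msm_gpu_preemption_trigger with the following
 * msm_gpu_preemption_irq to estimate preemption latency.
 *
 * Assumes both events were enabled beforehand, e.g.:
 *   echo 1 > /sys/kernel/tracing/events/drm_msm_gpu/msm_gpu_preemption_trigger/enable
 *   echo 1 > /sys/kernel/tracing/events/drm_msm_gpu/msm_gpu_preemption_irq/enable
 * and that trace_pipe lines follow the default layout:
 *   TASK-PID  [CPU]  FLAGS  TIMESTAMP:  EVENT: ...
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];
	double trigger_ts = -1.0;

	if (!fp) {
		perror("trace_pipe");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		double ts;
		char event[64];

		/* Pull "TIMESTAMP:" and "EVENT:" out of the default format. */
		if (sscanf(line, "%*s %*s %*s %lf: %63[^:]:", &ts, event) != 2)
			continue;

		if (!strcmp(event, "msm_gpu_preemption_trigger")) {
			trigger_ts = ts;
		} else if (!strcmp(event, "msm_gpu_preemption_irq") &&
			   trigger_ts >= 0) {
			printf("preemption latency: %.1f us\n",
			       (ts - trigger_ts) * 1e6);
			trigger_ts = -1.0;
		}
	}

	fclose(fp);
	return 0;
}

The same pairing works with trace-cmd or perf if a proper tool is preferred;
the sketch only shows that the two new events are enough to derive the
latency number the commit message mentions.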

Reviewed-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
-Akhil
> ---
>  drivers/gpu/drm/msm/adreno/a6xx_preempt.c |  6 ++++++
>  drivers/gpu/drm/msm/msm_gpu_trace.h       | 28 ++++++++++++++++++++++++++++
>  2 files changed, 34 insertions(+)
>
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
> index 77c4d5e91854..4fbc66d6860a 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
> @@ -7,6 +7,7 @@
>  #include "a6xx_gpu.h"
>  #include "a6xx_gmu.xml.h"
>  #include "msm_mmu.h"
> +#include "msm_gpu_trace.h"
>
> /*
> * Try to transition the preemption state from old to new. Return
> @@ -174,6 +175,8 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
>
>  	set_preempt_state(a6xx_gpu, PREEMPT_NONE);
>
> +	trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
> +
>  	/*
>  	 * Retrigger preemption to avoid a deadlock that might occur when preemption
>  	 * is skipped due to it being already in flight when requested.
> @@ -295,6 +298,9 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
>  	 */
>  	ring->restore_wptr = false;
>
> +	trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id,
> +		ring ? ring->id : -1);
> +
>  	spin_unlock_irqrestore(&ring->preempt_lock, flags);
>
>  	gpu_write64(gpu,
> diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
> index ac40d857bc45..7f863282db0d 100644
> --- a/drivers/gpu/drm/msm/msm_gpu_trace.h
> +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
> @@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume,
>  		TP_printk("%u", __entry->dummy)
>  );
>
> +TRACE_EVENT(msm_gpu_preemption_trigger,
> +		TP_PROTO(int ring_id_from, int ring_id_to),
> +		TP_ARGS(ring_id_from, ring_id_to),
> +		TP_STRUCT__entry(
> +			__field(int, ring_id_from)
> +			__field(int, ring_id_to)
> +			),
> +		TP_fast_assign(
> +			__entry->ring_id_from = ring_id_from;
> +			__entry->ring_id_to = ring_id_to;
> +			),
> +		TP_printk("preempting %u -> %u",
> +				__entry->ring_id_from,
> +				__entry->ring_id_to)
> +);
> +
> +TRACE_EVENT(msm_gpu_preemption_irq,
> +		TP_PROTO(u32 ring_id),
> +		TP_ARGS(ring_id),
> +		TP_STRUCT__entry(
> +			__field(u32, ring_id)
> +			),
> +		TP_fast_assign(
> +			__entry->ring_id = ring_id;
> +			),
> +		TP_printk("preempted to %u", __entry->ring_id)
> +);
> +
>  #endif
>
>  #undef TRACE_INCLUDE_PATH
>
> --
> 2.46.0
>