Platforms without an LLCC (e.g. Milos) still need to be able to read and
write to the cx_mem region. Previously, if the LLCC slices were unavailable,
the cx_mmio mapping was overwritten with an ERR_PTR value, causing a crash
when the GMU later accessed cx_mem.
Move the cx_mmio mapping out of a6xx_llc_slices_init() into
a6xx_gpu_init() so that the cx_mem mapping is independent of LLCC
availability.
Signed-off-by: Alexander Koskovich <akoskovich@pm.me>
---
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 38 ++++++++++++++++-------------------
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 6ce14407007e..eea18ae7c019 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2039,7 +2039,7 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
struct msm_gpu *gpu = &adreno_gpu->base;
u32 cntl1_regval = 0;
- if (IS_ERR(a6xx_gpu->cx_mmio))
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
@@ -2098,7 +2098,7 @@ static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- if (IS_ERR(a6xx_gpu->cx_mmio))
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
@@ -2135,31 +2135,12 @@ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
static void a6xx_llc_slices_init(struct platform_device *pdev,
struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
{
- struct device_node *phandle;
-
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
return;
- /*
- * There is a different programming path for A6xx targets with an
- * mmu500 attached, so detect if that is the case
- */
- phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
- a6xx_gpu->have_mmu500 = (phandle &&
- of_device_is_compatible(phandle, "arm,mmu-500"));
- of_node_put(phandle);
-
- if (is_a7xx || !a6xx_gpu->have_mmu500)
- a6xx_gpu->cx_mmio = msm_ioremap(pdev, "cx_mem");
- else
- a6xx_gpu->cx_mmio = NULL;
-
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
-
- if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
- a6xx_gpu->cx_mmio = ERR_PTR(-EINVAL);
}
#define GBIF_CLIENT_HALT_MASK BIT(0)
@@ -2621,6 +2602,7 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
const struct adreno_info *info = config->info;
+ struct device_node *phandle;
struct device_node *node;
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
@@ -2656,6 +2638,20 @@ static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
+ /*
+ * There is a different programming path for A6xx targets with an
+ * mmu500 attached, so detect if that is the case
+ */
+ phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
+ a6xx_gpu->have_mmu500 = (phandle &&
+ of_device_is_compatible(phandle, "arm,mmu-500"));
+ of_node_put(phandle);
+
+ if (is_a7xx || !a6xx_gpu->have_mmu500)
+ a6xx_gpu->cx_mmio = msm_ioremap(pdev, "cx_mem");
+ else
+ a6xx_gpu->cx_mmio = NULL;
+
ret = a6xx_set_supported_hw(&pdev->dev, a6xx_gpu, info);
if (ret) {
a6xx_llc_slices_destroy(a6xx_gpu);
--
2.53.0