ACD, a.k.a. Adaptive Clock Distribution, is a feature which helps to
reduce power consumption. In some chipsets, it is also a requirement
to support higher GPU frequencies. This patch adds support for GPU ACD
by sending the necessary data to the GMU and AOSS. Feature support for
the chipset is detected based on devicetree data.
Signed-off-by: Akhil P Oommen <quic_akhilpo@quicinc.com>
---
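[ Note: a hypothetical GPU OPP entry consuming the new per-OPP property
  could look like the below. The frequency, opp-level and the ACD level
  value are purely illustrative; only the qcom,opp-acd-level property
  name comes from this series:

	opp-550000000 {
		opp-hz = /bits/ 64 <550000000>;
		opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
		qcom,opp-acd-level = <0xc0285ffd>;
	};
]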
drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 81 ++++++++++++++++++++++++++++-------
drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 1 +
drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 36 ++++++++++++++++
drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 21 +++++++++
4 files changed, 124 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 37927bdd6fbe..09fb3f397dbb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1021,14 +1021,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -1476,6 +1468,64 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse acd-level for rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ else if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to read acd level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* We are done here if ACD is not required for any of the OPPs */
+ if (!cmd->enable_by_level)
+ return 0;
+
+ /* Initialize qmp node to talk to AOSS */
+ gmu->qmp = qmp_get(gmu->dev);
+ if (IS_ERR(gmu->qmp)) {
+ cmd->enable_by_level = 0;
+ return dev_err_probe(gmu->dev, PTR_ERR(gmu->qmp), "Failed to initialize qmp\n");
+ }
+
+ /* Notify AOSS about the ACD state */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", 1);
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "failed to send GPU ACD state\n");
+
+ return 0;
+}
+
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
@@ -1792,12 +1842,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
- gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
- }
-
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@@ -1811,6 +1855,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -1821,7 +1869,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
device_link_del(link);
detach_cxpd:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 94b6c5cab6f4..2690511149ed 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -81,6 +81,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
+ struct a6xx_hfi_acd_table acd_table;
int nr_gmu_freqs;
unsigned long gmu_freqs[4];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cdb3f6e74d3e..af94e339188b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -659,6 +659,38 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
NULL, 0);
}
+#define HFI_FEATURE_ACD 12
+
+static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = HFI_FEATURE_ACD,
+ .enable = 1,
+ .data = 0,
+ };
+ int ret;
+
+ if (!acd_table->enable_by_level)
+ return 0;
+
+ /* Enable ACD feature at GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
+ return ret;
+ }
+
+ /* Send ACD table to GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to ACD table (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
@@ -756,6 +788,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_acd(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 528110169398..51864c8ad0e6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_ACD 7
+#define MAX_ACD_STRIDE 2
+
+struct a6xx_hfi_acd_table {
+ u32 header;
+ u32 version;
+ u32 enable_by_level;
+ u32 stride;
+ u32 num_levels;
+ u32 data[16 * MAX_ACD_STRIDE];
+};
+
#define HFI_H2F_MSG_START 10
struct a6xx_hfi_msg_start {
u32 header;
};
+#define HFI_H2F_FEATURE_CTRL 11
+
+struct a6xx_hfi_msg_feature_ctrl {
+ u32 header;
+ u32 feature;
+ u32 enable;
+ u32 data;
+};
+
#define HFI_H2F_MSG_CORE_FW_START 14
struct a6xx_hfi_msg_core_fw_start {
--
2.45.2
On 21/10/2024 12:53, Akhil P Oommen wrote:
> ACD, a.k.a. Adaptive Clock Distribution, is a feature which helps to
> reduce power consumption. In some chipsets, it is also a requirement
> to support higher GPU frequencies. This patch adds support for GPU ACD
> by sending the necessary data to the GMU and AOSS. Feature support for
> the chipset is detected based on devicetree data.

[snip]

> + cmd->enable_by_level |= BIT(i);
> + cmd->data[cmd_idx++] = val;

How do you know that cmd_idx is always < ARRAY_SIZE(cmd->data)?

[snip]

> + /* Notify AOSS about the ACD state */
> + ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", 1);
> + if (ret)
> + DRM_DEV_ERROR(gmu->dev, "failed to send GPU ACD state\n");
> +
> + return 0;

Shouldn't the ret from qmp_send() get propagated in the return of this
function? I.e. how can your probe be successful if the notification
failed?
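Something like the below (completely untested sketch, and the -ENOSPC
pick is mine) would cover both points:

	/* Fail loudly instead of writing past the end of cmd->data */
	if (cmd_idx >= ARRAY_SIZE(cmd->data))
		return -ENOSPC;

	cmd->enable_by_level |= BIT(i);
	cmd->data[cmd_idx++] = val;

and at the end of a6xx_gmu_acd_probe():

	/* Notify AOSS about the ACD state and propagate any failure */
	ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", 1);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to send GPU ACD state\n");

	return ret;

---
bod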