From: Mario Limonciello
To: "Gautham R . Shenoy", Perry Yuan
Cc: Dhananjay Ugwekar, linux-kernel@vger.kernel.org (open list:X86 ARCHITECTURE (32-BIT AND 64-BIT)), linux-pm@vger.kernel.org (open list:CPU FREQUENCY SCALING FRAMEWORK), Mario Limonciello, Dhananjay Ugwekar
Subject: [PATCH v4 14/19] cpufreq/amd-pstate: Move all EPP tracing into *_update_perf and *_set_epp functions
Date: Wed, 19 Feb 2025 15:02:57 -0600
Message-ID: <20250219210302.442954-15-superm1@kernel.org>
In-Reply-To: <20250219210302.442954-1-superm1@kernel.org>
References: <20250219210302.442954-1-superm1@kernel.org>

From: Mario Limonciello

The EPP tracing is done by the caller today, but this prevents the
tracepoint from recording whether the CPPC request has actually changed.

Move the tracing into the update_perf and set_epp functions and include
whether the request has changed from the last one.
amd_pstate_update_perf() and amd_pstate_set_epp() now require the policy
as an argument instead of the cpudata.

Reviewed-by: Dhananjay Ugwekar
Reviewed-by: Gautham R. Shenoy
Signed-off-by: Mario Limonciello
---
v4:
 * Drop unused variables
v3:
 * Add tag
 * Update commit message
---
 drivers/cpufreq/amd-pstate-trace.h |  13 +++-
 drivers/cpufreq/amd-pstate.c       | 118 +++++++++++++++++------------
 2 files changed, 80 insertions(+), 51 deletions(-)

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index f457d4af2c62e..32e1bdc588c52 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -90,7 +90,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		 u8 epp,
 		 u8 min_perf,
 		 u8 max_perf,
-		 bool boost
+		 bool boost,
+		 bool changed
 		 ),
 
 	TP_ARGS(cpu_id,
@@ -98,7 +99,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		epp,
 		min_perf,
 		max_perf,
-		boost),
+		boost,
+		changed),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, cpu_id)
@@ -107,6 +109,7 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		__field(u8, min_perf)
 		__field(u8, max_perf)
 		__field(bool, boost)
+		__field(bool, changed)
 		),
 
 	TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		__entry->min_perf = min_perf;
 		__entry->max_perf = max_perf;
 		__entry->boost = boost;
+		__entry->changed = changed;
 		),
 
-	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
+	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
 		  (unsigned int)__entry->cpu_id,
 		  (u8)__entry->min_perf,
 		  (u8)__entry->max_perf,
 		  (u8)__entry->highest_perf,
 		  (u8)__entry->epp,
-		  (bool)__entry->boost
+		  (bool)__entry->boost,
+		  (bool)__entry->changed
 		)
 );
 
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 4f97c8c104b62..da6c39564c9ea 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
 	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
 }
 
-static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 
 	value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -256,24 +269,26 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	}
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
-	WRITE_ONCE(cpudata->epp_cached, epp);
+	if (epp != cpudata->epp_cached)
+		WRITE_ONCE(cpudata->epp_cached, epp);
 
 	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
 					  u8 min_perf, u8 des_perf,
 					  u8 max_perf, u8 epp,
 					  bool fast_switch)
 {
-	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+	return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
 						   max_perf, epp, fast_switch);
 }
 
-static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 	int ret;
 
@@ -281,6 +296,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 	value &= ~AMD_CPPC_EPP_PERF_MASK;
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -299,15 +327,29 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 
 DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
 
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	return static_call(amd_pstate_set_epp)(cpudata, epp);
+	return static_call(amd_pstate_set_epp)(policy, epp);
 }
 
-static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	int ret;
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
+	int ret;
+
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  epp != cpudata->epp_cached);
+	}
 
 	if (epp == cpudata->epp_cached)
 		return 0;
@@ -339,17 +381,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
 		return -EBUSY;
 	}
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		union perf_cached perf = READ_ONCE(cpudata->perf);
-
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  epp,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
-					  policy->boost_enabled);
-	}
-
-	return amd_pstate_set_epp(cpudata, epp);
+	return amd_pstate_set_epp(policy, epp);
 }
 
 static inline int msr_cppc_enable(bool enable)
@@ -492,15 +524,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
 	u64 value, prev;
 	int ret;
 
 	if (cppc_state == AMD_PSTATE_ACTIVE) {
-		int ret = shmem_set_epp(cpudata, epp);
+		int ret = shmem_set_epp(policy, epp);
 
 		if (ret)
 			return ret;
@@ -515,6 +548,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -592,7 +637,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 				cpudata->cpu, fast_switch);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
+	amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
 }
 
 static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -1530,7 +1575,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 			return ret;
 		WRITE_ONCE(cpudata->cppc_req_cached, value);
 	}
-	ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
 	if (ret)
 		return ret;
 
@@ -1571,14 +1616,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 		epp = READ_ONCE(cpudata->epp_cached);
 
 	perf = READ_ONCE(cpudata->perf);
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
-					  perf.min_limit_perf,
-					  perf.max_limit_perf,
-					  policy->boost_enabled);
-	}
 
-	return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
+	return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
 				      perf.max_limit_perf, epp, false);
 }
 
@@ -1610,20 +1649,12 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
 static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
 {
-	struct amd_cpudata *cpudata = policy->driver_data;
-	union perf_cached perf = READ_ONCE(cpudata->perf);
 	int ret;
 
 	ret = amd_pstate_cppc_enable(true);
 	if (ret)
 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  cpudata->epp_cached,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  perf.highest_perf, policy->boost_enabled);
-	}
 
 	return amd_pstate_epp_update_limit(policy);
 }
@@ -1651,14 +1682,7 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 	if (cpudata->suspended)
 		return 0;
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
-					  perf.lowest_perf, perf.lowest_perf,
-					  policy->boost_enabled);
-	}
-
-	return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
+	return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
 				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
-- 
2.43.0