Certain amd_pstate freq_attrs such as amd_pstate_hw_prefcore and
amd_pstate_prefcore_ranking are enabled even when preferred core is
not supported on the platform.
Similarly, there are freq_attrs common to the amd-pstate and the
amd-pstate-epp drivers (e.g. amd_pstate_max_freq,
amd_pstate_lowest_nonlinear_freq, etc.), but they are duplicated in two
different freq_attr structs.
Unify all the attributes in a single place and associate each of them
with a visibility function that determines whether the attribute
should be visible based on the underlying platform support and the
current amd_pstate mode.
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 123 ++++++++++++++++++++++++++---------
1 file changed, 92 insertions(+), 31 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 24cdeffbcd40e..fb5d7bb320c15 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1220,12 +1220,86 @@ static ssize_t show_energy_performance_preference(
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
+cpufreq_freq_attr_ro(amd_pstate_max_freq);
+cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
+
+cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+
+struct freq_attr_visibility {
+ struct freq_attr *attr;
+ bool (*visibility_fn)(void);
+};
+
+/* For attributes which are always visible */
+static bool always_visible(void)
+{
+ return true;
+}
+
+/* Determines whether prefcore related attributes should be visible */
+static bool prefcore_visibility(void)
+{
+ return amd_pstate_prefcore;
+}
+
+/* Determines whether energy performance preference should be visible */
+static bool epp_visibility(void)
+{
+ return cppc_state == AMD_PSTATE_ACTIVE;
+}
+
+static struct freq_attr_visibility amd_pstate_attr_visibility[] = {
+ {&amd_pstate_max_freq, always_visible},
+ {&amd_pstate_lowest_nonlinear_freq, always_visible},
+ {&amd_pstate_highest_perf, always_visible},
+ {&amd_pstate_prefcore_ranking, prefcore_visibility},
+ {&amd_pstate_hw_prefcore, prefcore_visibility},
+ {&energy_performance_preference, epp_visibility},
+ {&energy_performance_available_preferences, epp_visibility},
+};
+
+static struct freq_attr **get_freq_attrs(void)
+{
+ bool attr_visible[ARRAY_SIZE(amd_pstate_attr_visibility)];
+ struct freq_attr **attrs;
+ int i, j, count;
+
+ for (i = 0, count = 0; i < ARRAY_SIZE(amd_pstate_attr_visibility); i++) {
+ struct freq_attr_visibility *v = &amd_pstate_attr_visibility[i];
+
+ attr_visible[i] = v->visibility_fn();
+ if (attr_visible[i])
+ count++;
+ }
+
+ /* amd_pstate_{max_freq, lowest_nonlinear_freq, highest_perf} should always be visible */
+ BUG_ON(!count);
+
+ attrs = kcalloc(count + 1, sizeof(struct freq_attr *), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(amd_pstate_attr_visibility); i++) {
+ if (!attr_visible[i])
+ continue;
+
+ attrs[j++] = amd_pstate_attr_visibility[i].attr;
+ }
+
+ return attrs;
+}
+
static void amd_pstate_driver_cleanup(void)
{
if (amd_pstate_prefcore)
sched_clear_itmt_support();
cppc_state = AMD_PSTATE_DISABLE;
+ kfree(current_pstate_driver->attr);
current_pstate_driver = NULL;
}
@@ -1250,6 +1324,7 @@ static int amd_pstate_set_driver(int mode_idx)
static int amd_pstate_register_driver(int mode)
{
+ struct freq_attr **attr = NULL;
int ret;
ret = amd_pstate_set_driver(mode);
@@ -1258,6 +1333,22 @@ static int amd_pstate_register_driver(int mode)
cppc_state = mode;
+ /*
+ * Note: It is important to compute the attrs _after_
+ * re-initializing the cppc_state. Some attributes become
+ * visible only when cppc_state is AMD_PSTATE_ACTIVE.
+ */
+ attr = get_freq_attrs();
+ if (IS_ERR(attr)) {
+ ret = (int) PTR_ERR(attr);
+ pr_err("Couldn't compute freq_attrs for current mode %s [%d]\n",
+ amd_pstate_get_mode_string(cppc_state), ret);
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
+
+ current_pstate_driver->attr = attr;
+
/* at least one CPU supports CPB */
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
@@ -1399,37 +1490,9 @@ static ssize_t prefcore_show(struct device *dev,
return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
}
-cpufreq_freq_attr_ro(amd_pstate_max_freq);
-cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
-
-cpufreq_freq_attr_ro(amd_pstate_highest_perf);
-cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
-cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
-cpufreq_freq_attr_rw(energy_performance_preference);
-cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(prefcore);
-static struct freq_attr *amd_pstate_attr[] = {
- &amd_pstate_max_freq,
- &amd_pstate_lowest_nonlinear_freq,
- &amd_pstate_highest_perf,
- &amd_pstate_prefcore_ranking,
- &amd_pstate_hw_prefcore,
- NULL,
-};
-
-static struct freq_attr *amd_pstate_epp_attr[] = {
- &amd_pstate_max_freq,
- &amd_pstate_lowest_nonlinear_freq,
- &amd_pstate_highest_perf,
- &amd_pstate_prefcore_ranking,
- &amd_pstate_hw_prefcore,
- &energy_performance_preference,
- &energy_performance_available_preferences,
- NULL,
-};
-
static struct attribute *pstate_global_attributes[] = {
&dev_attr_status.attr,
&dev_attr_prefcore.attr,
@@ -1696,7 +1759,6 @@ static struct cpufreq_driver amd_pstate_driver = {
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
- .attr = amd_pstate_attr,
};
static struct cpufreq_driver amd_pstate_epp_driver = {
@@ -1712,7 +1774,6 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.update_limits = amd_pstate_update_limits,
.set_boost = amd_pstate_set_boost,
.name = "amd-pstate-epp",
- .attr = amd_pstate_epp_attr,
};
/*
@@ -1858,7 +1919,7 @@ static int __init amd_pstate_init(void)
return ret;
global_attr_free:
- cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_unregister_driver(0);
return ret;
}
device_initcall(amd_pstate_init);
--
2.34.1
On 3/11/2026 9:01 AM, Gautham R. Shenoy wrote:
> Certain amd_pstate freq_attrs such as amd_pstate_hw_prefcore and
> amd_pstate_prefcore_ranking are enabled even when preferred core is
> not supported on the platform.
>
> Similarly there are common freq_attrs between the amd-pstate and the
> amd-pstate-epp drivers (eg: amd_pstate_max_freq,
> amd_pstate_lowest_non_linear_freq, etc.) but are duplicated in two
> different freq_attr structs.
>
> Unify all the attributes in a single place and associate each of them
> with a visibility function that determines whether the attribute
> should be visible based on the underlying platform support and the
> current amd_pstate mode.
>
> Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
One follow-up suggestion that might make sense, though, would be some
changes to amd-pstate-ut to validate that the right attributes always
show up for the right configuration.
> ---
> drivers/cpufreq/amd-pstate.c | 123 ++++++++++++++++++++++++++---------
> 1 file changed, 92 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 24cdeffbcd40e..fb5d7bb320c15 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -1220,12 +1220,86 @@ static ssize_t show_energy_performance_preference(
> return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
> }
>
> +cpufreq_freq_attr_ro(amd_pstate_max_freq);
> +cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
> +
> +cpufreq_freq_attr_ro(amd_pstate_highest_perf);
> +cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
> +cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
> +cpufreq_freq_attr_rw(energy_performance_preference);
> +cpufreq_freq_attr_ro(energy_performance_available_preferences);
> +
> +struct freq_attr_visibility {
> + struct freq_attr *attr;
> + bool (*visibility_fn)(void);
> +};
> +
> +/* For attributes which are always visible */
> +static bool always_visible(void)
> +{
> + return true;
> +}
> +
> +/* Determines whether prefcore related attributes should be visible */
> +static bool prefcore_visibility(void)
> +{
> + return amd_pstate_prefcore;
> +}
> +
> +/* Determines whether energy performance preference should be visible */
> +static bool epp_visibility(void)
> +{
> + return cppc_state == AMD_PSTATE_ACTIVE;
> +}
> +
> +static struct freq_attr_visibility amd_pstate_attr_visibility[] = {
> + {&amd_pstate_max_freq, always_visible},
> + {&amd_pstate_lowest_nonlinear_freq, always_visible},
> + {&amd_pstate_highest_perf, always_visible},
> + {&amd_pstate_prefcore_ranking, prefcore_visibility},
> + {&amd_pstate_hw_prefcore, prefcore_visibility},
> + {&energy_performance_preference, epp_visibility},
> + {&energy_performance_available_preferences, epp_visibility},
> +};
> +
> +static struct freq_attr **get_freq_attrs(void)
> +{
> + bool attr_visible[ARRAY_SIZE(amd_pstate_attr_visibility)];
> + struct freq_attr **attrs;
> + int i, j, count;
> +
> + for (i = 0, count = 0; i < ARRAY_SIZE(amd_pstate_attr_visibility); i++) {
> + struct freq_attr_visibility *v = &amd_pstate_attr_visibility[i];
> +
> + attr_visible[i] = v->visibility_fn();
> + if (attr_visible[i])
> + count++;
> + }
> +
> + /* amd_pstate_{max_freq, lowest_nonlinear_freq, highest_freq} should always be visible */
> + BUG_ON(!count);
> +
> + attrs = kcalloc(count + 1, sizeof(struct freq_attr *), GFP_KERNEL);
> + if (!attrs)
> + return ERR_PTR(-ENOMEM);
> +
> + for (i = 0, j = 0; i < ARRAY_SIZE(amd_pstate_attr_visibility); i++) {
> + if (!attr_visible[i])
> + continue;
> +
> + attrs[j++] = amd_pstate_attr_visibility[i].attr;
> + }
> +
> + return attrs;
> +}
> +
> static void amd_pstate_driver_cleanup(void)
> {
> if (amd_pstate_prefcore)
> sched_clear_itmt_support();
>
> cppc_state = AMD_PSTATE_DISABLE;
> + kfree(current_pstate_driver->attr);
> current_pstate_driver = NULL;
> }
>
> @@ -1250,6 +1324,7 @@ static int amd_pstate_set_driver(int mode_idx)
>
> static int amd_pstate_register_driver(int mode)
> {
> + struct freq_attr **attr = NULL;
> int ret;
>
> ret = amd_pstate_set_driver(mode);
> @@ -1258,6 +1333,22 @@ static int amd_pstate_register_driver(int mode)
>
> cppc_state = mode;
>
> + /*
> + * Note: It is important to compute the attrs _after_
> + * re-initializing the cppc_state. Some attributes become
> + * visible only when cppc_state is AMD_PSTATE_ACTIVE.
> + */
> + attr = get_freq_attrs();
> + if (IS_ERR(attr)) {
> + ret = (int) PTR_ERR(attr);
> + pr_err("Couldn't compute freq_attrs for current mode %s [%d]\n",
> + amd_pstate_get_mode_string(cppc_state), ret);
> + amd_pstate_driver_cleanup();
> + return ret;
> + }
> +
> + current_pstate_driver->attr = attr;
> +
> /* at least one CPU supports CPB */
> current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
>
> @@ -1399,37 +1490,9 @@ static ssize_t prefcore_show(struct device *dev,
> return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
> }
>
> -cpufreq_freq_attr_ro(amd_pstate_max_freq);
> -cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
> -
> -cpufreq_freq_attr_ro(amd_pstate_highest_perf);
> -cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
> -cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
> -cpufreq_freq_attr_rw(energy_performance_preference);
> -cpufreq_freq_attr_ro(energy_performance_available_preferences);
> static DEVICE_ATTR_RW(status);
> static DEVICE_ATTR_RO(prefcore);
>
> -static struct freq_attr *amd_pstate_attr[] = {
> - &amd_pstate_max_freq,
> - &amd_pstate_lowest_nonlinear_freq,
> - &amd_pstate_highest_perf,
> - &amd_pstate_prefcore_ranking,
> - &amd_pstate_hw_prefcore,
> - NULL,
> -};
> -
> -static struct freq_attr *amd_pstate_epp_attr[] = {
> - &amd_pstate_max_freq,
> - &amd_pstate_lowest_nonlinear_freq,
> - &amd_pstate_highest_perf,
> - &amd_pstate_prefcore_ranking,
> - &amd_pstate_hw_prefcore,
> - &energy_performance_preference,
> - &energy_performance_available_preferences,
> - NULL,
> -};
> -
> static struct attribute *pstate_global_attributes[] = {
> &dev_attr_status.attr,
> &dev_attr_prefcore.attr,
> @@ -1696,7 +1759,6 @@ static struct cpufreq_driver amd_pstate_driver = {
> .set_boost = amd_pstate_set_boost,
> .update_limits = amd_pstate_update_limits,
> .name = "amd-pstate",
> - .attr = amd_pstate_attr,
> };
>
> static struct cpufreq_driver amd_pstate_epp_driver = {
> @@ -1712,7 +1774,6 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
> .update_limits = amd_pstate_update_limits,
> .set_boost = amd_pstate_set_boost,
> .name = "amd-pstate-epp",
> - .attr = amd_pstate_epp_attr,
> };
>
> /*
> @@ -1858,7 +1919,7 @@ static int __init amd_pstate_init(void)
> return ret;
>
> global_attr_free:
> - cpufreq_unregister_driver(current_pstate_driver);
> + amd_pstate_unregister_driver(0);
> return ret;
> }
> device_initcall(amd_pstate_init);
Hello,
Claude Opus 4.6, along with Chris Mason's Linux Prompts (to which I
added an amd-pstate.md), found the following issue with this patch.
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 24cdeffbcd40..fb5d7bb320c1 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
[ ... ]
> static void amd_pstate_driver_cleanup(void)
> {
> if (amd_pstate_prefcore)
> sched_clear_itmt_support();
>
> cppc_state = AMD_PSTATE_DISABLE;
> + kfree(current_pstate_driver->attr);
> current_pstate_driver = NULL;
> }
After the kfree, the .attr field in the static driver struct
(amd_pstate_driver or amd_pstate_epp_driver) still holds the freed
pointer. Should current_pstate_driver->attr be set to NULL before
current_pstate_driver is set to NULL?
Without that, on a mode switch via
amd_pstate_change_driver_mode()->amd_pstate_unregister_driver()
->amd_pstate_driver_cleanup(), the .attr pointer in the old static
driver struct becomes dangling. When
amd_pstate_register_driver()->amd_pstate_set_driver() later re-assigns
current_pstate_driver to that same struct, .attr still holds the stale
pointer. If get_freq_attrs() then fails (kcalloc returns NULL), the
error path calls amd_pstate_driver_cleanup() again, which kfrees the
already-freed .attr -- a double-free.
[ ... ]
> + attr = get_freq_attrs();
> + if (IS_ERR(attr)) {
> + ret = (int) PTR_ERR(attr);
> + pr_err("Couldn't compute freq_attrs for current mode %s [%d]\n",
> + amd_pstate_get_mode_string(cppc_state), ret);
> + amd_pstate_driver_cleanup();
> ^^^^
This is where the double-free would be triggered. At this point
current_pstate_driver->attr was never updated with the new allocation
(it failed), so it still holds whatever the static struct had -- which
is the dangling pointer from a previous amd_pstate_driver_cleanup()
call during a prior mode switch.
I will send a v3 to incorporate this fix.
> + return ret;
> + }
> +
> + current_pstate_driver->attr = attr;
FWIW, the amd-pstate.md for review-prompts/linux is as follows:
x8-------------x8---------------------x8-----------------x8
# AMD Pstate / CPPC Subsystem Delta
Load when patch touches: `drivers/cpufreq/amd-pstate*.c`, `amd_pstate_`, `cppc_` (in cpufreq context).
Generic patterns (memory leaks on error path, NULL checks, uninitialized variables) are covered by
CS-001, patterns/null.md, and false-positive-guide. Apply those; this file adds amd-pstate-specific checks.
## AMD Pstate Patterns [APST]
### APST-001: cppc_req_cached sync on fast_switch path
**Risk**: Stale cached state, wrong EPP/perf on subsequent reads
**Details**: `msr_update_perf()` writes to MSR_AMD_CPPC_REQ but does not update
`cpudata->cppc_req_cached` when the update happens from the fast path (fast_switch).
The cached value is used elsewhere; desync causes incorrect behavior.
- **Check**: Any MSR_AMD_CPPC_REQ write path must update `cppc_req_cached` consistently
- **Fixes context**: Introduced by "Always write EPP value when updating perf"; fast path was missed
### APST-002: Online vs present CPUs for cpc_desc_ptr
**Risk**: NULL deref, crash when accessing offline CPU CPC data
**Details**: `cpc_desc_ptr` (per-CPU) is initialized only for **online** CPUs via
`acpi_soft_cpu_online()` -> `__acpi_processor_start()` -> `acpi_cppc_processor_probe()`.
Code that iterates over **present** CPUs and calls into `cppc_`* (e.g. `cppc_set_auto_sel()` ->
`cppc_set_reg_val()`) can touch uninitialized CPC data for offline CPUs.
- **Check**: Restrict `cppc_set_auto_sel()` and similar CPC ops to online CPUs only
- **Fixes context**: Guided mode control; `amd_pstate_change_mode_without_dvr_change()` iterated present CPUs
### APST-003: EPP 0 after hibernate (S4)
**Risk**: Wrong EPP on resume, performance/power regression
**Details**: During S4 hibernate, CPUs are offlined. When offlined, EPP was reset to 0.
On resume, all CPUs except boot CPU end up with EPP 0 programmed instead of policy value.
- **Check**: When offlining CPUs, do not reset EPP to 0; preserve or reset to policy values so onlining restores correctly
- **Fixes context**: "Requested CPU Min frequency" BIOS option changed offlining behavior
### APST-004: EPP 0 after resume (S3)
**Risk**: Wrong EPP on resume, performance/power regression
**Details**: During suspend, the cached CPPC request was invalidated/destroyed with the
expectation it would be restored on resume. Removing the separate EPP cache and later
explicitly setting EPP to 0 during suspend broke resume.
- **Check**: Preserve or re-apply EPP/CPPC request values during suspend so resume path can restore correctly
- **Fixes context**: "Requested CPU Min frequency" BIOS option; also b7a41156588a (Invalidate cppc_req_cached during suspend)
### APST-005: CPPC.min_perf wrong after governor switch
**Risk**: Performance governor not achieving nominal_perf, throttling incorrectly
**Details**: In active mode with performance governor, CPPC.min_perf must equal nominal_perf.
After "Drop min and max cached frequencies", `amd_pstate_update_min_max_limit()` is
called only when scaling_{min,max}_freq differ from cached values. Governor switch
powersave -> performance does not change scaling limits, so the constraint is never
re-applied and CPPC.min_perf remains at the old powersave value.
- **Check**: Invoke limit update when policy/governor changes, not only when scaling limits change
- **Fixes context**: a9b9b4c2a4cd
### APST-006: ITMT / sched domain init ordering
**Risk**: Wrong asym_prefer_cpu, suboptimal scheduling
**Details**: ITMT support is enabled from `amd_pstate*_cpu_init()`, which runs per CPU.
Sched domains are rebuilt when ITMT is first enabled. Enabling after the first CPU
means other CPUs have not yet initialized their asym priorities; the domain rebuild
captures incomplete data and asym_prefer_cpu is wrong (e.g. always first CPU in group).
- **Check**: Initialize asym priorities for all CPUs first, then enable ITMT (e.g. from `amd_pstate_register_driver()`)
- **Check**: Clear ITMT when driver unregisters; core rankings require update_limits() to be operational
- **Fixes context**: f3a052391822 (Enable amd-pstate preferred core support)
### APST-007: min_limit perf/freq desync for performance governor
**Risk**: Inconsistent min_limit state, wrong scaling behavior
**Details**: With performance governor, min_limit perf and freq are kept in sync. A
special-case path modified only the perf value; the freq value was not updated,
causing perf and freq to diverge.
- **Check**: When updating min_limit perf in performance governor path, update min_limit freq as well
- **Fixes context**: 009d1c29a451 (Move perf values into a union)
### APST-008: freq_to_perf clamping and u8 overflow
**Risk**: Wrong perf values from overflow, wraparound
**Details**: `freq_to_perf()` produces a u8. Values >255 overflow when cast to u8 before
clamping. Also, `clamp_t(u8, ...)` typecasts first then clamps, which does not fix
overflow. Must use a wider type (e.g. u32) for the intermediate value, then clamp,
then cast to u8.
- **Check**: Use intermediate u32 for >255 values; clamp then cast to u8
- **Fixes context**: 620136ced35a / 305621eb6a8b (Modularize perf<->freq conversion)
## Driver Context
### Modes
- **Passive**: Legacy ACPI P-state style
- **Active (EPP)**: Uses MSR_AMD_CPPC_REQ, EPP hint
- **Guided**: Platform-guided; `cppc_set_auto_sel()` involved
### MSR Paths
- **MSR_AMD_CPPC_REQ**: Primary request register; `cppc_req_cached` must stay in sync
- **MSR_AMD_CPPC_REQ2**: Floor perf (newer platforms)
- **MSR_AMD_CPPC_CAP1**: Capabilities, nominal/lowest_perf, etc.
- **Fast path vs slow path**: Both must update `cppc_req_cached` when writing CPPC_REQ
### Per-CPU vs Online
- `cpc_desc_ptr`: Initialized only for **online** CPUs (ACPI CPU hotplug)
- Iterating present CPUs and calling `cppc_`* can access uninitialized data
- Prefer `for_each_online_cpu` or equivalent when touching CPC
### Suspend/Resume
- EPP and CPPC request values must be preserved or explicitly restored
- Offlining during hibernate must not reset EPP to 0; use policy values
- Cached request must be usable for resume restoration
### Governor Interactions
- **Active mode only**: Performance governor CPPC.min_perf = nominal_perf; scaling limits may be ignored. Powersave scaling_min_freq / scaling_max_freq apply.
- **Passive / guided modes**: scaling_min_freq / scaling_max_freq apply regardless of governor.
- Governor switch (in active mode) must re-apply performance constraints; limit updates are not only scaling-limit driven.
## Quick Checks
- Every MSR_AMD_CPPC_REQ write path updates `cppc_req_cached`
- CPC/ACPI ops (e.g. `cppc_set_auto_sel`) restricted to online CPUs
- Suspend/offline paths preserve or reset EPP to policy values, not 0
- Governor switch (especially to performance) triggers limit/constraint refresh
- `freq_to_perf` / `perf_to_freq` use safe clamping (no u8 overflow before clamp)
- Init paths: apply CS-001 error-path validation; amd-pstate has had leaks on init failure
x8-------------x8---------------------x8-----------------x8
--
Thanks and Regards
gautham.
© 2016 - 2026 Red Hat, Inc.