[PATCH Resend 3/8] tools/power x86_energy_perf_policy: rename get_msr() and put_msr() with intel prefix

Posted by Perry Yuan 1 year, 10 months ago
From: Perry Yuan <Perry.Yuan@amd.com>

Rename the get_msr() and put_msr() functions with an intel_ prefix to
better differentiate them from the MSR operations of other processor
vendors.

No functional impact.
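
For illustration only (not part of this patch), a caller-side sketch of
the vendor dispatch the rename is meant to make obvious. The helper name
read_hwp_enable() is made up; the accessors, vendor flags and MSR names
are the ones the tool already uses (see enable_hwp_on_cpu() and
verify_hwp_is_enabled() below):

	/*
	 * Hypothetical helper (sketch only): pick the vendor-specific
	 * MSR accessor at run time, the way enable_hwp_on_cpu() and
	 * verify_hwp_is_enabled() already do in this tool.
	 */
	static int read_hwp_enable(int cpu, unsigned long long *msr)
	{
		if (genuine_intel)
			return intel_get_msr(cpu, MSR_PM_ENABLE, msr);
		if (authentic_amd)
			return amd_get_msr(cpu, MSR_AMD_CPPC_ENABLE,
					   (unsigned long *)msr);
		return -1;
	}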

Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
Suggested-by: Fontenot Nathan <Nathan.Fontenot@amd.com>
---
 .../x86_energy_perf_policy.c                  | 32 +++++++++----------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index da9087873915..3f09c12f24fa 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -679,7 +679,7 @@ void err_on_hypervisor(void)
 		    "not supported on this virtual machine");
 }
 
-int get_msr(int cpu, int offset, unsigned long long *msr)
+int intel_get_msr(int cpu, int offset, unsigned long long *msr)
 {
 	int retval;
 	char pathname[32];
@@ -697,13 +697,13 @@ int get_msr(int cpu, int offset, unsigned long long *msr)
 	}
 
 	if (debug > 1)
-		fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
+		fprintf(stderr, "intel_get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
 
 	close(fd);
 	return 0;
 }
 
-int put_msr(int cpu, int offset, unsigned long long new_msr)
+int intel_put_msr(int cpu, int offset, unsigned long long new_msr)
 {
 	char pathname[32];
 	int retval;
@@ -721,7 +721,7 @@ int put_msr(int cpu, int offset, unsigned long long new_msr)
 	close(fd);
 
 	if (debug > 1)
-		fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
+		fprintf(stderr, "intel_put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr);
 
 	return 0;
 }
@@ -829,7 +829,7 @@ void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset)
 	int ret;
 
 	if (genuine_intel) {
-		get_msr(cpu, msr_offset, &msr);
+		intel_get_msr(cpu, msr_offset, &msr);
 		cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
 		cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
 		cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
@@ -872,7 +872,7 @@ void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr
 	int ret;
 
 	if (genuine_intel) {
-		get_msr(cpu, msr_offset, &msr);
+		intel_get_msr(cpu, msr_offset, &msr);
 
 		hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff));
 		hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff));
@@ -909,7 +909,7 @@ void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int ms
 		msr |= HWP_ENERGY_PERF_PREFERENCE(hwp_req->hwp_epp);
 		msr |= HWP_ACTIVITY_WINDOW(hwp_req->hwp_window);
 		msr |= HWP_PACKAGE_CONTROL(hwp_req->hwp_use_pkg);
-		put_msr(cpu, msr_offset, msr);
+		intel_put_msr(cpu, msr_offset, msr);
 	} else if (authentic_amd) {
 		/* AMD EPP need to set desired perf with zero */
 		hwp_req->hwp_desired = 0;
@@ -1011,14 +1011,14 @@ int print_pkg_msrs(int pkg)
 	print_hwp_request_pkg(pkg, &req, "");
 
 	if (has_hwp_notify) {
-		get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr);
+		intel_get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr);
 		fprintf(stderr,
 		"pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n",
 		pkg, msr,
 		((msr) & 0x2) ? "EN" : "Dis",
 		((msr) & 0x1) ? "EN" : "Dis");
 	}
-	get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr);
+	intel_get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr);
 	fprintf(stderr,
 		"pkg%d: MSR_HWP_STATUS: 0x%08llx (%sExcursion_Min, %sGuaranteed_Perf_Change)\n",
 		pkg, msr,
@@ -1274,8 +1274,8 @@ int enable_hwp_on_cpu(int cpu)
 	int ret;
 
 	if (genuine_intel) {
-		get_msr(cpu, MSR_PM_ENABLE, &msr);
-		put_msr(cpu, MSR_PM_ENABLE, 1);
+		intel_get_msr(cpu, MSR_PM_ENABLE, &msr);
+		intel_put_msr(cpu, MSR_PM_ENABLE, 1);
 	} else if (authentic_amd) {
 		ret = amd_get_msr(cpu, MSR_AMD_CPPC_ENABLE, (unsigned long *)(&msr));
 		if (ret < 0)
@@ -1312,14 +1312,14 @@ int update_cpu_msrs(int cpu)
 	if (update_turbo) {
 		int turbo_is_present_and_disabled;
 
-		get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
+		intel_get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr);
 
 		turbo_is_present_and_disabled = ((msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) != 0);
 
 		if (turbo_update_value == 1)	{
 			if (turbo_is_present_and_disabled) {
 				msr &= ~MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
-				put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+				intel_put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
 				if (verbose)
 					printf("cpu%d: turbo ENABLE\n", cpu);
 			}
@@ -1330,7 +1330,7 @@ int update_cpu_msrs(int cpu)
 			 * but cpu may be in a different package, so we always write.
 			 */
 			msr |= MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
-			put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
+			intel_put_msr(cpu, MSR_IA32_MISC_ENABLE, msr);
 			if (verbose)
 				printf("cpu%d: turbo DISABLE\n", cpu);
 		}
@@ -1455,7 +1455,7 @@ void verify_hwp_is_enabled(void)
 
 	/* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
 	if (genuine_intel)
-		get_msr(base_cpu, MSR_PM_ENABLE, &msr);
+		intel_get_msr(base_cpu, MSR_PM_ENABLE, &msr);
 	else if (authentic_amd) {
 		ret = amd_get_msr(base_cpu, MSR_AMD_CPPC_ENABLE, (unsigned long *)(&msr));
 		if (ret < 0)
@@ -1561,7 +1561,7 @@ void early_cpuid(void)
 		if (model == 0x4F) {
 			unsigned long long msr;
 
-			get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
+			intel_get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
 			bdx_highest_ratio = msr & 0xFF;
 		}
-- 
2.34.1
Re: [PATCH Resend 3/8] tools/power x86_energy_perf_policy: rename get_msr() and put_msr() with intel prefix
Posted by Mario Limonciello 1 year, 10 months ago
On 1/29/2024 23:56, Perry Yuan wrote:
> From: Perry Yuan <Perry.Yuan@amd.com>
> 
> Rename the get_msr() and put_msr() functions with an intel_ prefix to
> better differentiate them from the MSR operations of other processor
> vendors.
> 
> No functional impact.
> 
> Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
> Suggested-by: Fontenot Nathan <Nathan.Fontenot@amd.com>

Maybe this should be squashed with patch 8, since the same lines get changed.