Remove redundant data_cfg_match calculation.

The first assignment to data_cfg_match in check_pebs_records() is a
dead store: its value is immediately overwritten by the following
statement, which additionally handles the non-adaptive case by
comparing against 0 when use_adaptive is false. Drop the dead
assignment.
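
Before this patch, the two back-to-back assignments read as follows,
with only the second one taking effect:

	data_cfg_match = (pebs_rec->format_size & GENMASK_ULL(47, 0)) == pebs_data_cfg;
	data_cfg_match = (pebs_rec->format_size & GENMASK_ULL(47, 0)) ==
			 (use_adaptive ? pebs_data_cfg : 0);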
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
---
x86/pmu_pebs.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/x86/pmu_pebs.c b/x86/pmu_pebs.c
index 6e73fc34..2848cc1e 100644
--- a/x86/pmu_pebs.c
+++ b/x86/pmu_pebs.c
@@ -296,7 +296,6 @@ static void check_pebs_records(u64 bitmask, u64 pebs_data_cfg, bool use_adaptive
pebs_record_size = pebs_rec->format_size >> RECORD_SIZE_OFFSET;
pebs_idx_match = pebs_rec->applicable_counters & bitmask;
pebs_size_match = pebs_record_size == get_pebs_record_size(pebs_data_cfg, use_adaptive);
- data_cfg_match = (pebs_rec->format_size & GENMASK_ULL(47, 0)) == pebs_data_cfg;
data_cfg_match = (pebs_rec->format_size & GENMASK_ULL(47, 0)) ==
(use_adaptive ? pebs_data_cfg : 0);
expected = pebs_idx_match && pebs_size_match && data_cfg_match;
--
2.34.1