Several drivers cannot support groups, but enforce this inconsistently
(including not at all) in their event_init routines. Add a helper so
that such drivers can simply and robustly check for the acceptable
conditions that their event is either standalone, or the first one
being added to a software-only group.
In particular it took a while to see that marvell_cn10k_tad_pmu was
seemingly trying to rely on the empirical behaviour of perf tool
creating group leader events with disabled=1 and subsequent siblings
with disabled=0. Down with this sort of thing!
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
arch/x86/events/amd/ibs.c | 30 ++++++---------------------
drivers/devfreq/event/rockchip-dfi.c | 3 +++
drivers/perf/alibaba_uncore_drw_pmu.c | 11 +---------
drivers/perf/arm_dmc620_pmu.c | 12 +----------
drivers/perf/dwc_pcie_pmu.c | 10 ++-------
drivers/perf/marvell_cn10k_tad_pmu.c | 6 ++----
drivers/perf/marvell_pem_pmu.c | 11 ++--------
include/linux/perf_event.h | 7 +++++++
8 files changed, 24 insertions(+), 66 deletions(-)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 112f43b23ebf..95de309fc7d5 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -248,27 +248,6 @@ int forward_event_to_ibs(struct perf_event *event)
return -ENOENT;
}
-/*
- * Grouping of IBS events is not possible since IBS can have only
- * one event active at any point in time.
- */
-static int validate_group(struct perf_event *event)
-{
- struct perf_event *sibling;
-
- if (event->group_leader == event)
- return 0;
-
- if (event->group_leader->pmu == event->pmu)
- return -EINVAL;
-
- for_each_sibling_event(sibling, event->group_leader) {
- if (sibling->pmu == event->pmu)
- return -EINVAL;
- }
- return 0;
-}
-
static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
struct perf_event *event)
{
@@ -282,7 +261,6 @@ static int perf_ibs_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	struct perf_ibs *perf_ibs;
 	u64 config;
-	int ret;
 
 	perf_ibs = get_ibs_pmu(event->attr.type);
 	if (!perf_ibs)
@@ -309,9 +287,12 @@ static int perf_ibs_init(struct perf_event *event)
event->attr.exclude_hv))
return -EINVAL;
- ret = validate_group(event);
- if (ret)
- return ret;
+ /*
+ * Grouping of IBS events is not possible since IBS can have only
+ * one event active at any point in time.
+ */
+ if (in_hardware_group(event))
+ return -EINVAL;
if (hwc->sample_period) {
if (config & perf_ibs->cnt_mask)
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..88a9ecbe96ce 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -413,6 +413,9 @@ static int rockchip_ddr_perf_event_init(struct perf_event *event)
dev_warn(dfi->dev, "Can't provide per-task data!\n");
return -EINVAL;
}
+ /* Disallow groups since we can't start/stop/read multiple counters at once */
+ if (in_hardware_group(event))
+ return -EINVAL;
return 0;
}
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 99a0ef9817e0..0081618741c3 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -526,7 +526,6 @@ static int ali_drw_pmu_event_init(struct perf_event *event)
{
struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct perf_event *sibling;
struct device *dev = drw_pmu->pmu.dev;
if (event->attr.type != event->pmu->type)
@@ -548,19 +547,11 @@ static int ali_drw_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- if (event->group_leader != event &&
- !is_software_event(event->group_leader)) {
+ if (in_hardware_group(event)) {
dev_err(dev, "driveway only allow one event!\n");
return -EINVAL;
}
- for_each_sibling_event(sibling, event->group_leader) {
- if (sibling != event && !is_software_event(sibling)) {
- dev_err(dev, "driveway event not allowed!\n");
- return -EINVAL;
- }
- }
-
/* reset all the pmu counters */
writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 619cf937602f..24308de80246 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -513,7 +513,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
@@ -544,22 +543,13 @@ static int dmc620_pmu_event_init(struct perf_event *event)
hwc->idx = -1;
- if (event->group_leader == event)
- return 0;
-
/*
* We can't atomically disable all HW counters so only one event allowed,
* although software events are acceptable.
*/
- if (!is_software_event(event->group_leader))
+ if (in_hardware_group(event))
return -EINVAL;
- for_each_sibling_event(sibling, event->group_leader) {
- if (sibling != event &&
- !is_software_event(sibling))
- return -EINVAL;
- }
-
return 0;
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 146ff57813fb..78c522658d84 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -353,7 +353,6 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
- struct perf_event *sibling;
u32 lane;
if (event->attr.type != event->pmu->type)
@@ -367,15 +366,10 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- if (event->group_leader != event &&
- !is_software_event(event->group_leader))
+ /* Disallow groups since we can't start/stop/read multiple counters at once */
+ if (in_hardware_group(event))
return -EINVAL;
- for_each_sibling_event(sibling, event->group_leader) {
- if (sibling->pmu != event->pmu && !is_software_event(sibling))
- return -EINVAL;
- }
-
if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
return -EINVAL;
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 51ccb0befa05..ee6505cb01a7 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -152,10 +152,8 @@ static int tad_pmu_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (!event->attr.disabled)
- return -EINVAL;
-
- if (event->state != PERF_EVENT_STATE_OFF)
+ /* Disallow groups since we can't start/stop/read multiple counters at once */
+ if (in_hardware_group(event))
return -EINVAL;
event->cpu = tad_pmu->cpu;
diff --git a/drivers/perf/marvell_pem_pmu.c b/drivers/perf/marvell_pem_pmu.c
index 29fbcd1848e4..53a35a5de7f8 100644
--- a/drivers/perf/marvell_pem_pmu.c
+++ b/drivers/perf/marvell_pem_pmu.c
@@ -190,7 +190,6 @@ static int pem_perf_event_init(struct perf_event *event)
{
struct pem_pmu *pmu = to_pem_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
@@ -206,16 +205,10 @@ static int pem_perf_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EOPNOTSUPP;
- /* We must NOT create groups containing mixed PMUs */
- if (event->group_leader->pmu != event->pmu &&
- !is_software_event(event->group_leader))
+ /* Disallow groups since we can't start/stop/read multiple counters at once */
+ if (in_hardware_group(event))
return -EINVAL;
- for_each_sibling_event(sibling, event->group_leader) {
- if (sibling->pmu != event->pmu &&
- !is_software_event(sibling))
- return -EINVAL;
- }
/*
* Set ownership of event to one CPU, same event can not be observed
* on multiple cpus at same time.
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ec9d96025683..4d439c24c901 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1556,6 +1556,13 @@ static inline int in_software_context(struct perf_event *event)
return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
+/* True if the event has (or would have) any non-software siblings */
+static inline bool in_hardware_group(const struct perf_event *event)
+{
+ return event != event->group_leader &&
+ !in_software_context(event->group_leader);
+}
+
static inline int is_exclusive_pmu(struct pmu *pmu)
{
return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
--
2.39.2.101.g768bb238c484.dirty
Hi Robin, kernel test robot noticed the following build warnings: [auto build test WARNING on linus/master] [also build test WARNING on v6.17-rc1 next-20250814] [cannot apply to perf-tools-next/perf-tools-next tip/perf/core perf-tools/perf-tools acme/perf/core] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Robin-Murphy/perf-arm-cmn-Fix-event-validation/20250814-010626 base: linus/master patch link: https://lore.kernel.org/r/b05607c3ce0d3ce52de1784823ef9f6de324283c.1755096883.git.robin.murphy%40arm.com patch subject: [PATCH 13/19] perf: Add helper for checking grouped events config: i386-randconfig-003-20250814 (https://download.01.org/0day-ci/archive/20250814/202508141353.JZWHsrYP-lkp@intel.com/config) compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250814/202508141353.JZWHsrYP-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202508141353.JZWHsrYP-lkp@intel.com/ All warnings (new ones prefixed by >>): >> arch/x86/events/amd/ibs.c:264:6: warning: unused variable 'ret' [-Wunused-variable] 264 | int ret; | ^~~ 1 warning generated. 
vim +/ret +264 arch/x86/events/amd/ibs.c d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 258 b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 259 static int perf_ibs_init(struct perf_event *event) b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 260 { 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 261 struct hw_perf_event *hwc = &event->hw; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 262 struct perf_ibs *perf_ibs; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 263 u64 config; 7c2128235eff99 arch/x86/events/amd/ibs.c Ravi Bangoria 2023-06-20 @264 int ret; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 265 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 266 perf_ibs = get_ibs_pmu(event->attr.type); 2fad201fe38ff9 arch/x86/events/amd/ibs.c Ravi Bangoria 2023-05-04 267 if (!perf_ibs) 2fad201fe38ff9 arch/x86/events/amd/ibs.c Ravi Bangoria 2023-05-04 268 return -ENOENT; 2fad201fe38ff9 arch/x86/events/amd/ibs.c Ravi Bangoria 2023-05-04 269 450bbd493d436f arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-03-12 270 config = event->attr.config; 450bbd493d436f arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-03-12 271 450bbd493d436f arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-03-12 272 if (event->pmu != &perf_ibs->pmu) b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 273 return -ENOENT; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 274 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 275 if (config & ~perf_ibs->config_mask) 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 276 return -EINVAL; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 
2011-12-15 277 0f9e0d7928d8e8 arch/x86/events/amd/ibs.c Namhyung Kim 2023-11-30 278 if (has_branch_stack(event)) 0f9e0d7928d8e8 arch/x86/events/amd/ibs.c Namhyung Kim 2023-11-30 279 return -EOPNOTSUPP; 0f9e0d7928d8e8 arch/x86/events/amd/ibs.c Namhyung Kim 2023-11-30 280 d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 281 /* handle exclude_{user,kernel} in the IRQ handler */ d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 282 if (event->attr.exclude_host || event->attr.exclude_guest || d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 283 event->attr.exclude_idle) d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 284 return -EINVAL; d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 285 d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 286 if (!(event->attr.config2 & IBS_SW_FILTER_MASK) && d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 287 (event->attr.exclude_kernel || event->attr.exclude_user || d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 288 event->attr.exclude_hv)) d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 289 return -EINVAL; d29e744c71673a arch/x86/events/amd/ibs.c Namhyung Kim 2024-12-03 290 ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 291 /* ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 292 * Grouping of IBS events is not possible since IBS can have only ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 293 * one event active at any point in time. 
ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 294 */ ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 295 if (in_hardware_group(event)) ccec93f5de464b arch/x86/events/amd/ibs.c Robin Murphy 2025-08-13 296 return -EINVAL; 7c2128235eff99 arch/x86/events/amd/ibs.c Ravi Bangoria 2023-06-20 297 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 298 if (hwc->sample_period) { 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 299 if (config & perf_ibs->cnt_mask) 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 300 /* raw max_cnt may not be set */ 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 301 return -EINVAL; 88c7bcad71c83f arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 302 b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 303 if (event->attr.freq) { b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 304 hwc->sample_period = perf_ibs->min_period; b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 305 } else { 88c7bcad71c83f arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 306 /* Silently mask off lower nibble. IBS hw mandates it. 
*/ 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 307 hwc->sample_period &= ~0x0FULL; b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 308 if (hwc->sample_period < perf_ibs->min_period) b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 309 return -EINVAL; b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 310 } 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 311 } else { 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 312 u64 period = 0; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 313 e1e7844ced88f9 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 314 if (event->attr.freq) e1e7844ced88f9 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 315 return -EINVAL; e1e7844ced88f9 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 316 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 317 if (perf_ibs == &perf_ibs_op) { 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 318 period = (config & IBS_OP_MAX_CNT) << 4; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 319 if (ibs_caps & IBS_CAPS_OPCNTEXT) 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 320 period |= config & IBS_OP_MAX_CNT_EXT_MASK; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 321 } else { 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 322 period = (config & IBS_FETCH_MAX_CNT) << 4; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 323 } 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 324 db98c5faf8cb35 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 325 config &= ~perf_ibs->cnt_mask; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 326 event->attr.sample_period = period; 598bdf4fefff5a arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 327 hwc->sample_period = period; 
510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 328 b2fc7b282bf7c1 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-01-15 329 if (hwc->sample_period < perf_ibs->min_period) 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 330 return -EINVAL; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 331 } 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 332 d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 333 if (perf_ibs_ldlat_event(perf_ibs, event)) { d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 334 u64 ldlat = event->attr.config1 & 0xFFF; d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 335 d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 336 if (ldlat < 128 || ldlat > 2048) 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 337 return -EINVAL; d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 338 ldlat >>= 7; d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 339 d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 340 config |= (ldlat - 1) << 59; d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 341 config |= IBS_OP_L3MISSONLY | IBS_OP_LDLAT_EN; d20610c19b4a22 arch/x86/events/amd/ibs.c Ravi Bangoria 2025-02-05 342 } 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 343 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 344 /* 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 345 * If we modify hwc->sample_period, we also need to update 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 346 * hwc->last_period and hwc->period_left. 
6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 347 */ 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 348 hwc->last_period = hwc->sample_period; 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 349 local64_set(&hwc->period_left, hwc->sample_period); 6accb9cf760804 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2012-04-02 350 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 351 hwc->config_base = perf_ibs->msr; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 352 hwc->config = config; 510419435c6948 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-12-15 353 b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 354 return 0; b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 355 } b716916679e720 arch/x86/kernel/cpu/perf_event_amd_ibs.c Robert Richter 2011-09-21 356 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
© 2016 - 2025 Red Hat, Inc.