The following commit has been merged into the perf/core branch of tip:
Commit-ID: b9e52b11d2e5e403afaf69a7f8d6b29f8380ed38
Gitweb: https://git.kernel.org/tip/b9e52b11d2e5e403afaf69a7f8d6b29f8380ed38
Author: Kan Liang <kan.liang@linux.intel.com>
AuthorDate: Fri, 05 Dec 2025 16:16:38 -08:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 17 Dec 2025 13:31:03 +01:00
perf: Add generic exclude_guest support
Only KVM knows the exact time when a guest is entering/exiting. Expose
two interfaces to KVM to switch the ownership of the PMU resources.

All the pinned events must be scheduled in first. Extend the
perf_event_sched_in() helper to support an extra flag, e.g., EVENT_GUEST.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Xudong Hao <xudong.hao@intel.com>
Link: https://patch.msgid.link/20251206001720.468579-3-seanjc@google.com
---
kernel/events/core.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 406371c..fab358d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2870,14 +2870,15 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 				struct perf_event_context *ctx,
-				struct pmu *pmu)
+				struct pmu *pmu,
+				enum event_type_t event_type)
 {
-	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED);
+	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_PINNED | event_type);
 	if (ctx)
-		ctx_sched_in(ctx, pmu, EVENT_PINNED);
-	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, pmu, EVENT_PINNED | event_type);
+	ctx_sched_in(&cpuctx->ctx, pmu, EVENT_FLEXIBLE | event_type);
 	if (ctx)
-		ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, pmu, EVENT_FLEXIBLE | event_type);
 }
 
 /*
@@ -2933,7 +2934,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	else if (event_type & EVENT_PINNED)
 		ctx_sched_out(&cpuctx->ctx, pmu, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, task_ctx, pmu);
+	perf_event_sched_in(cpuctx, task_ctx, pmu, 0);
 
 	for_each_epc(epc, &cpuctx->ctx, pmu, 0)
 		perf_pmu_enable(epc->pmu);
@@ -4151,7 +4152,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
 		ctx_sched_out(&cpuctx->ctx, NULL, EVENT_FLEXIBLE);
 	}
 
-	perf_event_sched_in(cpuctx, ctx, NULL);
+	perf_event_sched_in(cpuctx, ctx, NULL, 0);
 
 	perf_ctx_sched_task_cb(cpuctx->task_ctx, task, true);
 