In the BPF schedulers that use bpf_ktime_get_ns() -- scx_central and
scx_flatcg -- replace the bpf_ktime_get_ns() calls with
scx_bpf_clock_get_ns().
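
For example, a helper that timestamps when a task started running can
switch over with no other changes. A minimal sketch, assuming only that
scx_bpf_clock_get_ns() is a drop-in, u64-returning replacement for
bpf_ktime_get_ns(); cpu_slice_expired, started_at and slice_ns are
hypothetical names used for illustration:

  static bool cpu_slice_expired(u64 started_at, u64 slice_ns)
  {
          /* was: u64 now = bpf_ktime_get_ns(); */
          u64 now = scx_bpf_clock_get_ns();

          /* started_at == 0 marks an idle CPU, as in scx_central */
          if (!started_at)
                  return false;
          return now - started_at >= slice_ns;
  }
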
Signed-off-by: Changwoo Min <changwoo@igalia.com>
---
tools/sched_ext/scx_central.bpf.c | 4 ++--
tools/sched_ext/scx_flatcg.bpf.c  | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c
index e6fad6211f6c..cb7428b6a198 100644
--- a/tools/sched_ext/scx_central.bpf.c
+++ b/tools/sched_ext/scx_central.bpf.c
@@ -245,7 +245,7 @@ void BPF_STRUCT_OPS(central_running, struct task_struct *p)
s32 cpu = scx_bpf_task_cpu(p);
u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
if (started_at)
- *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */
+ *started_at = scx_bpf_clock_get_ns() ?: 1; /* 0 indicates idle */
}
void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
@@ -258,7 +258,7 @@ void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
- u64 now = bpf_ktime_get_ns();
+ u64 now = scx_bpf_clock_get_ns();
u64 nr_to_kick = nr_queued;
s32 i, curr_cpu;
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 4e3afcd260bf..3be99f3c32fd 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -734,7 +734,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
struct fcg_cpu_ctx *cpuc;
struct fcg_cgrp_ctx *cgc;
struct cgroup *cgrp;
- u64 now = bpf_ktime_get_ns();
+ u64 now = scx_bpf_clock_get_ns();
bool picked_next = false;
cpuc = find_cpu_ctx();
--
2.47.0