The following commit has been merged into the timers/core branch of tip:

Commit-ID:     a6347864d97506a021c469dad35875088edc03fc
Gitweb:        https://git.kernel.org/tip/a6347864d97506a021c469dad35875088edc03fc
Author:        Frederic Weisbecker <frederic@kernel.org>
AuthorDate:    Tue, 29 Oct 2024 13:54:44 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Thu, 31 Oct 2024 10:41:42 +01:00

tick: Remove now unneeded low-res tick stop on CPUHP_AP_TICK_DYING

The generic clockevent layer now detaches and stops the underlying
clockevent from the dying CPU, unifying the tick behaviour for both
periodic and oneshot mode on offline CPUs. There is no more need for
the tick layer to care about that.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20241029125451.54574-4-frederic@kernel.org
---
 kernel/time/tick-sched.c | 25 ++++++-------------------
 1 file changed, 6 insertions(+), 19 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index XXXXXXX..XXXXXXX 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -XXX,XX +XXX,XX @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
 	return HRTIMER_RESTART;
 }
 
-static void tick_sched_timer_cancel(struct tick_sched *ts)
-{
-	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
-		hrtimer_cancel(&ts->sched_timer);
-	else if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
-		tick_program_event(KTIME_MAX, 1);
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
 EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
@@ -XXX,XX +XXX,XX @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 * the tick timer.
 	 */
 	if (unlikely(expires == KTIME_MAX)) {
-		tick_sched_timer_cancel(ts);
+		if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
+			hrtimer_cancel(&ts->sched_timer);
+		else
+			tick_program_event(KTIME_MAX, 1);
 		return;
 	}
 
@@ -XXX,XX +XXX,XX @@ void tick_setup_sched_timer(bool hrtimer)
  */
 void tick_sched_timer_dying(int cpu)
 {
-	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-	struct clock_event_device *dev = td->evtdev;
 	ktime_t idle_sleeptime, iowait_sleeptime;
 	unsigned long idle_calls, idle_sleeps;
 
 	/* This must happen before hrtimers are migrated! */
-	tick_sched_timer_cancel(ts);
-
-	/*
-	 * If the clockevents doesn't support CLOCK_EVT_STATE_ONESHOT_STOPPED,
-	 * make sure not to call low-res tick handler.
-	 */
-	if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
-		dev->event_handler = clockevents_handle_noop;
+	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
+		hrtimer_cancel(&ts->sched_timer);
 
 	idle_sleeptime = ts->idle_sleeptime;
 	iowait_sleeptime = ts->iowait_sleeptime;
The following commit has been merged into the timers/core branch of tip:

Commit-ID:     8d7784373a498aaeed597edaefda35fe40162e5d
Gitweb:        https://git.kernel.org/tip/8d7784373a498aaeed597edaefda35fe40162e5d
Author:        Frederic Weisbecker <frederic@kernel.org>
AuthorDate:    Tue, 29 Oct 2024 13:54:44 +01:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Wed, 30 Oct 2024 21:02:21 +01:00

tick: Remove now unneeded low-res tick stop on CPUHP_AP_TICK_DYING

The generic clockevent layer now detaches and stops the underlying
clockevent from the dying CPU, unifying the tick behaviour for both
periodic and oneshot mode on offline CPUs. There is no more need for
the tick layer to care about that.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20241029125451.54574-4-frederic@kernel.org
---
 kernel/time/tick-sched.c | 25 ++++++-------------------
 1 file changed, 6 insertions(+), 19 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index XXXXXXX..XXXXXXX 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -XXX,XX +XXX,XX @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
 	return HRTIMER_RESTART;
 }
 
-static void tick_sched_timer_cancel(struct tick_sched *ts)
-{
-	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
-		hrtimer_cancel(&ts->sched_timer);
-	else if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
-		tick_program_event(KTIME_MAX, 1);
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
 EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
@@ -XXX,XX +XXX,XX @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	 * the tick timer.
 	 */
 	if (unlikely(expires == KTIME_MAX)) {
-		tick_sched_timer_cancel(ts);
+		if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
+			hrtimer_cancel(&ts->sched_timer);
+		else
+			tick_program_event(KTIME_MAX, 1);
 		return;
 	}
 
@@ -XXX,XX +XXX,XX @@ void tick_setup_sched_timer(bool hrtimer)
  */
 void tick_sched_timer_dying(int cpu)
 {
-	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-	struct clock_event_device *dev = td->evtdev;
 	ktime_t idle_sleeptime, iowait_sleeptime;
 	unsigned long idle_calls, idle_sleeps;
 
 	/* This must happen before hrtimers are migrated! */
-	tick_sched_timer_cancel(ts);
-
-	/*
-	 * If the clockevents doesn't support CLOCK_EVT_STATE_ONESHOT_STOPPED,
-	 * make sure not to call low-res tick handler.
-	 */
-	if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
-		dev->event_handler = clockevents_handle_noop;
+	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
+		hrtimer_cancel(&ts->sched_timer);
 
 	idle_sleeptime = ts->idle_sleeptime;
 	iowait_sleeptime = ts->iowait_sleeptime;
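Both notifications above carry the same patch, merged under two different commit IDs on consecutive days. For convenience, here is a rough sketch of how the two touched paths read once the hunks are applied. It is reconstructed only from the context lines visible in the diff above, not quoted from the full kernel/time/tick-sched.c file, so surrounding code and the remainder of tick_sched_timer_dying() are elided.

/*
 * Sketch: post-patch shape of the two paths changed above, reconstructed
 * from the diff context only.
 */

/*
 * In tick_nohz_stop_tick(): the tick_sched_timer_cancel() helper is gone
 * and its two cases are open coded at its only remaining call site.
 */
	if (unlikely(expires == KTIME_MAX)) {
		if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

/*
 * In tick_sched_timer_dying(): only the high-res hrtimer still needs
 * cancelling here; stopping the low-res clockevent of the dying CPU is
 * now done by the generic clockevent layer, so the NOHZ handler override
 * and the tick_device/clock_event_device lookups are dropped.
 */
void tick_sched_timer_dying(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t idle_sleeptime, iowait_sleeptime;
	unsigned long idle_calls, idle_sleeps;

	/* This must happen before hrtimers are migrated! */
	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
		hrtimer_cancel(&ts->sched_timer);

	idle_sleeptime = ts->idle_sleeptime;
	iowait_sleeptime = ts->iowait_sleeptime;
	/* ... idle/iowait statistics handover continues beyond the diff context ... */
}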