Factor the task movement logic in scx_dispatch_from_dsq() out into a new helper, move_task_between_dsqs(), so that it can be reused. Pure reorganization. No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/sched/ext.c | 116 ++++++++++++++++++++++++++++++++++-------------------
1 file changed, 75 insertions(+), 41 deletions(-)
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2368,6 +2368,73 @@ static inline bool task_can_run_on_remot
static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
#endif /* CONFIG_SMP */
+/**
+ * move_task_between_dsqs() - Move a task from one DSQ to another
+ * @p: target task
+ * @enq_flags: %SCX_ENQ_*
+ * @src_dsq: DSQ @p is currently on, must not be a local DSQ
+ * @dst_dsq: DSQ @p is being moved to, can be any DSQ
+ *
+ * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
+ * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
+ * will change. As @p's task_rq is locked, this function doesn't need to use the
+ * holding_cpu mechanism.
+ *
+ * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
+ * return value, is locked.
+ */
+static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
+ struct scx_dispatch_q *src_dsq,
+ struct scx_dispatch_q *dst_dsq)
+{
+ struct rq *src_rq = task_rq(p), *dst_rq;
+
+ BUG_ON(src_dsq->id == SCX_DSQ_LOCAL);
+ lockdep_assert_held(&src_dsq->lock);
+ lockdep_assert_rq_held(src_rq);
+
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
+ if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
+ dst_dsq = find_global_dsq(p);
+ dst_rq = src_rq;
+ }
+ } else {
+ /* no need to migrate if destination is a non-local DSQ */
+ dst_rq = src_rq;
+ }
+
+ /*
+ * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
+ * CPU, @p will be migrated.
+ */
+ if (dst_dsq->id == SCX_DSQ_LOCAL) {
+ /* @p is going from a non-local DSQ to a local DSQ */
+ if (src_rq == dst_rq) {
+ task_unlink_from_dsq(p, src_dsq);
+ move_local_task_to_local_dsq(p, enq_flags,
+ src_dsq, dst_rq);
+ raw_spin_unlock(&src_dsq->lock);
+ } else {
+ raw_spin_unlock(&src_dsq->lock);
+ move_remote_task_to_local_dsq(p, enq_flags,
+ src_rq, dst_rq);
+ }
+ } else {
+ /*
+ * @p is going from a non-local DSQ to a non-local DSQ. As
+ * $src_dsq is already locked, do an abbreviated dequeue.
+ */
+ task_unlink_from_dsq(p, src_dsq);
+ p->scx.dsq = NULL;
+ raw_spin_unlock(&src_dsq->lock);
+
+ dispatch_enqueue(dst_dsq, p, enq_flags);
+ }
+
+ return dst_rq;
+}
+
static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
{
struct task_struct *p;
@@ -6041,7 +6108,7 @@ static bool scx_dispatch_from_dsq(struct
u64 enq_flags)
{
struct scx_dispatch_q *src_dsq = kit->dsq, *dst_dsq;
- struct rq *this_rq, *src_rq, *dst_rq, *locked_rq;
+ struct rq *this_rq, *src_rq, *locked_rq;
bool dispatched = false;
bool in_balance;
unsigned long flags;
@@ -6087,51 +6154,18 @@ static bool scx_dispatch_from_dsq(struct
/* @p is still on $src_dsq and stable, determine the destination */
dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p);
- if (dst_dsq->id == SCX_DSQ_LOCAL) {
- dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
- if (!task_can_run_on_remote_rq(p, dst_rq, true)) {
- dst_dsq = find_global_dsq(p);
- dst_rq = src_rq;
- }
- } else {
- /* no need to migrate if destination is a non-local DSQ */
- dst_rq = src_rq;
- }
-
/*
- * Move @p into $dst_dsq. If $dst_dsq is the local DSQ of a different
- * CPU, @p will be migrated.
+ * Apply vtime and slice updates before moving so that the new time is
+ * visible before inserting into $dst_dsq. @p is still on $src_dsq but
+ * this is safe as we're locking it.
*/
- if (dst_dsq->id == SCX_DSQ_LOCAL) {
- /* @p is going from a non-local DSQ to a local DSQ */
- if (src_rq == dst_rq) {
- task_unlink_from_dsq(p, src_dsq);
- move_local_task_to_local_dsq(p, enq_flags,
- src_dsq, dst_rq);
- raw_spin_unlock(&src_dsq->lock);
- } else {
- raw_spin_unlock(&src_dsq->lock);
- move_remote_task_to_local_dsq(p, enq_flags,
- src_rq, dst_rq);
- locked_rq = dst_rq;
- }
- } else {
- /*
- * @p is going from a non-local DSQ to a non-local DSQ. As
- * $src_dsq is already locked, do an abbreviated dequeue.
- */
- task_unlink_from_dsq(p, src_dsq);
- p->scx.dsq = NULL;
- raw_spin_unlock(&src_dsq->lock);
-
- if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
- p->scx.dsq_vtime = kit->vtime;
- dispatch_enqueue(dst_dsq, p, enq_flags);
- }
-
+ if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_VTIME)
+ p->scx.dsq_vtime = kit->vtime;
if (kit->cursor.flags & __SCX_DSQ_ITER_HAS_SLICE)
p->scx.slice = kit->slice;
+ /* execute move */
+ locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq);
dispatched = true;
out:
if (in_balance) {
On Tue, Sep 24, 2024 at 02:08:52PM -1000, Tejun Heo wrote:
> Pure reorganization. No functional changes.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>

Applied to sched_ext/for-6.13. Thanks.

--
tejun