Linus,
please pull the latest irq/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-urgent-2024-11-03
up to: e6c24e2d05bb: irqchip/gic-v4: Correctly deal with set_affinity on lazily-mapped VPEs
Two fixes for the interrupt subsystem:
- Fix an off-by-one error in the failure path of msi_domain_alloc(),
which causes the cleanup loop to terminate early and leak the first
allocated interrupt.
- Handle a corner case in GIC-V4 versus a lazily mapped Virtual
Processing Element (VPE). If the VPE has not been mapped because the
guest has not yet emitted a mapping command, then the set_affinity()
callback returns an error code, which causes the vCPU management to fail.
Return success in this case without touching the hardware. This will be
done later when the guest issues the mapping command.
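As a rough sketch of the shape of this fix (simplified, hypothetical
types and helpers; the real change is in the ITS diff below): record the
requested affinity and report success, and leave the hardware alone
until the guest actually issues the mapping command.

#include <stdbool.h>

struct vpe_sketch {
	bool	mapped;		/* has the guest issued its mapping command yet? */
	int	cpu;		/* requested/effective affinity */
};

static int vpe_set_affinity_sketch(struct vpe_sketch *vpe, int first_cpu)
{
	if (!vpe->mapped) {
		vpe->cpu = first_cpu;	/* remembered for map time */
		return 0;		/* success without touching hardware */
	}

	/* ... otherwise perform the (expensive) move on the hardware ... */
	return 0;
}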
Thanks,
tglx
------------------>
Jinjie Ruan (1):
genirq/msi: Fix off-by-one error in msi_domain_alloc()
Marc Zyngier (1):
irqchip/gic-v4: Correctly deal with set_affinity on lazily-mapped VPEs
drivers/irqchip/irq-gic-v3-its.c | 14 ++++++++++++--
kernel/irq/msi.c | 2 +-
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ab597e74ba08..52f625e07658 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3810,8 +3810,18 @@ static int its_vpe_set_affinity(struct irq_data *d,
* Check if we're racing against a VPE being destroyed, for
* which we don't want to allow a VMOVP.
*/
- if (!atomic_read(&vpe->vmapp_count))
- return -EINVAL;
+ if (!atomic_read(&vpe->vmapp_count)) {
+ if (gic_requires_eager_mapping())
+ return -EINVAL;
+
+ /*
+ * If we lazily map the VPEs, this isn't an error and
+ * we can exit cleanly.
+ */
+ cpu = cpumask_first(mask_val);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ return IRQ_SET_MASK_OK_DONE;
+ }
/*
* Changing affinity is mega expensive, so let's be as lazy as
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 3a24d6b5f559..396a067a8a56 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -718,7 +718,7 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
if (ret < 0) {
if (ops->msi_free) {
- for (i--; i > 0; i--)
+ for (i--; i >= 0; i--)
ops->msi_free(domain, info, virq + i);
}
irq_domain_free_irqs_top(domain, virq, nr_irqs);
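To see the off-by-one concretely, here is a minimal userspace sketch
(msi_free_stub() is a hypothetical stand-in, not the kernel API): with
the old 'i > 0' condition the loop stops before index 0 and the first
allocated interrupt is never freed.

#include <stdio.h>

static void msi_free_stub(int idx)
{
	printf("freed %d\n", idx);	/* stands in for ops->msi_free() */
}

int main(void)
{
	int i = 3;	/* msi_init() failed at index 3; 0..2 were set up */

	/* 'for (i--; i > 0; i--)' would free only 2 and 1, leaking 0 */
	for (i--; i >= 0; i--)
		msi_free_stub(i);

	return 0;
}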
The pull request you sent on Sun, 3 Nov 2024 11:30:58 +0100 (CET):

> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-urgent-2024-11-03

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/8f0b844adc096feee437c6271a1419ee81383fc6

Thank you!

--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html
Linus,
please pull the latest x86/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-2024-11-03
up to: fce9642c765a: x86/amd_nb: Fix compile-testing without CONFIG_AMD_NB
A trivial compile-test fix for x86:

When CONFIG_AMD_NB is not set, a COMPILE_TEST build of an AMD-specific
driver fails due to a missing inline stub. Add the stub to cure it.
Thanks,
tglx
------------------>
Arnd Bergmann (1):
x86/amd_nb: Fix compile-testing without CONFIG_AMD_NB
arch/x86/include/asm/amd_nb.h | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6f3b6aef47ba..d0caac26533f 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -116,7 +116,10 @@ static inline bool amd_gart_present(void)
#define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false
-#define node_to_amd_nb(x) NULL
+static inline struct amd_northbridge *node_to_amd_nb(int node)
+{
+ return NULL;
+}
#define amd_gart_present(x) false
#endif
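The underlying pattern, as a self-contained sketch (hypothetical names,
not the amd_nb API): a macro stub expanding to NULL breaks type-checked
uses of the return value under COMPILE_TEST, while a typed inline stub
keeps them compiling.

struct nb {
	int feature;
};

#ifdef CONFIG_HAVE_NB			/* hypothetical config symbol */
struct nb *get_nb(int node);
#else
/*
 * '#define get_nb(node) NULL' would turn 'get_nb(node)->feature' into
 * 'NULL->feature', which does not compile. The typed stub does:
 */
static inline struct nb *get_nb(int node)
{
	return NULL;
}
#endif

static int probe_sketch(int node)
{
	/* type-checks in both configurations; without the subsystem the
	 * stub returns NULL and the dereference is never reached */
	return get_nb(node) ? get_nb(node)->feature : -1;
}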
The pull request you sent on Sun, 3 Nov 2024 11:31:05 +0100 (CET):

> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-2024-11-03

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/b9021de3ec2f39074aae92ed69c3823e30cd8cdb

Thank you!

--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html
Linus,
please pull the latest perf/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf-urgent-2024-11-03
up to: e3dfd64c1f34: perf: Fix missing RCU reader protection in perf_event_clear_cpumask()
A single fix for perf:
perf_event_clear_cpumask() uses list_for_each_entry_rcu() without being
in an RCU read-side critical section, which triggers a "suspicious RCU
usage" warning.

It turns out that the list walk does not need to be RCU protected
because the write-side lock is held in this context.

Change it to a regular list walk.
Thanks,
tglx
------------------>
Kan Liang (1):
perf: Fix missing RCU reader protection in perf_event_clear_cpumask()
kernel/events/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cdd09769e6c5..df27d08a7232 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -13959,7 +13959,7 @@ static void perf_event_clear_cpumask(unsigned int cpu)
}
/* migrate */
- list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
+ list_for_each_entry(pmu, &pmus, entry) {
if (pmu->scope == PERF_PMU_SCOPE_NONE ||
WARN_ON_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE))
continue;
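The rule the fix relies on, reduced to a userspace analogue (pthreads,
illustrative only; not the perf code): a walker that already holds the
write-side lock needs no RCU reader protection, because the lock
excludes all concurrent mutation of the list.

#include <pthread.h>

struct pmu_like {
	struct pmu_like *next;
};

static struct pmu_like *pmus_head;
static pthread_mutex_t pmus_lock = PTHREAD_MUTEX_INITIALIZER;

static void walk_as_writer(void)
{
	struct pmu_like *p;

	pthread_mutex_lock(&pmus_lock);		/* write-side lock held ... */
	for (p = pmus_head; p; p = p->next) {
		/* ... so a plain, unprotected traversal is race-free */
	}
	pthread_mutex_unlock(&pmus_lock);
}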
The pull request you sent on Sun, 3 Nov 2024 11:31:00 +0100 (CET):

> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf-urgent-2024-11-03

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/68f05b251b7156b10a6f6547f7f8672ffb94100f

Thank you!

--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html
Linus,
please pull the latest timers/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-urgent-2024-11-03
up to: b5413156bad9: posix-cpu-timers: Clear TICK_DEP_BIT_POSIX_TIMER on clone
A single fix for POSIX CPU timers:

When a thread is cloned, the POSIX CPU timers are not inherited.

If the parent has a CPU timer armed, the corresponding tick dependency
in the task's tick_dep_mask is set and copied to the new thread, which
means the new thread and all its descendants will prevent the system
from going into full NOHZ operation.
Clear the tick dependency mask in copy_process() to fix this.
Thanks,
tglx
------------------>
Benjamin Segall (1):
posix-cpu-timers: Clear TICK_DEP_BIT_POSIX_TIMER on clone
include/linux/tick.h | 8 ++++++++
kernel/fork.c | 2 ++
2 files changed, 10 insertions(+)
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 72744638c5b0..99c9c5a7252a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -251,12 +251,19 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
if (tick_nohz_full_enabled())
tick_nohz_dep_set_task(tsk, bit);
}
+
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit)
{
if (tick_nohz_full_enabled())
tick_nohz_dep_clear_task(tsk, bit);
}
+
+static inline void tick_dep_init_task(struct task_struct *tsk)
+{
+ atomic_set(&tsk->tick_dep_mask, 0);
+}
+
static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
@@ -290,6 +297,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
+static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
diff --git a/kernel/fork.c b/kernel/fork.c
index 89ceb4a68af2..6fa9fe62e01e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -105,6 +105,7 @@
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>
+#include <linux/tick.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
@@ -2292,6 +2293,7 @@ __latent_entropy struct task_struct *copy_process(
acct_clear_integrals(p);
posix_cputimers_init(&p->posix_cputimers);
+ tick_dep_init_task(p);
p->io_context = NULL;
audit_set_context(p, NULL);
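The shape of the fix in miniature (illustrative, simplified task struct;
not the kernel code): state copied wholesale from the parent has to be
re-initialized explicitly when the resource it tracks, the armed timer,
is not inherited.

#include <stdatomic.h>

struct task_sketch {
	atomic_int tick_dep_mask;
	/* ... plenty of other state copied from the parent ... */
};

static void copy_process_sketch(struct task_sketch *child,
				const struct task_sketch *parent)
{
	*child = *parent;		/* dup_task_struct() analogue */

	/* timers are not inherited, so their tick dependency must not be */
	atomic_store(&child->tick_dep_mask, 0);
}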
The pull request you sent on Sun, 3 Nov 2024 11:31:03 +0100 (CET):

> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-urgent-2024-11-03

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/b019b4a6706f3ee133d68a29ae92cc6695e86d6e

Thank you!

--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html
Linus,
please pull the latest sched/urgent branch from:
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched-urgent-2024-11-03
up to: 69d5e722be94: sched/ext: Fix scx vs sched_delayed
A few scheduler fixes:
- Plug a race between pick_next_task_fair() and try_to_wake_up() where
both try to write to the same task, even though both paths hold a
runqueue lock, but obviously from different runqueues.
The problem is that the store to task::on_rq in __block_task() is
visible to try_to_wake_up(), which assumes that the task is not queued.
Both sides then operate on the same task.

Cure it by rearranging __block_task() so that the store to task::on_rq
is the last operation on the task.
- Prevent a potential NULL pointer dereference in task_numa_work()
task_numa_work() iterates the VMAs of a process. A concurrent unmap of
the address space can result in a NULL pointer return from vma_next()
which is unchecked.
Add the missing NULL pointer check to prevent this.
- Operate on the correct scheduler policy in task_should_scx()

task_should_scx() returns true when a task should be handled by sched
EXT. It checks the task's scheduling policy.

This fails when the check is done before a policy has been set.

Cure it by handing the policy into task_should_scx() so it operates
on the requested value; a small sketch of this follows the list.
- Add the missing handling of sched EXT in the delayed dequeue
mechanism. This was simply forgotten.
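A miniature of the policy fix (hypothetical types, not the scheduler
code): during a setscheduler operation the task still carries the old
policy, so the helper must be handed the requested value instead of
reading it from the task.

#include <stdbool.h>

enum { POLICY_NORMAL, POLICY_EXT };

struct task_like {
	int policy;
};

/* after the fix: decide on the value being requested ... */
static bool should_scx_sketch(int policy)
{
	return policy == POLICY_EXT;
}

static void setscheduler_sketch(struct task_like *p, int new_policy)
{
	/*
	 * ... because reading p->policy at this point would still see
	 * the old policy; the task is only updated further down.
	 */
	bool use_ext = should_scx_sketch(new_policy);

	(void)use_ext;			/* pick the sched class accordingly */
	p->policy = new_policy;
}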
Thanks,
tglx
------------------>
Aboorva Devarajan (1):
sched: Pass correct scheduling policy to __setscheduler_class
Peter Zijlstra (2):
sched: Fix pick_next_task_fair() vs try_to_wake_up() race
sched/ext: Fix scx vs sched_delayed
Shawn Wang (1):
sched/numa: Fix the potential null pointer dereference in task_numa_work()
kernel/sched/core.c | 8 ++++----
kernel/sched/ext.c | 18 ++++++++++++++----
kernel/sched/ext.h | 2 +-
kernel/sched/fair.c | 25 ++++++++++++++++---------
kernel/sched/sched.h | 36 +++++++++++++++++++++++++++++++++---
kernel/sched/syscalls.c | 2 +-
6 files changed, 69 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dbfb5717d6af..719e0ed1e976 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4711,7 +4711,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
#ifdef CONFIG_SCHED_CLASS_EXT
- } else if (task_should_scx(p)) {
+ } else if (task_should_scx(p->policy)) {
p->sched_class = &ext_sched_class;
#endif
} else {
@@ -7025,7 +7025,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
}
EXPORT_SYMBOL(default_wake_function);
-const struct sched_class *__setscheduler_class(struct task_struct *p, int prio)
+const struct sched_class *__setscheduler_class(int policy, int prio)
{
if (dl_prio(prio))
return &dl_sched_class;
@@ -7034,7 +7034,7 @@ const struct sched_class *__setscheduler_class(struct task_struct *p, int prio)
return &rt_sched_class;
#ifdef CONFIG_SCHED_CLASS_EXT
- if (task_should_scx(p))
+ if (task_should_scx(policy))
return &ext_sched_class;
#endif
@@ -7142,7 +7142,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
queue_flag &= ~DEQUEUE_MOVE;
prev_class = p->sched_class;
- next_class = __setscheduler_class(p, prio);
+ next_class = __setscheduler_class(p->policy, prio);
if (prev_class != next_class && p->se.sched_delayed)
dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5900b06fd036..721a75489ff5 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4256,14 +4256,14 @@ static const struct kset_uevent_ops scx_uevent_ops = {
* Used by sched_fork() and __setscheduler_prio() to pick the matching
* sched_class. dl/rt are already handled.
*/
-bool task_should_scx(struct task_struct *p)
+bool task_should_scx(int policy)
{
if (!scx_enabled() ||
unlikely(scx_ops_enable_state() == SCX_OPS_DISABLING))
return false;
if (READ_ONCE(scx_switching_all))
return true;
- return p->policy == SCHED_EXT;
+ return policy == SCHED_EXT;
}
/**
@@ -4489,11 +4489,16 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
scx_task_iter_start(&sti);
while ((p = scx_task_iter_next_locked(&sti))) {
const struct sched_class *old_class = p->sched_class;
+ const struct sched_class *new_class =
+ __setscheduler_class(p->policy, p->prio);
struct sched_enq_and_set_ctx ctx;
+ if (old_class != new_class && p->se.sched_delayed)
+ dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+
sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
- p->sched_class = __setscheduler_class(p, p->prio);
+ p->sched_class = new_class;
check_class_changing(task_rq(p), p, old_class);
sched_enq_and_set_task(&ctx);
@@ -5199,12 +5204,17 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
scx_task_iter_start(&sti);
while ((p = scx_task_iter_next_locked(&sti))) {
const struct sched_class *old_class = p->sched_class;
+ const struct sched_class *new_class =
+ __setscheduler_class(p->policy, p->prio);
struct sched_enq_and_set_ctx ctx;
+ if (old_class != new_class && p->se.sched_delayed)
+ dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
+
sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
p->scx.slice = SCX_SLICE_DFL;
- p->sched_class = __setscheduler_class(p, p->prio);
+ p->sched_class = new_class;
check_class_changing(task_rq(p), p, old_class);
sched_enq_and_set_task(&ctx);
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 246019519231..b1675bb59fc4 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -18,7 +18,7 @@ bool scx_can_stop_tick(struct rq *rq);
void scx_rq_activate(struct rq *rq);
void scx_rq_deactivate(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
-bool task_should_scx(struct task_struct *p);
+bool task_should_scx(int policy);
void init_sched_ext_class(void);
static inline u32 scx_cpuperf_target(s32 cpu)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c157d4860a3b..2d16c8545c71 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3369,7 +3369,7 @@ static void task_numa_work(struct callback_head *work)
vma = vma_next(&vmi);
}
- do {
+ for (; vma; vma = vma_next(&vmi)) {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_UNSUITABLE);
@@ -3491,7 +3491,7 @@ static void task_numa_work(struct callback_head *work)
*/
if (vma_pids_forced)
break;
- } for_each_vma(vmi, vma);
+ }
/*
* If no VMAs are remaining and VMAs were skipped due to the PID
@@ -5625,8 +5625,9 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
struct sched_entity *se = pick_eevdf(cfs_rq);
if (se->sched_delayed) {
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
- SCHED_WARN_ON(se->sched_delayed);
- SCHED_WARN_ON(se->on_rq);
+ /*
+ * Must not reference @se again, see __block_task().
+ */
return NULL;
}
return se;
@@ -7176,7 +7177,11 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
/* Fix-up what dequeue_task_fair() skipped */
hrtick_update(rq);
- /* Fix-up what block_task() skipped. */
+ /*
+ * Fix-up what block_task() skipped.
+ *
+ * Must be last, @p might not be valid after this.
+ */
__block_task(rq, p);
}
@@ -7193,12 +7198,14 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
util_est_dequeue(&rq->cfs, p);
- if (dequeue_entities(rq, &p->se, flags) < 0) {
- util_est_update(&rq->cfs, p, DEQUEUE_SLEEP);
+ util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
+ if (dequeue_entities(rq, &p->se, flags) < 0)
return false;
- }
- util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
+ /*
+ * Must not reference @p after dequeue_entities(DEQUEUE_DELAYED).
+ */
+
hrtick_update(rq);
return true;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 081519ffab46..6c54a57275cc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2769,8 +2769,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
static inline void __block_task(struct rq *rq, struct task_struct *p)
{
- WRITE_ONCE(p->on_rq, 0);
- ASSERT_EXCLUSIVE_WRITER(p->on_rq);
if (p->sched_contributes_to_load)
rq->nr_uninterruptible++;
@@ -2778,6 +2776,38 @@ static inline void __block_task(struct rq *rq, struct task_struct *p)
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
+
+ ASSERT_EXCLUSIVE_WRITER(p->on_rq);
+
+ /*
+ * The moment this write goes through, ttwu() can swoop in and migrate
+ * this task, rendering our rq->__lock ineffective.
+ *
+ * __schedule() try_to_wake_up()
+ * LOCK rq->__lock LOCK p->pi_lock
+ * pick_next_task()
+ * pick_next_task_fair()
+ * pick_next_entity()
+ * dequeue_entities()
+ * __block_task()
+ * RELEASE p->on_rq = 0 if (p->on_rq && ...)
+ * break;
+ *
+ * ACQUIRE (after ctrl-dep)
+ *
+ * cpu = select_task_rq();
+ * set_task_cpu(p, cpu);
+ * ttwu_queue()
+ * ttwu_do_activate()
+ * LOCK rq->__lock
+ * activate_task()
+ * STORE p->on_rq = 1
+ * UNLOCK rq->__lock
+ *
+ * Callers must ensure to not reference @p after this -- we no longer
+ * own it.
+ */
+ smp_store_release(&p->on_rq, 0);
}
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
@@ -3800,7 +3830,7 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
-extern const struct sched_class *__setscheduler_class(struct task_struct *p, int prio);
+extern const struct sched_class *__setscheduler_class(int policy, int prio);
extern void set_load_weight(struct task_struct *p, bool update_load);
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 0470bcc3d204..24f9f90b6574 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -707,7 +707,7 @@ int __sched_setscheduler(struct task_struct *p,
}
prev_class = p->sched_class;
- next_class = __setscheduler_class(p, newprio);
+ next_class = __setscheduler_class(policy, newprio);
if (prev_class != next_class && p->se.sched_delayed)
dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
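The ordering that the rearranged __block_task() above relies on, reduced
to a userspace miniature (C11 atomics; illustrative, not the kernel
primitives): the release store publishes "not queued" only after every
other access to the task, and the waker's acquire read pairs with it,
standing in for the kernel's control dependency plus
smp_acquire__after_ctrl_dep().

#include <stdatomic.h>
#include <stdbool.h>

struct task_mini {
	atomic_int on_rq;
	int state;		/* stand-in for the rest of the task */
};

static void block_side(struct task_mini *p)
{
	p->state = 42;		/* all other accesses first ... */

	/*
	 * ... then publish. After this store another CPU may take the
	 * task over, so this must be the last access to *p here.
	 */
	atomic_store_explicit(&p->on_rq, 0, memory_order_release);
}

static bool wakeup_side(struct task_mini *p)
{
	/* pairs with the release store above */
	if (atomic_load_explicit(&p->on_rq, memory_order_acquire) == 0) {
		/* safe to take the task over; guaranteed to see state == 42 */
		return p->state == 42;
	}
	return false;
}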
The pull request you sent on Sun, 3 Nov 2024 11:31:02 +0100 (CET):

> git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched-urgent-2024-11-03

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/33e83ffe4c57132c73b7d3fb7919006c5296c496

Thank you!

--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html