This patch optimizes the enqueue and dequeue of rt_se; the strategy employs
a bottom-up removal approach. Specifically, when removing an rt_se at a
certain level, if it is determined that the highest priority of the rq
associated with that rt_se has not changed, there is no need to continue
removing rt_se at higher levels. At this point, only the total number
of removed rt_se needs to be recorded, and the rt_nr_running count of
the higher-level rqs should be reduced accordingly.
Signed-off-by: Xavier <xavier_qy@163.com>
---
kernel/sched/debug.c | 50 ++++++++
kernel/sched/rt.c | 278 ++++++++++++++++++++++++++++++++++++-------
kernel/sched/sched.h | 1 +
3 files changed, 289 insertions(+), 40 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c1eb9a1afd13..282153397e02 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -712,6 +712,56 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#endif
}
+static void print_rt_se(struct seq_file *m, struct sched_rt_entity *rt_se)
+{
+ struct task_struct *task;
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ if (rt_se->my_q) {
+ SEQ_printf_task_group_path(m, rt_se->my_q->tg, "%s\n");
+ return;
+ }
+#endif
+ task = container_of(rt_se, struct task_struct, rt);
+ SEQ_printf(m, " prio-%d, pid-%d, %s\n", task->prio, task->pid, task->comm);
+}
+
+/*shall be called in rq lock*/
+void print_rt_rq_task(struct seq_file *m, struct rt_rq *rt_rq)
+{
+ struct rt_prio_array *array = &rt_rq->active;
+ struct sched_rt_entity *rt_se;
+ struct list_head *queue, *head;
+ unsigned long bitmap[2];
+ int idx;
+ int count = 0;
+
+ if (!rt_rq->rt_nr_running)
+ return;
+
+ memcpy(bitmap, array->bitmap, sizeof(unsigned long) * 2);
+ idx = sched_find_first_bit(bitmap);
+ WARN_ON_ONCE(idx >= MAX_RT_PRIO);
+
+ while (1) {
+ clear_bit(idx, bitmap);
+ queue = array->queue + idx;
+ head = queue;
+ queue = queue->next;
+ do {
+ rt_se = list_entry(queue, struct sched_rt_entity, run_list);
+ print_rt_se(m, rt_se);
+ queue = queue->next;
+ count++;
+ } while (queue != head);
+ idx = sched_find_first_bit(bitmap);
+ if (idx >= MAX_RT_PRIO)
+ break;
+ }
+
+ WARN_ON_ONCE(count != rt_rq->rt_nr_running);
+}
+
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index aa4c1c874fa4..0673bce0c145 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1113,7 +1113,7 @@ void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-static void
+static int
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
@@ -1122,9 +1122,11 @@ inc_rt_prio(struct rt_rq *rt_rq, int prio)
rt_rq->highest_prio.curr = prio;
inc_rt_prio_smp(rt_rq, prio, prev_prio);
+
+ return prev_prio > prio;
}
-static void
+static int
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
@@ -1149,12 +1151,22 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
}
dec_rt_prio_smp(rt_rq, prio, prev_prio);
+ if (rt_rq->highest_prio.curr > prio)
+ return prio;
+ else
+ return 0;
}
#else
-static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
-static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline int inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+ return 0;
+}
+static inline int dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+ return 0;
+}
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
@@ -1218,28 +1230,31 @@ unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
}
static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+int inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
int prio = rt_se_prio(rt_se);
+ int prio_change;
WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
- inc_rt_prio(rt_rq, prio);
+ prio_change = inc_rt_prio(rt_rq, prio);
inc_rt_group(rt_se, rt_rq);
+ return prio_change;
}
static inline
-void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+int dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq, int prio)
{
+ int prio_changed;
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
- WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
- dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+ prio_changed = dec_rt_prio(rt_rq, prio);
dec_rt_group(rt_se, rt_rq);
+ return prio_changed;
}
/*
@@ -1255,12 +1270,13 @@ static inline bool move_entity(unsigned int flags)
return true;
}
-static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
+static void __delist_rt_entity(struct sched_rt_entity *rt_se,
+ struct rt_prio_array *array, int last_prio)
{
list_del_init(&rt_se->run_list);
- if (list_empty(array->queue + rt_se_prio(rt_se)))
- __clear_bit(rt_se_prio(rt_se), array->bitmap);
+ if (list_empty(array->queue + last_prio))
+ __clear_bit(last_prio, array->bitmap);
rt_se->on_list = 0;
}
@@ -1371,7 +1387,12 @@ update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
}
}
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
+/*
+ * Returns: -1 indicates that rt_se was not enqueued, 0 indicates that the highest
+ * priority of the rq did not change after enqueue, and 1 indicates that the highest
+ * priority of the rq changed after enqueue.
+ */
+static int __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -1386,8 +1407,8 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flag
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
if (rt_se->on_list)
- __delist_rt_entity(rt_se, array);
- return;
+ __delist_rt_entity(rt_se, array, rt_se_prio(rt_se));
+ return -1;
}
if (move_entity(flags)) {
@@ -1402,73 +1423,250 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flag
}
rt_se->on_rq = 1;
- inc_rt_tasks(rt_se, rt_rq);
+ return inc_rt_tasks(rt_se, rt_rq);
}
-static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
+/**
+ * delete rt_se from rt_rq
+ *
+ * @rt_se Nodes to be deleted
+ * @last_prio The highest priority of this rt_se before the previous round
+ * of deletion
+ * @flags operation flags
+ *
+ * Returns: =0 indicates that the highest priority of the current rq did not
+ * change during this deletion. >0 indicates it changed, and it returns the
+ * previous highest priority to use in the next round of deletion.
+ */
+static int __dequeue_rt_entity(struct sched_rt_entity *rt_se, int last_prio,
+ unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
if (move_entity(flags)) {
WARN_ON_ONCE(!rt_se->on_list);
- __delist_rt_entity(rt_se, array);
+ __delist_rt_entity(rt_se, array, last_prio);
}
rt_se->on_rq = 0;
- dec_rt_tasks(rt_se, rt_rq);
+ return dec_rt_tasks(rt_se, rt_rq, last_prio);
+}
+
+static inline void dec_rq_nr_running(struct sched_rt_entity *rt_se,
+ unsigned int rt, unsigned int rr)
+{
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+
+ rt_rq->rt_nr_running -= rt;
+ rt_rq->rr_nr_running -= rr;
+}
+
+static inline void add_rq_nr_running(struct sched_rt_entity *rt_se,
+ unsigned int rt, unsigned int rr)
+{
+ struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+
+ rt_rq->rt_nr_running += rt;
+ rt_rq->rr_nr_running += rr;
+}
+
+static inline bool on_top_rt_rq(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_RT_GROUP_SCHED
+ if (rt_se->parent)
+ return false;
+#endif
+ return true;
}
/*
- * Because the prio of an upper entry depends on the lower
- * entries, we must remove entries top - down.
+ * To optimize the enqueue and dequeue of rt_se, this strategy employs a
+ * bottom-up removal approach. Specifically, when removing an rt_se at a
+ * certain level, if it is determined that the highest priority of the rq
+ * associated with that rt_se has not changed, there is no need to continue
+ * removing rt_se at higher levels. At this point, only the total number
+ * of removed rt_se needs to be recorded, and the rt_nr_running count of
+ * higher-level rq should be removed accordingly.
+ *
+ * For enqueue operations, if an rt_se at a certain level is in the rq,
+ * it is still necessary to check the priority of the higher-level rq.
+ * If the priority of the higher-level rq is found to be lower than that
+ * of the rt_se to be added, it should be removed, as updating the highest
+ * priority of the rq during addition will cause the rq to be repositioned
+ * in the parent rq.
+ *
+ * Conversely, for dequeue operations, if an rt_se at a certain level is
+ * not in the rq, the operation can be exited immediately to reduce
+ * unnecessary checks and handling.
+ *
+ * The return value refers to the last rt_se that was removed for enqueue
+ * operations. And for dequeue operations, it refers to the last rt_se
+ * that was either removed or had its rt_nr_running updated.
*/
-static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
+static struct sched_rt_entity *dequeue_rt_stack(struct sched_rt_entity *rt_se,
+ unsigned int flags, int for_enqueue)
{
- struct sched_rt_entity *back = NULL;
- unsigned int rt_nr_running;
+ struct sched_rt_entity *last = rt_se;
+ struct sched_rt_entity *origin = rt_se;
+ unsigned int del_rt_nr = 0;
+ unsigned int del_rr_nr = 0;
+ int prio_changed = rt_se_prio(rt_se);
+ int sub_on_rq = 1;
for_each_sched_rt_entity(rt_se) {
- rt_se->back = back;
- back = rt_se;
- }
-
- rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
+ if (on_rt_rq(rt_se)) {
+ if (sub_on_rq) {
+ /*
+ * The number of tasks removed from the sub-level rt_se also needs
+ * to be subtracted from the rq of the current rt_se, as the current
+ * rt_se's rq no longer includes the number of removed tasks.
+ */
+ dec_rq_nr_running(rt_se, del_rt_nr, del_rr_nr);
+
+ if (prio_changed) {
+ /*
+ * If the removal of the lower-level rt_se causes the
+ * highest priority of the current rq to change, then the
+ * current rt_se also needs to be removed from its parent
+ * rq, and the number of deleted tasks should be
+ * accumulated.
+ */
+ del_rt_nr += rt_se_nr_running(rt_se);
+ del_rr_nr += rt_se_rr_nr_running(rt_se);
+ prio_changed = __dequeue_rt_entity(rt_se,
+ prio_changed, flags);
+ last = rt_se;
+ } else if (!for_enqueue) {
+ /* For dequeue, last may only rt_nr_running was modified.*/
+ last = rt_se;
+ }
+ } else {
+ /*
+ * Entering this branch must be for enqueue, as dequeue would break
+ * if an rt_se is not online.
+ * If the sub-level node is not online, and the current rt_se's
+ * priority is lower than the one being added, current rt_se need
+ * to be removed.
+ */
+ prio_changed = rt_se_prio(rt_se);
+ if (prio_changed > rt_se_prio(origin)) {
+ del_rt_nr += rt_se_nr_running(rt_se);
+ del_rr_nr += rt_se_rr_nr_running(rt_se);
+ prio_changed = __dequeue_rt_entity(rt_se,
+ prio_changed, flags);
+ last = rt_se;
+ } else {
+ prio_changed = 0;
+ }
+ }
- for (rt_se = back; rt_se; rt_se = rt_se->back) {
- if (on_rt_rq(rt_se))
- __dequeue_rt_entity(rt_se, flags);
+ /*
+ * If the current rt_se is on the top rt_rq, then the already deleted
+ * nodes, plus the count of the rt_rq where current rt_se located,
+ * need to be removed from the top_rt_rq.
+ */
+ if (on_top_rt_rq(rt_se)) {
+ dequeue_top_rt_rq(rt_rq_of_se(rt_se),
+ del_rt_nr + rt_rq_of_se(rt_se)->rt_nr_running);
+ }
+ sub_on_rq = 1;
+ } else if (for_enqueue) {
+ /*
+ * In the case of an enqueue operation, if a certain level is found to be
+ * not online, then the previous counts need to be reset to zero.
+ */
+ prio_changed = 0;
+ sub_on_rq = 0;
+ del_rt_nr = 0;
+ del_rr_nr = 0;
+
+ if (on_top_rt_rq(rt_se))
+ dequeue_top_rt_rq(rt_rq_of_se(rt_se),
+ rt_rq_of_se(rt_se)->rt_nr_running);
+ } else {
+ last = rt_se;
+ break;
+ }
}
- dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
+ return last;
}
+
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
+ struct sched_rt_entity *last;
+ unsigned int add_rt_nr = 0;
+ unsigned int add_rr_nr = 0;
+ int enqueue = 1;
+ int prio_change = 1;
update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
- dequeue_rt_stack(rt_se, flags);
- for_each_sched_rt_entity(rt_se)
- __enqueue_rt_entity(rt_se, flags);
+ last = dequeue_rt_stack(rt_se, flags, 1);
+ for_each_sched_rt_entity(rt_se) {
+ if (enqueue || !on_rt_rq(rt_se) || (prio_change == 1)) {
+ prio_change = __enqueue_rt_entity(rt_se, flags);
+ if (prio_change >= 0) {
+ add_rt_nr = rt_se_nr_running(rt_se);
+ add_rr_nr = rt_se_rr_nr_running(rt_se);
+ } else {
+ add_rt_nr = add_rr_nr = 0;
+ }
+ } else {
+ add_rq_nr_running(rt_se, add_rt_nr, add_rr_nr);
+ }
+
+ if (rt_se == last)
+ enqueue = 0;
+ }
+
enqueue_top_rt_rq(&rq->rt);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
+ struct sched_rt_entity *last;
+ unsigned int add_rt_nr = 0;
+ unsigned int add_rr_nr = 0;
+ int prio_change = 1;
update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
- dequeue_rt_stack(rt_se, flags);
-
+ last = dequeue_rt_stack(rt_se, flags, 0);
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);
+ if (rt_rq && rt_rq->rt_nr_running) {
+ if (on_rt_rq(rt_se)) {
+ add_rq_nr_running(rt_se, add_rt_nr, add_rr_nr);
+ } else {
+ prio_change = __enqueue_rt_entity(rt_se, flags);
+ if (prio_change == 0) {
+ /*
+ * If enqueue is successful and the priority of the rq has
+ * not changed, then the parent node only needs to add the
+ * count of the current rt_se. Otherwise, the parent node
+ * will also need to enqueue.
+ */
+ add_rt_nr = rt_se_nr_running(rt_se);
+ add_rr_nr = rt_se_rr_nr_running(rt_se);
+ }
+ }
+ } else {
+ add_rt_nr = add_rr_nr = 0;
+ }
- if (rt_rq && rt_rq->rt_nr_running)
- __enqueue_rt_entity(rt_se, flags);
+ /*
+ * last is the rt_se of the last deletion or modification of the
+ * count, so the subsequent rt_se does not need to be updated.
+ */
+ if (rt_se == last)
+ break;
}
+
enqueue_top_rt_rq(&rq->rt);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a831af102070..b634153aacf0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2878,6 +2878,7 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+extern void print_rt_rq_task(struct seq_file *m, struct rt_rq *rt_rq);
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
extern void resched_latency_warn(int cpu, u64 latency);
--
2.45.2
Hello,
kernel test robot noticed "WARNING:at_kernel/sched/rt.c:#__enqueue_rt_entity" on:
commit: ed0ed14c2b47993c00c4b3cdceabef535bcef32b ("[PATCH-RT sched v2 1/2] RT SCHED: Optimize the enqueue and dequeue operations for rt_se")
url: https://github.com/intel-lab-lkp/linux/commits/Xavier/RT-SCHED-Optimize-the-enqueue-and-dequeue-operations-for-rt_se/20240630-173825
base: https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git c793a62823d1ce8f70d9cfc7803e3ea436277cda
patch link: https://lore.kernel.org/all/20240629112812.243691-2-xavier_qy@163.com/
patch subject: [PATCH-RT sched v2 1/2] RT SCHED: Optimize the enqueue and dequeue operations for rt_se
in testcase: blktests
version: blktests-x86_64-775a058-1_20240702
with following parameters:
disk: 1SSD
test: block-group-01
compiler: gcc-13
test machine: 4 threads Intel(R) Core(TM) i5-6500 CPU @ 3.20GHz (Skylake) with 32G memory
(please refer to attached dmesg/kmsg for entire log/backtrace)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Closes: https://lore.kernel.org/oe-lkp/202407041644.de55c25-oliver.sang@intel.com
[ 54.093440][ C2] ------------[ cut here ]------------
[ 54.094193][ T705] list_add double add: new=ffff888802a8abc0, prev=ffff888802a8abc0, next=ffff8887892c4dd0.
[ 54.098261][ C2] WARNING: CPU: 2 PID: 53 at kernel/sched/rt.c:1415 __enqueue_rt_entity (kernel/sched/rt.c:1415 (discriminator 1))
[ 54.103613][ T705] ------------[ cut here ]------------
[ 54.113477][ C2] Modules linked in: dm_multipath
[ 54.122743][ T705] kernel BUG at lib/list_debug.c:35!
[ 54.128080][ C2] btrfs blake2b_generic
[ 54.132987][ T705] Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI
[ 54.138148][ C2] xor zstd_compress
[ 54.142266][ T705] CPU: 3 PID: 705 Comm: multipathd Tainted: G S 6.10.0-rc1-00010-ged0ed14c2b47 #1
[ 54.149087][ C2] raid6_pq libcrc32c
[ 54.152852][ T705] Hardware name: Dell Inc. OptiPlex 7040/0Y7WYT, BIOS 1.8.1 12/05/2017
[ 54.163339][ C2] ipmi_devintf ipmi_msghandler
[ 54.167192][ T705] RIP: 0010:__list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.175322][ C2] intel_rapl_msr intel_rapl_common
[ 54.180049][ T705] Code: 0b 48 89 f1 48 c7 c7 00 fa 26 84 48 89 de e8 d6 75 f2 fe 0f 0b 48 89 f2 48 89 d9 48 89 ee 48 c7 c7 80 fa 26 84 e8 bf 75 f2 fe <0f> 0b 48 89 f7 48 89 34 24 e8 11 cc 61 ff 48 8b 34 24 e9 71 ff ff
All code
========
0: 0b 48 89 or -0x77(%rax),%ecx
3: f1 icebp
4: 48 c7 c7 00 fa 26 84 mov $0xffffffff8426fa00,%rdi
b: 48 89 de mov %rbx,%rsi
e: e8 d6 75 f2 fe callq 0xfffffffffef275e9
13: 0f 0b ud2
15: 48 89 f2 mov %rsi,%rdx
18: 48 89 d9 mov %rbx,%rcx
1b: 48 89 ee mov %rbp,%rsi
1e: 48 c7 c7 80 fa 26 84 mov $0xffffffff8426fa80,%rdi
25: e8 bf 75 f2 fe callq 0xfffffffffef275e9
2a:* 0f 0b ud2 <-- trapping instruction
2c: 48 89 f7 mov %rsi,%rdi
2f: 48 89 34 24 mov %rsi,(%rsp)
33: e8 11 cc 61 ff callq 0xffffffffff61cc49
38: 48 8b 34 24 mov (%rsp),%rsi
3c: e9 .byte 0xe9
3d: 71 ff jno 0x3e
3f: ff .byte 0xff
Code starting with the faulting instruction
===========================================
0: 0f 0b ud2
2: 48 89 f7 mov %rsi,%rdi
5: 48 89 34 24 mov %rsi,(%rsp)
9: e8 11 cc 61 ff callq 0xffffffffff61cc1f
e: 48 8b 34 24 mov (%rsp),%rsi
12: e9 .byte 0xe9
13: 71 ff jno 0x14
15: ff .byte 0xff
[ 54.186345][ C2] sd_mod t10_pi
[ 54.191424][ T705] RSP: 0018:ffffc90000327b38 EFLAGS: 00010046
[ 54.211022][ C2] x86_pkg_temp_thermal
[ 54.214447][ T705]
[ 54.220405][ C2] crc64_rocksoft_generic crc64_rocksoft
[ 54.224435][ T705] RAX: 0000000000000058 RBX: ffff8887892c4dd0 RCX: ffffffff82424f4e
[ 54.226632][ C2] intel_powerclamp crc64
[ 54.232145][ T705] RDX: 0000000000000000 RSI: 0000000000000008 RDI: ffff8887893b5380
[ 54.240012][ C2] coretemp sg
[ 54.244217][ T705] RBP: ffff888802a8abc0 R08: 0000000000000001 R09: fffff52000064f22
[ 54.252087][ C2] kvm_intel i915
[ 54.255330][ T705] R10: ffffc90000327917 R11: 205d324320202020 R12: ffff888802a8abc0
[ 54.263200][ C2] kvm crct10dif_pclmul
[ 54.266705][ T705] R13: ffff8887892c4dd0 R14: ffff888802a8ac00 R15: ffff8887892c4dd8
[ 54.274572][ C2] crc32_pclmul crc32c_intel
[ 54.278599][ T705] FS: 00007f1b015ee680(0000) GS:ffff888789380000(0000) knlGS:0000000000000000
[ 54.286469][ C2] drm_buddy ghash_clmulni_intel
[ 54.290934][ T705] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 54.299764][ C2] intel_gtt sha512_ssse3
[ 54.304580][ T705] CR2: 000055e6a99e25f8 CR3: 000000080473e006 CR4: 00000000003706f0
[ 54.311054][ C2] drm_display_helper
[ 54.315255][ T705] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 54.323124][ C2] rapl ttm
[ 54.326976][ T705] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 54.334845][ C2] drm_kms_helper
[ 54.337825][ T705] Call Trace:
[ 54.345696][ C2] ahci mei_wdt
[ 54.349201][ T705] <TASK>
[ 54.352357][ C2] intel_cstate wmi_bmof
[ 54.355687][ T705] ? die (arch/x86/kernel/dumpstack.c:421 arch/x86/kernel/dumpstack.c:434 arch/x86/kernel/dumpstack.c:447)
[ 54.358493][ C2] intel_uncore
[ 54.362610][ T705] ? do_trap (arch/x86/kernel/traps.c:114 arch/x86/kernel/traps.c:155)
[ 54.366202][ C2] binfmt_misc video
[ 54.369533][ T705] ? __list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.373650][ C2] libahci mei_me
[ 54.377418][ T705] ? do_error_trap (arch/x86/include/asm/traps.h:58 arch/x86/kernel/traps.c:176)
[ 54.383104][ C2] i2c_i801 wmi
[ 54.386607][ T705] ? __list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.391070][ C2] intel_pch_thermal i2c_smbus
[ 54.394400][ T705] ? handle_invalid_op (arch/x86/kernel/traps.c:214)
[ 54.400087][ C2] mei libata
[ 54.404727][ T705] ? __list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.409540][ C2] acpi_pad fuse
[ 54.412697][ T705] ? exc_invalid_op (arch/x86/kernel/traps.c:267)
[ 54.418385][ C2] loop drm
[ 54.421803][ T705] ? asm_exc_invalid_op (arch/x86/include/asm/idtentry.h:621)
[ 54.426355][ C2] dm_mod ip_tables
[ 54.429337][ T705] ? llist_add_batch (lib/llist.c:33 (discriminator 14))
[ 54.434240][ C2]
[ 54.437928][ T705] ? __list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.442661][ C2] CPU: 2 PID: 53 Comm: khugepaged Tainted: G S 6.10.0-rc1-00010-ged0ed14c2b47 #1
[ 54.444859][ T705] ? __list_add_valid_or_report (lib/list_debug.c:35 (discriminator 1))
[ 54.450557][ C2] Hardware name: Dell Inc. OptiPlex 7040/0Y7WYT, BIOS 1.8.1 12/05/2017
[ 54.460974][ T705] __enqueue_rt_entity (include/linux/list.h:150 (discriminator 1) include/linux/list.h:183 (discriminator 1) kernel/sched/rt.c:1419 (discriminator 1))
[ 54.466661][ C2] RIP: 0010:__enqueue_rt_entity (kernel/sched/rt.c:1415 (discriminator 1))
[ 54.474792][ T705] enqueue_rt_entity (kernel/sched/rt.c:1616)
[ 54.479778][ C2] Code: fa 48 c1 ea 03 80 3c 02 00 0f 85 1f 03 00 00 49 8b bf 40 0a 00 00 44 89 ea 48 81 c7 b8 00 00 00 e8 15 72 05 00 e9 23 fa ff ff <0f> 0b e9 9b f6 ff ff 48 89 ee 48 89 df e8 8e d1 ff ff e9 f6 f5 ff
All code
========
0: fa cli
1: 48 c1 ea 03 shr $0x3,%rdx
5: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1)
9: 0f 85 1f 03 00 00 jne 0x32e
f: 49 8b bf 40 0a 00 00 mov 0xa40(%r15),%rdi
16: 44 89 ea mov %r13d,%edx
19: 48 81 c7 b8 00 00 00 add $0xb8,%rdi
20: e8 15 72 05 00 callq 0x5723a
25: e9 23 fa ff ff jmpq 0xfffffffffffffa4d
2a:* 0f 0b ud2 <-- trapping instruction
2c: e9 9b f6 ff ff jmpq 0xfffffffffffff6cc
31: 48 89 ee mov %rbp,%rsi
34: 48 89 df mov %rbx,%rdi
37: e8 8e d1 ff ff callq 0xffffffffffffd1ca
3c: e9 .byte 0xe9
3d: f6 f5 div %ch
3f: ff .byte 0xff
Code starting with the faulting instruction
===========================================
0: 0f 0b ud2
2: e9 9b f6 ff ff jmpq 0xfffffffffffff6a2
7: 48 89 ee mov %rbp,%rsi
a: 48 89 df mov %rbx,%rdi
d: e8 8e d1 ff ff callq 0xffffffffffffd1a0
12: e9 .byte 0xe9
13: f6 f5 div %ch
15: ff .byte 0xff
The kernel config and materials to reproduce are available at:
https://download.01.org/0day-ci/archive/20240704/202407041644.de55c25-oliver.sang@intel.com
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
© 2016 - 2025 Red Hat, Inc.