Keep track of the csg priority in panthor_group when the group is
scheduled/active.
This is useful to know the actual priority in use in the firmware
group slot.
Signed-off-by: Mary Guillemard <mary.guillemard@collabora.com>
---
drivers/gpu/drm/panthor/panthor_sched.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 86908ada7335..f15abeef4ece 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -574,6 +574,13 @@ struct panthor_group {
*/
int csg_id;
+ /**
+ * @csg_priority: Priority of the FW group slot.
+ *
+ * -1 when the group is not scheduled/active.
+ */
+ int csg_priority;
+
/**
* @destroyed: True when the group has been destroyed.
*
@@ -894,11 +901,12 @@ group_get(struct panthor_group *group)
* group_bind_locked() - Bind a group to a group slot
* @group: Group.
* @csg_id: Slot.
+ * @csg_priority: Priority of the slot.
*
* Return: 0 on success, a negative error code otherwise.
*/
static int
-group_bind_locked(struct panthor_group *group, u32 csg_id)
+group_bind_locked(struct panthor_group *group, u32 csg_id, u32 csg_priority)
{
struct panthor_device *ptdev = group->ptdev;
struct panthor_csg_slot *csg_slot;
@@ -917,6 +925,7 @@ group_bind_locked(struct panthor_group *group, u32 csg_id)
csg_slot = &ptdev->scheduler->csg_slots[csg_id];
group_get(group);
group->csg_id = csg_id;
+ group->csg_priority = csg_priority;
/* Dummy doorbell allocation: doorbell is assigned to the group and
* all queues use the same doorbell.
@@ -956,6 +965,7 @@ group_unbind_locked(struct panthor_group *group)
slot = &ptdev->scheduler->csg_slots[group->csg_id];
panthor_vm_idle(group->vm);
group->csg_id = -1;
+ group->csg_priority = -1;
/* Tiler OOM events will be re-issued next time the group is scheduled. */
atomic_set(&group->tiler_oom, 0);
@@ -2193,8 +2203,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
csg_slot = &sched->csg_slots[csg_id];
- group_bind_locked(group, csg_id);
- csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
+ group_bind_locked(group, csg_id, new_csg_prio);
+ csg_slot_prog_locked(ptdev, csg_id, new_csg_prio);
+ new_csg_prio--;
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
group->state == PANTHOR_CS_GROUP_SUSPENDED ?
CSG_STATE_RESUME : CSG_STATE_START,
@@ -3111,6 +3122,7 @@ int panthor_group_create(struct panthor_file *pfile,
kref_init(&group->refcount);
group->state = PANTHOR_CS_GROUP_CREATED;
group->csg_id = -1;
+ group->csg_priority = -1;
group->ptdev = ptdev;
group->max_compute_cores = group_args->max_compute_cores;
--
2.46.0
On 18/09/2024 09:50, Mary Guillemard wrote: > Keep track of the csg priority in panthor_group when the group is > scheduled/active. > > This is useful to know the actual priority in use in the firmware > group slot. > > Signed-off-by: Mary Guillemard <mary.guillemard@collabora.com> > --- > drivers/gpu/drm/panthor/panthor_sched.c | 18 +++++++++++++++--- > 1 file changed, 15 insertions(+), 3 deletions(-) > > diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c > index 86908ada7335..f15abeef4ece 100644 > --- a/drivers/gpu/drm/panthor/panthor_sched.c > +++ b/drivers/gpu/drm/panthor/panthor_sched.c > @@ -574,6 +574,13 @@ struct panthor_group { > */ > int csg_id; > > + /** > + * @csg_id: Priority of the FW group slot. kerneldoc name is wrong: s/csg_id/csg_priority/ Otherwise this looks reasonable, but see my reply to the second patch. Thanks, Steve > + * > + * -1 when the group is not scheduled/active. > + */ > + int csg_priority; > + > /** > * @destroyed: True when the group has been destroyed. > * > @@ -894,11 +901,12 @@ group_get(struct panthor_group *group) > * group_bind_locked() - Bind a group to a group slot > * @group: Group. > * @csg_id: Slot. > + * @csg_priority: Priority of the slot. > * > * Return: 0 on success, a negative error code otherwise. > */ > static int > -group_bind_locked(struct panthor_group *group, u32 csg_id) > +group_bind_locked(struct panthor_group *group, u32 csg_id, u32 csg_priority) > { > struct panthor_device *ptdev = group->ptdev; > struct panthor_csg_slot *csg_slot; > @@ -917,6 +925,7 @@ group_bind_locked(struct panthor_group *group, u32 csg_id) > csg_slot = &ptdev->scheduler->csg_slots[csg_id]; > group_get(group); > group->csg_id = csg_id; > + group->csg_priority = csg_priority; > > /* Dummy doorbell allocation: doorbell is assigned to the group and > * all queues use the same doorbell. 
> @@ -956,6 +965,7 @@ group_unbind_locked(struct panthor_group *group) > slot = &ptdev->scheduler->csg_slots[group->csg_id]; > panthor_vm_idle(group->vm); > group->csg_id = -1; > + group->csg_priority = -1; > > /* Tiler OOM events will be re-issued next time the group is scheduled. */ > atomic_set(&group->tiler_oom, 0); > @@ -2193,8 +2203,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c > > csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); > csg_slot = &sched->csg_slots[csg_id]; > - group_bind_locked(group, csg_id); > - csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--); > + group_bind_locked(group, csg_id, new_csg_prio); > + csg_slot_prog_locked(ptdev, csg_id, new_csg_prio); > + new_csg_prio--; > csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, > group->state == PANTHOR_CS_GROUP_SUSPENDED ? > CSG_STATE_RESUME : CSG_STATE_START, > @@ -3111,6 +3122,7 @@ int panthor_group_create(struct panthor_file *pfile, > kref_init(&group->refcount); > group->state = PANTHOR_CS_GROUP_CREATED; > group->csg_id = -1; > + group->csg_priority = -1; > > group->ptdev = ptdev; > group->max_compute_cores = group_args->max_compute_cores;
Hi Mary, kernel test robot noticed the following build warnings: [auto build test WARNING on drm/drm-next] [also build test WARNING on drm-exynos/exynos-drm-next drm-intel/for-linux-next drm-intel/for-linux-next-fixes drm-misc/drm-misc-next drm-tip/drm-tip linus/master v6.11 next-20240920] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Mary-Guillemard/drm-panthor-Add-csg_priority-to-panthor_group/20240918-165602 base: git://anongit.freedesktop.org/drm/drm drm-next patch link: https://lore.kernel.org/r/20240918085056.24422-3-mary.guillemard%40collabora.com patch subject: [PATCH 1/2] drm/panthor: Add csg_priority to panthor_group config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20240922/202409220025.v4cCQjaI-lkp@intel.com/config) compiler: clang version 18.1.8 (https://github.com/llvm/llvm-project 3b5b5c1ec4a3095ab096dd780e84d7ab81f3d7ff) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240922/202409220025.v4cCQjaI-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. 
not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202409220025.v4cCQjaI-lkp@intel.com/ All warnings (new ones prefixed by >>): drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'runnable' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'idle' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'waiting' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'has_ref' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'in_progress' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:319: warning: Excess struct member 'stopped_groups' description in 'panthor_scheduler' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'mem' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'input' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'output' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'input_fw_va' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'output_fw_va' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'gpu_va' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'ref' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'gt' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'sync64' description in 
'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'bo' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'offset' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'kmap' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'lock' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'id' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'seqno' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'last_fence' description in 'panthor_queue' drivers/gpu/drm/panthor/panthor_sched.c:479: warning: Excess struct member 'in_flight_jobs' description in 'panthor_queue' >> drivers/gpu/drm/panthor/panthor_sched.c:669: warning: Function parameter or struct member 'csg_priority' not described in 'panthor_group' drivers/gpu/drm/panthor/panthor_sched.c:784: warning: Excess struct member 'start' description in 'panthor_job' drivers/gpu/drm/panthor/panthor_sched.c:784: warning: Excess struct member 'size' description in 'panthor_job' drivers/gpu/drm/panthor/panthor_sched.c:784: warning: Excess struct member 'latest_flush' description in 'panthor_job' drivers/gpu/drm/panthor/panthor_sched.c:784: warning: Excess struct member 'start' description in 'panthor_job' drivers/gpu/drm/panthor/panthor_sched.c:784: warning: Excess struct member 'end' description in 'panthor_job' drivers/gpu/drm/panthor/panthor_sched.c:1707: warning: Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_fw_events' drivers/gpu/drm/panthor/panthor_sched.c:1707: warning: Function parameter or struct member 'events' not described in 'panthor_sched_report_fw_events' drivers/gpu/drm/panthor/panthor_sched.c:2602: warning: 
Function parameter or struct member 'ptdev' not described in 'panthor_sched_report_mmu_fault' vim +669 drivers/gpu/drm/panthor/panthor_sched.c de85488138247d Boris Brezillon 2024-02-29 516 de85488138247d Boris Brezillon 2024-02-29 517 /** de85488138247d Boris Brezillon 2024-02-29 518 * struct panthor_group - Scheduling group object de85488138247d Boris Brezillon 2024-02-29 519 */ de85488138247d Boris Brezillon 2024-02-29 520 struct panthor_group { de85488138247d Boris Brezillon 2024-02-29 521 /** @refcount: Reference count */ de85488138247d Boris Brezillon 2024-02-29 522 struct kref refcount; de85488138247d Boris Brezillon 2024-02-29 523 de85488138247d Boris Brezillon 2024-02-29 524 /** @ptdev: Device. */ de85488138247d Boris Brezillon 2024-02-29 525 struct panthor_device *ptdev; de85488138247d Boris Brezillon 2024-02-29 526 de85488138247d Boris Brezillon 2024-02-29 527 /** @vm: VM bound to the group. */ de85488138247d Boris Brezillon 2024-02-29 528 struct panthor_vm *vm; de85488138247d Boris Brezillon 2024-02-29 529 de85488138247d Boris Brezillon 2024-02-29 530 /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */ de85488138247d Boris Brezillon 2024-02-29 531 u64 compute_core_mask; de85488138247d Boris Brezillon 2024-02-29 532 de85488138247d Boris Brezillon 2024-02-29 533 /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */ de85488138247d Boris Brezillon 2024-02-29 534 u64 fragment_core_mask; de85488138247d Boris Brezillon 2024-02-29 535 de85488138247d Boris Brezillon 2024-02-29 536 /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */ de85488138247d Boris Brezillon 2024-02-29 537 u64 tiler_core_mask; de85488138247d Boris Brezillon 2024-02-29 538 de85488138247d Boris Brezillon 2024-02-29 539 /** @max_compute_cores: Maximum number of shader cores used for compute jobs. 
*/ de85488138247d Boris Brezillon 2024-02-29 540 u8 max_compute_cores; de85488138247d Boris Brezillon 2024-02-29 541 be7ffc821f5fc2 Liviu Dudau 2024-04-02 542 /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */ de85488138247d Boris Brezillon 2024-02-29 543 u8 max_fragment_cores; de85488138247d Boris Brezillon 2024-02-29 544 de85488138247d Boris Brezillon 2024-02-29 545 /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */ de85488138247d Boris Brezillon 2024-02-29 546 u8 max_tiler_cores; de85488138247d Boris Brezillon 2024-02-29 547 de85488138247d Boris Brezillon 2024-02-29 548 /** @priority: Group priority (check panthor_csg_priority). */ de85488138247d Boris Brezillon 2024-02-29 549 u8 priority; de85488138247d Boris Brezillon 2024-02-29 550 de85488138247d Boris Brezillon 2024-02-29 551 /** @blocked_queues: Bitmask reflecting the blocked queues. */ de85488138247d Boris Brezillon 2024-02-29 552 u32 blocked_queues; de85488138247d Boris Brezillon 2024-02-29 553 de85488138247d Boris Brezillon 2024-02-29 554 /** @idle_queues: Bitmask reflecting the idle queues. */ de85488138247d Boris Brezillon 2024-02-29 555 u32 idle_queues; de85488138247d Boris Brezillon 2024-02-29 556 de85488138247d Boris Brezillon 2024-02-29 557 /** @fatal_lock: Lock used to protect access to fatal fields. */ de85488138247d Boris Brezillon 2024-02-29 558 spinlock_t fatal_lock; de85488138247d Boris Brezillon 2024-02-29 559 de85488138247d Boris Brezillon 2024-02-29 560 /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */ de85488138247d Boris Brezillon 2024-02-29 561 u32 fatal_queues; de85488138247d Boris Brezillon 2024-02-29 562 de85488138247d Boris Brezillon 2024-02-29 563 /** @tiler_oom: Mask of queues that have a tiler OOM event to process. 
*/ de85488138247d Boris Brezillon 2024-02-29 564 atomic_t tiler_oom; de85488138247d Boris Brezillon 2024-02-29 565 de85488138247d Boris Brezillon 2024-02-29 566 /** @queue_count: Number of queues in this group. */ de85488138247d Boris Brezillon 2024-02-29 567 u32 queue_count; de85488138247d Boris Brezillon 2024-02-29 568 de85488138247d Boris Brezillon 2024-02-29 569 /** @queues: Queues owned by this group. */ de85488138247d Boris Brezillon 2024-02-29 570 struct panthor_queue *queues[MAX_CS_PER_CSG]; de85488138247d Boris Brezillon 2024-02-29 571 de85488138247d Boris Brezillon 2024-02-29 572 /** de85488138247d Boris Brezillon 2024-02-29 573 * @csg_id: ID of the FW group slot. de85488138247d Boris Brezillon 2024-02-29 574 * de85488138247d Boris Brezillon 2024-02-29 575 * -1 when the group is not scheduled/active. de85488138247d Boris Brezillon 2024-02-29 576 */ de85488138247d Boris Brezillon 2024-02-29 577 int csg_id; de85488138247d Boris Brezillon 2024-02-29 578 474e5b301372a6 Mary Guillemard 2024-09-18 579 /** 474e5b301372a6 Mary Guillemard 2024-09-18 580 * @csg_id: Priority of the FW group slot. 474e5b301372a6 Mary Guillemard 2024-09-18 581 * 474e5b301372a6 Mary Guillemard 2024-09-18 582 * -1 when the group is not scheduled/active. 474e5b301372a6 Mary Guillemard 2024-09-18 583 */ 474e5b301372a6 Mary Guillemard 2024-09-18 584 int csg_priority; 474e5b301372a6 Mary Guillemard 2024-09-18 585 de85488138247d Boris Brezillon 2024-02-29 586 /** de85488138247d Boris Brezillon 2024-02-29 587 * @destroyed: True when the group has been destroyed. de85488138247d Boris Brezillon 2024-02-29 588 * de85488138247d Boris Brezillon 2024-02-29 589 * If a group is destroyed it becomes useless: no further jobs can be submitted de85488138247d Boris Brezillon 2024-02-29 590 * to its queues. We simply wait for all references to be dropped so we can de85488138247d Boris Brezillon 2024-02-29 591 * release the group object. 
de85488138247d Boris Brezillon 2024-02-29 592 */ de85488138247d Boris Brezillon 2024-02-29 593 bool destroyed; de85488138247d Boris Brezillon 2024-02-29 594 de85488138247d Boris Brezillon 2024-02-29 595 /** de85488138247d Boris Brezillon 2024-02-29 596 * @timedout: True when a timeout occurred on any of the queues owned by de85488138247d Boris Brezillon 2024-02-29 597 * this group. de85488138247d Boris Brezillon 2024-02-29 598 * de85488138247d Boris Brezillon 2024-02-29 599 * Timeouts can be reported by drm_sched or by the FW. In any case, any de85488138247d Boris Brezillon 2024-02-29 600 * timeout situation is unrecoverable, and the group becomes useless. de85488138247d Boris Brezillon 2024-02-29 601 * We simply wait for all references to be dropped so we can release the de85488138247d Boris Brezillon 2024-02-29 602 * group object. de85488138247d Boris Brezillon 2024-02-29 603 */ de85488138247d Boris Brezillon 2024-02-29 604 bool timedout; de85488138247d Boris Brezillon 2024-02-29 605 de85488138247d Boris Brezillon 2024-02-29 606 /** de85488138247d Boris Brezillon 2024-02-29 607 * @syncobjs: Pool of per-queue synchronization objects. de85488138247d Boris Brezillon 2024-02-29 608 * de85488138247d Boris Brezillon 2024-02-29 609 * One sync object per queue. The position of the sync object is de85488138247d Boris Brezillon 2024-02-29 610 * determined by the queue index. de85488138247d Boris Brezillon 2024-02-29 611 */ de85488138247d Boris Brezillon 2024-02-29 612 struct panthor_kernel_bo *syncobjs; de85488138247d Boris Brezillon 2024-02-29 613 de85488138247d Boris Brezillon 2024-02-29 614 /** @state: Group state. */ de85488138247d Boris Brezillon 2024-02-29 615 enum panthor_group_state state; de85488138247d Boris Brezillon 2024-02-29 616 de85488138247d Boris Brezillon 2024-02-29 617 /** de85488138247d Boris Brezillon 2024-02-29 618 * @suspend_buf: Suspend buffer. 
de85488138247d Boris Brezillon 2024-02-29 619 * de85488138247d Boris Brezillon 2024-02-29 620 * Stores the state of the group and its queues when a group is suspended. de85488138247d Boris Brezillon 2024-02-29 621 * Used at resume time to restore the group in its previous state. de85488138247d Boris Brezillon 2024-02-29 622 * de85488138247d Boris Brezillon 2024-02-29 623 * The size of the suspend buffer is exposed through the FW interface. de85488138247d Boris Brezillon 2024-02-29 624 */ de85488138247d Boris Brezillon 2024-02-29 625 struct panthor_kernel_bo *suspend_buf; de85488138247d Boris Brezillon 2024-02-29 626 de85488138247d Boris Brezillon 2024-02-29 627 /** de85488138247d Boris Brezillon 2024-02-29 628 * @protm_suspend_buf: Protection mode suspend buffer. de85488138247d Boris Brezillon 2024-02-29 629 * de85488138247d Boris Brezillon 2024-02-29 630 * Stores the state of the group and its queues when a group that's in de85488138247d Boris Brezillon 2024-02-29 631 * protection mode is suspended. de85488138247d Boris Brezillon 2024-02-29 632 * de85488138247d Boris Brezillon 2024-02-29 633 * Used at resume time to restore the group in its previous state. de85488138247d Boris Brezillon 2024-02-29 634 * de85488138247d Boris Brezillon 2024-02-29 635 * The size of the protection mode suspend buffer is exposed through the de85488138247d Boris Brezillon 2024-02-29 636 * FW interface. de85488138247d Boris Brezillon 2024-02-29 637 */ de85488138247d Boris Brezillon 2024-02-29 638 struct panthor_kernel_bo *protm_suspend_buf; de85488138247d Boris Brezillon 2024-02-29 639 de85488138247d Boris Brezillon 2024-02-29 640 /** @sync_upd_work: Work used to check/signal job fences. */ de85488138247d Boris Brezillon 2024-02-29 641 struct work_struct sync_upd_work; de85488138247d Boris Brezillon 2024-02-29 642 de85488138247d Boris Brezillon 2024-02-29 643 /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. 
*/ de85488138247d Boris Brezillon 2024-02-29 644 struct work_struct tiler_oom_work; de85488138247d Boris Brezillon 2024-02-29 645 de85488138247d Boris Brezillon 2024-02-29 646 /** @term_work: Work used to finish the group termination procedure. */ de85488138247d Boris Brezillon 2024-02-29 647 struct work_struct term_work; de85488138247d Boris Brezillon 2024-02-29 648 de85488138247d Boris Brezillon 2024-02-29 649 /** de85488138247d Boris Brezillon 2024-02-29 650 * @release_work: Work used to release group resources. de85488138247d Boris Brezillon 2024-02-29 651 * de85488138247d Boris Brezillon 2024-02-29 652 * We need to postpone the group release to avoid a deadlock when de85488138247d Boris Brezillon 2024-02-29 653 * the last ref is released in the tick work. de85488138247d Boris Brezillon 2024-02-29 654 */ de85488138247d Boris Brezillon 2024-02-29 655 struct work_struct release_work; de85488138247d Boris Brezillon 2024-02-29 656 de85488138247d Boris Brezillon 2024-02-29 657 /** de85488138247d Boris Brezillon 2024-02-29 658 * @run_node: Node used to insert the group in the de85488138247d Boris Brezillon 2024-02-29 659 * panthor_group::groups::{runnable,idle} and de85488138247d Boris Brezillon 2024-02-29 660 * panthor_group::reset.stopped_groups lists. de85488138247d Boris Brezillon 2024-02-29 661 */ de85488138247d Boris Brezillon 2024-02-29 662 struct list_head run_node; de85488138247d Boris Brezillon 2024-02-29 663 de85488138247d Boris Brezillon 2024-02-29 664 /** de85488138247d Boris Brezillon 2024-02-29 665 * @wait_node: Node used to insert the group in the de85488138247d Boris Brezillon 2024-02-29 666 * panthor_group::groups::waiting list. de85488138247d Boris Brezillon 2024-02-29 667 */ de85488138247d Boris Brezillon 2024-02-29 668 struct list_head wait_node; de85488138247d Boris Brezillon 2024-02-29 @669 }; de85488138247d Boris Brezillon 2024-02-29 670 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
© 2016 - 2024 Red Hat, Inc.