From: luca abeni <luca.abeni@santannapisa.it>
Completely remove the old RT_GROUP_SCHED functions and data structures:
- Remove the fields back and my_q from sched_rt_entity.
- Remove the rt_bandwidth data structure.
- Remove the field rt_bandwidth from task_group.
- Remove the rt_bandwidth_enabled function.
- Remove the fields rt_queued, rt_throttled, rt_time, rt_runtime,
rt_runtime_lock and rt_nr_boosted from rt_rq.
All of the removed fields and data are equivalently represented by previously
added fields in rq, rt_rq and dl_bandwidth, and by the dl servers themselves.
Co-developed-by: Yuri Andriaccio <yurand2000@gmail.com>
Signed-off-by: Yuri Andriaccio <yurand2000@gmail.com>
Signed-off-by: luca abeni <luca.abeni@santannapisa.it>
---
include/linux/sched.h | 3 ---
kernel/sched/sched.h | 26 --------------------------
2 files changed, 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3f1f15b6d2..9ef7797983 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -627,12 +627,9 @@ struct sched_rt_entity {
unsigned short on_rq;
unsigned short on_list;
- struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
/* rq on which this entity is (to be) queued: */
struct rt_rq *rt_rq;
- /* rq "owned" by this entity/group: */
- struct rt_rq *my_q;
#endif
} __randomize_layout;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 334ab6d597..4b65775ada 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -310,15 +310,6 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};
-struct rt_bandwidth {
- /* nests inside the rq lock: */
- raw_spinlock_t rt_runtime_lock;
- ktime_t rt_period;
- u64 rt_runtime;
- struct hrtimer rt_period_timer;
- unsigned int rt_period_active;
-};
-
struct dl_bandwidth {
raw_spinlock_t dl_runtime_lock;
u64 dl_runtime;
@@ -510,7 +501,6 @@ struct task_group {
struct sched_dl_entity **dl_se;
struct rt_rq **rt_rq;
- struct rt_bandwidth rt_bandwidth;
struct dl_bandwidth dl_bandwidth;
#endif
@@ -826,11 +816,6 @@ struct scx_rq {
};
#endif /* CONFIG_SCHED_CLASS_EXT */
-static inline int rt_bandwidth_enabled(void)
-{
- return 0;
-}
-
/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
@@ -848,17 +833,6 @@ struct rt_rq {
bool overloaded;
struct plist_head pushable_tasks;
- int rt_queued;
-
-#ifdef CONFIG_RT_GROUP_SCHED
- int rt_throttled;
- u64 rt_time; /* consumed RT time, goes up in update_curr_rt */
- u64 rt_runtime; /* allotted RT time, "slice" from rt_bandwidth, RT sharing/balancing */
- /* Nests inside the rq lock: */
- raw_spinlock_t rt_runtime_lock;
-
- unsigned int rt_nr_boosted;
-#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *tg; /* this tg has "this" rt_rq on given CPU for runnable entities */
#endif
--
2.51.0