1 // SPDX-License-Identifier: GPL-2.0
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13 struct rt_bandwidth def_rt_bandwidth;
16 * period over which we measure -rt task CPU usage in us (default: 1 s).
19 int sysctl_sched_rt_period = 1000000;
22 * part of the period that we allow rt tasks to run in us (default: 0.95 s).
25 int sysctl_sched_rt_runtime = 950000;
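/*
 * Worked example with the defaults above: with a 1000000 us period and a
 * 950000 us runtime, the RT class may consume at most
 * 950000 / 1000000 = 95% of each CPU over every 1 s window, leaving 5% for
 * non-RT tasks. Writing -1 to the runtime (RUNTIME_INF internally) disables
 * the throttling altogether.
 */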
28 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
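/*
 * Example of the conversion above, assuming the usual RR_TIMESLICE of 100 ms
 * worth of jiffies (100 * HZ / 1000): with HZ == 250 that is 25 ticks, and
 * (MSEC_PER_SEC * 25) / 250 == 100, so the sysctl reports the round-robin
 * timeslice as 100 ms independent of the tick rate.
 */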
29 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
30 size_t *lenp, loff_t *ppos);
31 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
32 size_t *lenp, loff_t *ppos);
33 static struct ctl_table sched_rt_sysctls[] = {
35 .procname = "sched_rt_period_us",
36 .data = &sysctl_sched_rt_period,
37 .maxlen = sizeof(int),
39 .proc_handler = sched_rt_handler,
41 .extra2 = SYSCTL_INT_MAX,
44 .procname = "sched_rt_runtime_us",
45 .data = &sysctl_sched_rt_runtime,
46 .maxlen = sizeof(int),
48 .proc_handler = sched_rt_handler,
49 .extra1 = SYSCTL_NEG_ONE,
50 .extra2 = (void *)&sysctl_sched_rt_period,
53 .procname = "sched_rr_timeslice_ms",
54 .data = &sysctl_sched_rr_timeslice,
55 .maxlen = sizeof(int),
57 .proc_handler = sched_rr_handler,
62 static int __init sched_rt_sysctl_init(void)
64 register_sysctl_init("kernel", sched_rt_sysctls);
67 late_initcall(sched_rt_sysctl_init);
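/*
 * Illustrative usage of the sysctls registered above (the values shown are
 * the defaults; adjust as root):
 *
 *   cat /proc/sys/kernel/sched_rt_period_us          # 1000000
 *   cat /proc/sys/kernel/sched_rt_runtime_us         # 950000
 *   echo -1 > /proc/sys/kernel/sched_rt_runtime_us   # disable RT throttling
 *   echo 20 > /proc/sys/kernel/sched_rr_timeslice_ms # shorter RR timeslices
 */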
70 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
72 struct rt_bandwidth *rt_b =
73 container_of(timer, struct rt_bandwidth, rt_period_timer);
77 raw_spin_lock(&rt_b->rt_runtime_lock);
79 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
83 raw_spin_unlock(&rt_b->rt_runtime_lock);
84 idle = do_sched_rt_period_timer(rt_b, overrun);
85 raw_spin_lock(&rt_b->rt_runtime_lock);
88 rt_b->rt_period_active = 0;
89 raw_spin_unlock(&rt_b->rt_runtime_lock);
91 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
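/*
 * Small worked example of the replenishment driven by this timer: with the
 * default 1 s period and 950 ms runtime, a throttled rt_rq that accumulated
 * rt_time = 960 ms has overrun * runtime = 950 ms subtracted by
 * do_sched_rt_period_timer(), leaving 10 ms. That is below the runtime, so
 * rt_throttled is cleared and the queue is put back for the new period; the
 * timer only stops restarting once every rt_rq is idle again.
 */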
94 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
96 rt_b->rt_period = ns_to_ktime(period);
97 rt_b->rt_runtime = runtime;
99 raw_spin_lock_init(&rt_b->rt_runtime_lock);
101 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
102 HRTIMER_MODE_REL_HARD);
103 rt_b->rt_period_timer.function = sched_rt_period_timer;
106 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
108 raw_spin_lock(&rt_b->rt_runtime_lock);
109 if (!rt_b->rt_period_active) {
110 rt_b->rt_period_active = 1;
112 * SCHED_DEADLINE updates the bandwidth, as a runaway
113 * RT task with a DL task could hog a CPU. But DL does
114 * not reset the period. If a deadline task was running
115 * without an RT task running, it can cause RT tasks to
116 * throttle when they start up. Kick the timer right away
117 * to update the period.
119 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
120 hrtimer_start_expires(&rt_b->rt_period_timer,
121 HRTIMER_MODE_ABS_PINNED_HARD);
123 raw_spin_unlock(&rt_b->rt_runtime_lock);
126 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
128 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
131 do_start_rt_bandwidth(rt_b);
134 void init_rt_rq(struct rt_rq *rt_rq)
136 struct rt_prio_array *array;
139 array = &rt_rq->active;
140 for (i = 0; i < MAX_RT_PRIO; i++) {
141 INIT_LIST_HEAD(array->queue + i);
142 __clear_bit(i, array->bitmap);
144 /* delimiter for bitsearch: */
145 __set_bit(MAX_RT_PRIO, array->bitmap);
147 #if defined CONFIG_SMP
148 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
149 rt_rq->highest_prio.next = MAX_RT_PRIO-1;
150 rt_rq->overloaded = 0;
151 plist_head_init(&rt_rq->pushable_tasks);
152 #endif /* CONFIG_SMP */
153 /* We start in dequeued state, because no RT tasks are queued */
154 rt_rq->rt_queued = 0;
157 rt_rq->rt_throttled = 0;
158 rt_rq->rt_runtime = 0;
159 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
162 #ifdef CONFIG_RT_GROUP_SCHED
163 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
165 hrtimer_cancel(&rt_b->rt_period_timer);
168 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
170 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
172 #ifdef CONFIG_SCHED_DEBUG
173 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
175 return container_of(rt_se, struct task_struct, rt);
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
183 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
188 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
190 struct rt_rq *rt_rq = rt_se->rt_rq;
195 void unregister_rt_sched_group(struct task_group *tg)
198 destroy_rt_bandwidth(&tg->rt_bandwidth);
202 void free_rt_sched_group(struct task_group *tg)
206 for_each_possible_cpu(i) {
217 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
218 struct sched_rt_entity *rt_se, int cpu,
219 struct sched_rt_entity *parent)
221 struct rq *rq = cpu_rq(cpu);
223 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
224 rt_rq->rt_nr_boosted = 0;
228 tg->rt_rq[cpu] = rt_rq;
229 tg->rt_se[cpu] = rt_se;
235 rt_se->rt_rq = &rq->rt;
237 rt_se->rt_rq = parent->my_q;
240 rt_se->parent = parent;
241 INIT_LIST_HEAD(&rt_se->run_list);
244 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
247 struct sched_rt_entity *rt_se;
250 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
253 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
257 init_rt_bandwidth(&tg->rt_bandwidth,
258 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
260 for_each_possible_cpu(i) {
261 rt_rq = kzalloc_node(sizeof(struct rt_rq),
262 GFP_KERNEL, cpu_to_node(i));
266 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
267 GFP_KERNEL, cpu_to_node(i));
272 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
273 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
284 #else /* CONFIG_RT_GROUP_SCHED */
286 #define rt_entity_is_task(rt_se) (1)
288 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
290 return container_of(rt_se, struct task_struct, rt);
293 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
295 return container_of(rt_rq, struct rq, rt);
298 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
300 struct task_struct *p = rt_task_of(rt_se);
305 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
307 struct rq *rq = rq_of_rt_se(rt_se);
312 void unregister_rt_sched_group(struct task_group *tg) { }
314 void free_rt_sched_group(struct task_group *tg) { }
316 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
320 #endif /* CONFIG_RT_GROUP_SCHED */
324 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
326 /* Try to pull RT tasks here if we lower this rq's prio */
327 return rq->online && rq->rt.highest_prio.curr > prev->prio;
330 static inline int rt_overloaded(struct rq *rq)
332 return atomic_read(&rq->rd->rto_count);
335 static inline void rt_set_overload(struct rq *rq)
340 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
342 * Make sure the mask is visible before we set
343 * the overload count. That is checked to determine
344 * if we should look at the mask. It would be a shame
345 * if we looked at the mask, but the mask was not updated yet.
348 * Matched by the barrier in pull_rt_task().
351 atomic_inc(&rq->rd->rto_count);
354 static inline void rt_clear_overload(struct rq *rq)
359 /* the order here really doesn't matter */
360 atomic_dec(&rq->rd->rto_count);
361 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
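/*
 * How the two helpers above pair up with the reader side: rt_set_overload()
 * publishes the CPU in rd->rto_mask before incrementing rd->rto_count, while
 * pull_rt_task() reads rto_count first and only then scans rto_mask. The
 * barrier noted above guarantees that a puller which observes a non-zero
 * rto_count also observes the corresponding mask bit, so no overloaded
 * runqueue is missed.
 */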
364 static inline int has_pushable_tasks(struct rq *rq)
366 return !plist_head_empty(&rq->rt.pushable_tasks);
369 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
370 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
372 static void push_rt_tasks(struct rq *);
373 static void pull_rt_task(struct rq *);
375 static inline void rt_queue_push_tasks(struct rq *rq)
377 if (!has_pushable_tasks(rq))
380 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
383 static inline void rt_queue_pull_task(struct rq *rq)
385 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
388 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
390 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
391 plist_node_init(&p->pushable_tasks, p->prio);
392 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
394 /* Update the highest prio pushable task */
395 if (p->prio < rq->rt.highest_prio.next)
396 rq->rt.highest_prio.next = p->prio;
398 if (!rq->rt.overloaded) {
400 rq->rt.overloaded = 1;
404 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
406 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
408 /* Update the new highest prio pushable task */
409 if (has_pushable_tasks(rq)) {
410 p = plist_first_entry(&rq->rt.pushable_tasks,
411 struct task_struct, pushable_tasks);
412 rq->rt.highest_prio.next = p->prio;
414 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
416 if (rq->rt.overloaded) {
417 rt_clear_overload(rq);
418 rq->rt.overloaded = 0;
425 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
429 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
433 static inline void rt_queue_push_tasks(struct rq *rq)
436 #endif /* CONFIG_SMP */
438 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
439 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
441 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
446 #ifdef CONFIG_UCLAMP_TASK
448 * Verify the fitness of task @p to run on @cpu taking into account the uclamp settings.
451 * This check is only important for heterogeneous systems where uclamp_min value
452 * is higher than the capacity of a @cpu. For non-heterogeneous systems this
453 * function will always return true.
455 * The function will return true if the capacity of the @cpu is >= the
456 * uclamp_min and false otherwise.
458 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min > uclamp_max.
461 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
463 unsigned int min_cap;
464 unsigned int max_cap;
465 unsigned int cpu_cap;
467 /* Only heterogeneous systems can benefit from this check */
468 if (!sched_asym_cpucap_active())
471 min_cap = uclamp_eff_value(p, UCLAMP_MIN);
472 max_cap = uclamp_eff_value(p, UCLAMP_MAX);
474 cpu_cap = arch_scale_cpu_capacity(cpu);
476 return cpu_cap >= min(min_cap, max_cap);
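/*
 * Worked example (capacity numbers are illustrative): on an asymmetric system
 * a little CPU may report arch_scale_cpu_capacity() == 512 while a big CPU
 * reports 1024. A task with uclamp_min == 600 and uclamp_max == 1024 does not
 * fit on the little CPU (512 < min(600, 1024)) but does fit on the big one.
 * If the same task had uclamp_max == 400, the clamping above gives
 * min(600, 400) == 400 and the little CPU fits again.
 */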
479 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
485 #ifdef CONFIG_RT_GROUP_SCHED
487 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
492 return rt_rq->rt_runtime;
495 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
497 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
500 typedef struct task_group *rt_rq_iter_t;
502 static inline struct task_group *next_task_group(struct task_group *tg)
505 tg = list_entry_rcu(tg->list.next,
506 typeof(struct task_group), list);
507 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
509 if (&tg->list == &task_groups)
515 #define for_each_rt_rq(rt_rq, iter, rq) \
516 for (iter = container_of(&task_groups, typeof(*iter), list); \
517 (iter = next_task_group(iter)) && \
518 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
520 #define for_each_sched_rt_entity(rt_se) \
521 for (; rt_se; rt_se = rt_se->parent)
523 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
528 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
529 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
531 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
533 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
534 struct rq *rq = rq_of_rt_rq(rt_rq);
535 struct sched_rt_entity *rt_se;
537 int cpu = cpu_of(rq);
539 rt_se = rt_rq->tg->rt_se[cpu];
541 if (rt_rq->rt_nr_running) {
543 enqueue_top_rt_rq(rt_rq);
544 else if (!on_rt_rq(rt_se))
545 enqueue_rt_entity(rt_se, 0);
547 if (rt_rq->highest_prio.curr < curr->prio)
552 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
554 struct sched_rt_entity *rt_se;
555 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
557 rt_se = rt_rq->tg->rt_se[cpu];
560 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
561 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
562 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
564 else if (on_rt_rq(rt_se))
565 dequeue_rt_entity(rt_se, 0);
568 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
570 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
573 static int rt_se_boosted(struct sched_rt_entity *rt_se)
575 struct rt_rq *rt_rq = group_rt_rq(rt_se);
576 struct task_struct *p;
579 return !!rt_rq->rt_nr_boosted;
581 p = rt_task_of(rt_se);
582 return p->prio != p->normal_prio;
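/*
 * Example of a boosted entity: a SCHED_NORMAL task that owns an rt_mutex
 * contended by an RT waiter has its prio raised above normal_prio by priority
 * inheritance, so rt_se_boosted() returns true for it. Boosted entities are
 * counted in rt_nr_boosted, which is why rt_rq_throttled() above refuses to
 * throttle a group that still contains one.
 */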
586 static inline const struct cpumask *sched_rt_period_mask(void)
588 return this_rq()->rd->span;
591 static inline const struct cpumask *sched_rt_period_mask(void)
593 return cpu_online_mask;
598 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
600 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
603 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
605 return &rt_rq->tg->rt_bandwidth;
608 #else /* !CONFIG_RT_GROUP_SCHED */
610 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
612 return rt_rq->rt_runtime;
615 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
617 return ktime_to_ns(def_rt_bandwidth.rt_period);
620 typedef struct rt_rq *rt_rq_iter_t;
622 #define for_each_rt_rq(rt_rq, iter, rq) \
623 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
625 #define for_each_sched_rt_entity(rt_se) \
626 for (; rt_se; rt_se = NULL)
628 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
633 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
635 struct rq *rq = rq_of_rt_rq(rt_rq);
637 if (!rt_rq->rt_nr_running)
640 enqueue_top_rt_rq(rt_rq);
644 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
646 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
649 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
651 return rt_rq->rt_throttled;
654 static inline const struct cpumask *sched_rt_period_mask(void)
656 return cpu_online_mask;
660 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
662 return &cpu_rq(cpu)->rt;
665 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
667 return &def_rt_bandwidth;
670 #endif /* CONFIG_RT_GROUP_SCHED */
672 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
674 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
676 return (hrtimer_active(&rt_b->rt_period_timer) ||
677 rt_rq->rt_time < rt_b->rt_runtime);
682 * We ran out of runtime, see if we can borrow some from our neighbours.
684 static void do_balance_runtime(struct rt_rq *rt_rq)
686 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
687 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
691 weight = cpumask_weight(rd->span);
693 raw_spin_lock(&rt_b->rt_runtime_lock);
694 rt_period = ktime_to_ns(rt_b->rt_period);
695 for_each_cpu(i, rd->span) {
696 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
702 raw_spin_lock(&iter->rt_runtime_lock);
704 * Either all rqs have inf runtime and there's nothing to steal
705 * or __disable_runtime() below sets a specific rq to inf to
706 * indicate it's been disabled and disallow stealing.
708 if (iter->rt_runtime == RUNTIME_INF)
712 * From runqueues with spare time, take 1/n part of their
713 * spare time, but no more than our period.
715 diff = iter->rt_runtime - iter->rt_time;
717 diff = div_u64((u64)diff, weight);
718 if (rt_rq->rt_runtime + diff > rt_period)
719 diff = rt_period - rt_rq->rt_runtime;
720 iter->rt_runtime -= diff;
721 rt_rq->rt_runtime += diff;
722 if (rt_rq->rt_runtime == rt_period) {
723 raw_spin_unlock(&iter->rt_runtime_lock);
728 raw_spin_unlock(&iter->rt_runtime_lock);
730 raw_spin_unlock(&rt_b->rt_runtime_lock);
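/*
 * Worked example of the 1/n borrowing above (numbers are illustrative): on a
 * root domain spanning 4 CPUs (weight == 4), with the default 950 ms runtime
 * per 1 s period, a starved rt_rq may take (iter->rt_runtime - iter->rt_time)
 * / 4 from each neighbour. A neighbour with 150 ms of unused runtime thus
 * contributes at most 37.5 ms, and borrowing stops as soon as the borrower's
 * rt_runtime would reach the full period.
 */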
734 * Ensure this RQ takes back all the runtime it lent to its neighbours.
736 static void __disable_runtime(struct rq *rq)
738 struct root_domain *rd = rq->rd;
742 if (unlikely(!scheduler_running))
745 for_each_rt_rq(rt_rq, iter, rq) {
746 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
750 raw_spin_lock(&rt_b->rt_runtime_lock);
751 raw_spin_lock(&rt_rq->rt_runtime_lock);
753 * Either we're all inf and nobody needs to borrow, or we're
754 * already disabled and thus have nothing to do, or we have
755 * exactly the right amount of runtime to take out.
757 if (rt_rq->rt_runtime == RUNTIME_INF ||
758 rt_rq->rt_runtime == rt_b->rt_runtime)
760 raw_spin_unlock(&rt_rq->rt_runtime_lock);
763 * Calculate the difference between what we started out with
764 * and what we currently have, that's the amount of runtime
765 * we lent and now have to reclaim.
767 want = rt_b->rt_runtime - rt_rq->rt_runtime;
770 * Greedy reclaim, take back as much as we can.
772 for_each_cpu(i, rd->span) {
773 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
777 * Can't reclaim from ourselves or disabled runqueues.
779 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
782 raw_spin_lock(&iter->rt_runtime_lock);
784 diff = min_t(s64, iter->rt_runtime, want);
785 iter->rt_runtime -= diff;
788 iter->rt_runtime -= want;
791 raw_spin_unlock(&iter->rt_runtime_lock);
797 raw_spin_lock(&rt_rq->rt_runtime_lock);
799 * We cannot be left wanting - that would mean some runtime
800 * leaked out of the system.
805 * Disable all the borrow logic by pretending we have inf
806 * runtime - in which case borrowing doesn't make sense.
808 rt_rq->rt_runtime = RUNTIME_INF;
809 rt_rq->rt_throttled = 0;
810 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 raw_spin_unlock(&rt_b->rt_runtime_lock);
813 /* Make rt_rq available for pick_next_task() */
814 sched_rt_rq_enqueue(rt_rq);
818 static void __enable_runtime(struct rq *rq)
823 if (unlikely(!scheduler_running))
827 * Reset each runqueue's bandwidth settings
829 for_each_rt_rq(rt_rq, iter, rq) {
830 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
832 raw_spin_lock(&rt_b->rt_runtime_lock);
833 raw_spin_lock(&rt_rq->rt_runtime_lock);
834 rt_rq->rt_runtime = rt_b->rt_runtime;
836 rt_rq->rt_throttled = 0;
837 raw_spin_unlock(&rt_rq->rt_runtime_lock);
838 raw_spin_unlock(&rt_b->rt_runtime_lock);
842 static void balance_runtime(struct rt_rq *rt_rq)
844 if (!sched_feat(RT_RUNTIME_SHARE))
847 if (rt_rq->rt_time > rt_rq->rt_runtime) {
848 raw_spin_unlock(&rt_rq->rt_runtime_lock);
849 do_balance_runtime(rt_rq);
850 raw_spin_lock(&rt_rq->rt_runtime_lock);
853 #else /* !CONFIG_SMP */
854 static inline void balance_runtime(struct rt_rq *rt_rq) {}
855 #endif /* CONFIG_SMP */
857 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
859 int i, idle = 1, throttled = 0;
860 const struct cpumask *span;
862 span = sched_rt_period_mask();
863 #ifdef CONFIG_RT_GROUP_SCHED
865 * FIXME: isolated CPUs should really leave the root task group,
866 * whether they are isolcpus or were isolated via cpusets, lest
867 * the timer run on a CPU which does not service all runqueues,
868 * potentially leaving other CPUs indefinitely throttled. If
869 * isolation is really required, the user will turn the throttle
870 * off to kill the perturbations it causes anyway. Meanwhile,
871 * this maintains functionality for boot and/or troubleshooting.
873 if (rt_b == &root_task_group.rt_bandwidth)
874 span = cpu_online_mask;
876 for_each_cpu(i, span) {
878 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
879 struct rq *rq = rq_of_rt_rq(rt_rq);
884 * When span == cpu_online_mask, taking each rq->lock
885 * can be time-consuming. Try to avoid it when possible.
887 raw_spin_lock(&rt_rq->rt_runtime_lock);
888 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
889 rt_rq->rt_runtime = rt_b->rt_runtime;
890 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
891 raw_spin_unlock(&rt_rq->rt_runtime_lock);
898 if (rt_rq->rt_time) {
901 raw_spin_lock(&rt_rq->rt_runtime_lock);
902 if (rt_rq->rt_throttled)
903 balance_runtime(rt_rq);
904 runtime = rt_rq->rt_runtime;
905 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
906 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
907 rt_rq->rt_throttled = 0;
911 * When we're idle and a woken (rt) task is
912 * throttled wakeup_preempt() will set
913 * skip_update and the time between the wakeup
914 * and this unthrottle will get accounted as 'runtime'.
917 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
918 rq_clock_cancel_skipupdate(rq);
920 if (rt_rq->rt_time || rt_rq->rt_nr_running)
922 raw_spin_unlock(&rt_rq->rt_runtime_lock);
923 } else if (rt_rq->rt_nr_running) {
925 if (!rt_rq_throttled(rt_rq))
928 if (rt_rq->rt_throttled)
932 sched_rt_rq_enqueue(rt_rq);
936 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
942 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
944 #ifdef CONFIG_RT_GROUP_SCHED
945 struct rt_rq *rt_rq = group_rt_rq(rt_se);
948 return rt_rq->highest_prio.curr;
951 return rt_task_of(rt_se)->prio;
954 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
956 u64 runtime = sched_rt_runtime(rt_rq);
958 if (rt_rq->rt_throttled)
959 return rt_rq_throttled(rt_rq);
961 if (runtime >= sched_rt_period(rt_rq))
964 balance_runtime(rt_rq);
965 runtime = sched_rt_runtime(rt_rq);
966 if (runtime == RUNTIME_INF)
969 if (rt_rq->rt_time > runtime) {
970 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
973 * Don't actually throttle groups that have no runtime assigned
974 * but accrue some time due to boosting.
976 if (likely(rt_b->rt_runtime)) {
977 rt_rq->rt_throttled = 1;
978 printk_deferred_once("sched: RT throttling activated\n");
981 * In case we did anyway, make it go away,
982 * replenishment is a joke, since it will replenish us with exactly 0 ns.
988 if (rt_rq_throttled(rt_rq)) {
989 sched_rt_rq_dequeue(rt_rq);
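/*
 * Putting the pieces above together with the defaults: rt_time accumulates
 * the RT CPU time used in the current period. Once it exceeds the 950 ms
 * runtime and the bandwidth actually has runtime assigned, the rt_rq is
 * marked throttled and dequeued, so non-RT tasks get the remaining ~50 ms of
 * the 1 s period; the period timer later clears rt_throttled and re-enqueues
 * the queue.
 */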
998 * Update the current task's runtime statistics. Skip current tasks that
999 * are not in our scheduling class.
1001 static void update_curr_rt(struct rq *rq)
1003 struct task_struct *curr = rq->curr;
1004 struct sched_rt_entity *rt_se = &curr->rt;
1007 if (curr->sched_class != &rt_sched_class)
1010 delta_exec = update_curr_common(rq);
1011 if (unlikely(delta_exec <= 0))
1014 if (!rt_bandwidth_enabled())
1017 for_each_sched_rt_entity(rt_se) {
1018 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1021 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1022 raw_spin_lock(&rt_rq->rt_runtime_lock);
1023 rt_rq->rt_time += delta_exec;
1024 exceeded = sched_rt_runtime_exceeded(rt_rq);
1027 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1029 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1035 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1037 struct rq *rq = rq_of_rt_rq(rt_rq);
1039 BUG_ON(&rq->rt != rt_rq);
1041 if (!rt_rq->rt_queued)
1044 BUG_ON(!rq->nr_running);
1046 sub_nr_running(rq, count);
1047 rt_rq->rt_queued = 0;
1052 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1054 struct rq *rq = rq_of_rt_rq(rt_rq);
1056 BUG_ON(&rq->rt != rt_rq);
1058 if (rt_rq->rt_queued)
1061 if (rt_rq_throttled(rt_rq))
1064 if (rt_rq->rt_nr_running) {
1065 add_nr_running(rq, rt_rq->rt_nr_running);
1066 rt_rq->rt_queued = 1;
1069 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1070 cpufreq_update_util(rq, 0);
1073 #if defined CONFIG_SMP
1076 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1078 struct rq *rq = rq_of_rt_rq(rt_rq);
1080 #ifdef CONFIG_RT_GROUP_SCHED
1082 * Change rq's cpupri only if rt_rq is the top queue.
1084 if (&rq->rt != rt_rq)
1087 if (rq->online && prio < prev_prio)
1088 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1092 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1094 struct rq *rq = rq_of_rt_rq(rt_rq);
1096 #ifdef CONFIG_RT_GROUP_SCHED
1098 * Change rq's cpupri only if rt_rq is the top queue.
1100 if (&rq->rt != rt_rq)
1103 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1104 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1107 #else /* CONFIG_SMP */
1110 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1112 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1114 #endif /* CONFIG_SMP */
1116 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1118 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1120 int prev_prio = rt_rq->highest_prio.curr;
1122 if (prio < prev_prio)
1123 rt_rq->highest_prio.curr = prio;
1125 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1129 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1131 int prev_prio = rt_rq->highest_prio.curr;
1133 if (rt_rq->rt_nr_running) {
1135 WARN_ON(prio < prev_prio);
1138 * This may have been our highest task, and therefore
1139 * we may have some recomputation to do
1141 if (prio == prev_prio) {
1142 struct rt_prio_array *array = &rt_rq->active;
1144 rt_rq->highest_prio.curr =
1145 sched_find_first_bit(array->bitmap);
1149 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1152 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1157 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1158 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1160 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1162 #ifdef CONFIG_RT_GROUP_SCHED
1165 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1167 if (rt_se_boosted(rt_se))
1168 rt_rq->rt_nr_boosted++;
1171 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1175 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1177 if (rt_se_boosted(rt_se))
1178 rt_rq->rt_nr_boosted--;
1180 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1183 #else /* CONFIG_RT_GROUP_SCHED */
1186 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1188 start_rt_bandwidth(&def_rt_bandwidth);
1192 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1194 #endif /* CONFIG_RT_GROUP_SCHED */
1197 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1199 struct rt_rq *group_rq = group_rt_rq(rt_se);
1202 return group_rq->rt_nr_running;
1208 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1210 struct rt_rq *group_rq = group_rt_rq(rt_se);
1211 struct task_struct *tsk;
1214 return group_rq->rr_nr_running;
1216 tsk = rt_task_of(rt_se);
1218 return (tsk->policy == SCHED_RR) ? 1 : 0;
1222 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224 int prio = rt_se_prio(rt_se);
1226 WARN_ON(!rt_prio(prio));
1227 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1228 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1230 inc_rt_prio(rt_rq, prio);
1231 inc_rt_group(rt_se, rt_rq);
1235 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1237 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1238 WARN_ON(!rt_rq->rt_nr_running);
1239 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1240 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1242 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1243 dec_rt_group(rt_se, rt_rq);
1247 * Change rt_se->run_list location unless SAVE && !MOVE
1249 * assumes ENQUEUE/DEQUEUE flags match
1251 static inline bool move_entity(unsigned int flags)
1253 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1259 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1261 list_del_init(&rt_se->run_list);
1263 if (list_empty(array->queue + rt_se_prio(rt_se)))
1264 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1269 static inline struct sched_statistics *
1270 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1272 #ifdef CONFIG_RT_GROUP_SCHED
1273 /* schedstats is not supported for rt group. */
1274 if (!rt_entity_is_task(rt_se))
1278 return &rt_task_of(rt_se)->stats;
1282 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1284 struct sched_statistics *stats;
1285 struct task_struct *p = NULL;
1287 if (!schedstat_enabled())
1290 if (rt_entity_is_task(rt_se))
1291 p = rt_task_of(rt_se);
1293 stats = __schedstats_from_rt_se(rt_se);
1297 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1301 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1303 struct sched_statistics *stats;
1304 struct task_struct *p = NULL;
1306 if (!schedstat_enabled())
1309 if (rt_entity_is_task(rt_se))
1310 p = rt_task_of(rt_se);
1312 stats = __schedstats_from_rt_se(rt_se);
1316 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1320 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1323 if (!schedstat_enabled())
1326 if (flags & ENQUEUE_WAKEUP)
1327 update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1331 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1333 struct sched_statistics *stats;
1334 struct task_struct *p = NULL;
1336 if (!schedstat_enabled())
1339 if (rt_entity_is_task(rt_se))
1340 p = rt_task_of(rt_se);
1342 stats = __schedstats_from_rt_se(rt_se);
1346 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1350 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1353 struct task_struct *p = NULL;
1355 if (!schedstat_enabled())
1358 if (rt_entity_is_task(rt_se))
1359 p = rt_task_of(rt_se);
1361 if ((flags & DEQUEUE_SLEEP) && p) {
1364 state = READ_ONCE(p->__state);
1365 if (state & TASK_INTERRUPTIBLE)
1366 __schedstat_set(p->stats.sleep_start,
1367 rq_clock(rq_of_rt_rq(rt_rq)));
1369 if (state & TASK_UNINTERRUPTIBLE)
1370 __schedstat_set(p->stats.block_start,
1371 rq_clock(rq_of_rt_rq(rt_rq)));
1375 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1377 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1378 struct rt_prio_array *array = &rt_rq->active;
1379 struct rt_rq *group_rq = group_rt_rq(rt_se);
1380 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1383 * Don't enqueue the group if it's throttled, or when empty.
1384 * The latter is a consequence of the former when a child group
1385 * gets throttled and the current group doesn't have any other active members.
1388 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1390 __delist_rt_entity(rt_se, array);
1394 if (move_entity(flags)) {
1395 WARN_ON_ONCE(rt_se->on_list);
1396 if (flags & ENQUEUE_HEAD)
1397 list_add(&rt_se->run_list, queue);
1399 list_add_tail(&rt_se->run_list, queue);
1401 __set_bit(rt_se_prio(rt_se), array->bitmap);
1406 inc_rt_tasks(rt_se, rt_rq);
1409 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1411 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1412 struct rt_prio_array *array = &rt_rq->active;
1414 if (move_entity(flags)) {
1415 WARN_ON_ONCE(!rt_se->on_list);
1416 __delist_rt_entity(rt_se, array);
1420 dec_rt_tasks(rt_se, rt_rq);
1424 * Because the prio of an upper entry depends on the lower
1425 * entries, we must remove entries top-down.
1427 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1429 struct sched_rt_entity *back = NULL;
1430 unsigned int rt_nr_running;
1432 for_each_sched_rt_entity(rt_se) {
1437 rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1439 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1440 if (on_rt_rq(rt_se))
1441 __dequeue_rt_entity(rt_se, flags);
1444 dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1447 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1449 struct rq *rq = rq_of_rt_se(rt_se);
1451 update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1453 dequeue_rt_stack(rt_se, flags);
1454 for_each_sched_rt_entity(rt_se)
1455 __enqueue_rt_entity(rt_se, flags);
1456 enqueue_top_rt_rq(&rq->rt);
1459 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1461 struct rq *rq = rq_of_rt_se(rt_se);
1463 update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1465 dequeue_rt_stack(rt_se, flags);
1467 for_each_sched_rt_entity(rt_se) {
1468 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1470 if (rt_rq && rt_rq->rt_nr_running)
1471 __enqueue_rt_entity(rt_se, flags);
1473 enqueue_top_rt_rq(&rq->rt);
1477 * Adding/removing a task to/from a priority array:
1480 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1482 struct sched_rt_entity *rt_se = &p->rt;
1484 if (flags & ENQUEUE_WAKEUP)
1487 check_schedstat_required();
1488 update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1490 enqueue_rt_entity(rt_se, flags);
1492 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1493 enqueue_pushable_task(rq, p);
1496 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1498 struct sched_rt_entity *rt_se = &p->rt;
1501 dequeue_rt_entity(rt_se, flags);
1503 dequeue_pushable_task(rq, p);
1507 * Put the task at the head or the end of the run list without the overhead of
1508 * dequeue followed by enqueue.
1511 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1513 if (on_rt_rq(rt_se)) {
1514 struct rt_prio_array *array = &rt_rq->active;
1515 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1518 list_move(&rt_se->run_list, queue);
1520 list_move_tail(&rt_se->run_list, queue);
1524 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1526 struct sched_rt_entity *rt_se = &p->rt;
1527 struct rt_rq *rt_rq;
1529 for_each_sched_rt_entity(rt_se) {
1530 rt_rq = rt_rq_of_se(rt_se);
1531 requeue_rt_entity(rt_rq, rt_se, head);
1535 static void yield_task_rt(struct rq *rq)
1537 requeue_task_rt(rq, rq->curr, 0);
1541 static int find_lowest_rq(struct task_struct *task);
1544 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1546 struct task_struct *curr;
1550 /* For anything but wake ups, just return the task_cpu */
1551 if (!(flags & (WF_TTWU | WF_FORK)))
1557 curr = READ_ONCE(rq->curr); /* unlocked access */
1560 * If the current task on @p's runqueue is an RT task, then
1561 * try to see if we can wake this RT task up on another
1562 * runqueue. Otherwise simply start this RT task
1563 * on its current runqueue.
1565 * We want to avoid overloading runqueues. If the woken
1566 * task is a higher priority, then it will stay on this CPU
1567 * and the lower prio task should be moved to another CPU.
1568 * Even though this will probably make the lower prio task
1569 * lose its cache, we do not want to bounce a higher task
1570 * around just because it gave up its CPU, perhaps for a lock?
1573 * For equal prio tasks, we just let the scheduler sort it out.
1575 * Otherwise, just let it ride on the affined RQ and the
1576 * post-schedule router will push the preempted task away
1578 * This test is optimistic, if we get it wrong the load-balancer
1579 * will have to sort it out.
1581 * We take into account the capacity of the CPU to ensure it fits the
1582 * requirement of the task - which is only important on heterogeneous
1583 * systems like big.LITTLE.
1586 unlikely(rt_task(curr)) &&
1587 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1589 if (test || !rt_task_fits_capacity(p, cpu)) {
1590 int target = find_lowest_rq(p);
1593 * Bail out if we were forcing a migration to find a better
1594 * fitting CPU but our search failed.
1596 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1600 * Don't bother moving it if the destination CPU is
1601 * not running a lower priority task.
1604 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1615 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1618 * Current can't be migrated, useless to reschedule,
1619 * let's hope p can move out.
1621 if (rq->curr->nr_cpus_allowed == 1 ||
1622 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1626 * p is migratable, so let's not schedule it and
1627 * see if it is pushed or pulled somewhere else.
1629 if (p->nr_cpus_allowed != 1 &&
1630 cpupri_find(&rq->rd->cpupri, p, NULL))
1634 * There appear to be other CPUs that can accept
1635 * the current task but none can run 'p', so let's reschedule
1636 * to try and push the current task away:
1638 requeue_task_rt(rq, p, 1);
1642 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1644 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1646 * This is OK, because current is on_cpu, which avoids it being
1647 * picked for load-balance and preemption/IRQs are still
1648 * disabled avoiding further scheduler activity on it and we've
1649 * not yet started the picking loop.
1651 rq_unpin_lock(rq, rf);
1653 rq_repin_lock(rq, rf);
1656 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1658 #endif /* CONFIG_SMP */
1661 * Preempt the current task with a newly woken task if needed:
1663 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1665 if (p->prio < rq->curr->prio) {
1674 * - the newly woken task is of equal priority to the current task
1675 * - the newly woken task is non-migratable while current is migratable
1676 * - current will be preempted on the next reschedule
1678 * we should check to see if current can readily move to a different
1679 * cpu. If so, we will reschedule to allow the push logic to try
1680 * to move current somewhere else, making room for our non-migratable task.
1683 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1684 check_preempt_equal_prio(rq, p);
1688 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1690 struct sched_rt_entity *rt_se = &p->rt;
1691 struct rt_rq *rt_rq = &rq->rt;
1693 p->se.exec_start = rq_clock_task(rq);
1694 if (on_rt_rq(&p->rt))
1695 update_stats_wait_end_rt(rt_rq, rt_se);
1697 /* The running task is never eligible for pushing */
1698 dequeue_pushable_task(rq, p);
1704 * If prev task was rt, put_prev_task() has already updated the
1705 * utilization. We only care about the case where we start to schedule an RT task.
1708 if (rq->curr->sched_class != &rt_sched_class)
1709 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1711 rt_queue_push_tasks(rq);
1714 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1716 struct rt_prio_array *array = &rt_rq->active;
1717 struct sched_rt_entity *next = NULL;
1718 struct list_head *queue;
1721 idx = sched_find_first_bit(array->bitmap);
1722 BUG_ON(idx >= MAX_RT_PRIO);
1724 queue = array->queue + idx;
1725 if (SCHED_WARN_ON(list_empty(queue)))
1727 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1732 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1734 struct sched_rt_entity *rt_se;
1735 struct rt_rq *rt_rq = &rq->rt;
1738 rt_se = pick_next_rt_entity(rt_rq);
1739 if (unlikely(!rt_se))
1741 rt_rq = group_rt_rq(rt_se);
1744 return rt_task_of(rt_se);
1747 static struct task_struct *pick_task_rt(struct rq *rq)
1749 struct task_struct *p;
1751 if (!sched_rt_runnable(rq))
1754 p = _pick_next_task_rt(rq);
1759 static struct task_struct *pick_next_task_rt(struct rq *rq)
1761 struct task_struct *p = pick_task_rt(rq);
1764 set_next_task_rt(rq, p, true);
1769 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1771 struct sched_rt_entity *rt_se = &p->rt;
1772 struct rt_rq *rt_rq = &rq->rt;
1774 if (on_rt_rq(&p->rt))
1775 update_stats_wait_start_rt(rt_rq, rt_se);
1779 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1782 * The previous task needs to be made eligible for pushing
1783 * if it is still active
1785 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1786 enqueue_pushable_task(rq, p);
1791 /* Only try algorithms three times */
1792 #define RT_MAX_TRIES 3
1794 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1796 if (!task_on_cpu(rq, p) &&
1797 cpumask_test_cpu(cpu, &p->cpus_mask))
1804 * Return the highest pushable rq's task, which is suitable to be executed
1805 * on the CPU, NULL otherwise
1807 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1809 struct plist_head *head = &rq->rt.pushable_tasks;
1810 struct task_struct *p;
1812 if (!has_pushable_tasks(rq))
1815 plist_for_each_entry(p, head, pushable_tasks) {
1816 if (pick_rt_task(rq, p, cpu))
1823 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1825 static int find_lowest_rq(struct task_struct *task)
1827 struct sched_domain *sd;
1828 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1829 int this_cpu = smp_processor_id();
1830 int cpu = task_cpu(task);
1833 /* Make sure the mask is initialized first */
1834 if (unlikely(!lowest_mask))
1837 if (task->nr_cpus_allowed == 1)
1838 return -1; /* No other targets possible */
1841 * If we're on an asym system, ensure we consider the different capacities
1842 * of the CPUs when searching for the lowest_mask.
1844 if (sched_asym_cpucap_active()) {
1846 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1848 rt_task_fits_capacity);
1851 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1856 return -1; /* No targets found */
1859 * At this point we have built a mask of CPUs representing the
1860 * lowest priority tasks in the system. Now we want to elect
1861 * the best one based on our affinity and topology.
1863 * We prioritize the last CPU that the task executed on since
1864 * it is most likely cache-hot in that location.
1866 if (cpumask_test_cpu(cpu, lowest_mask))
1870 * Otherwise, we consult the sched_domains span maps to figure
1871 * out which CPU is logically closest to our hot cache data.
1873 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1874 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1877 for_each_domain(cpu, sd) {
1878 if (sd->flags & SD_WAKE_AFFINE) {
1882 * "this_cpu" is cheaper to preempt than a remote processor.
1885 if (this_cpu != -1 &&
1886 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1891 best_cpu = cpumask_any_and_distribute(lowest_mask,
1892 sched_domain_span(sd));
1893 if (best_cpu < nr_cpu_ids) {
1902 * And finally, if there were no matches within the domains
1903 * just give the caller *something* to work with from the compatible locations.
1909 cpu = cpumask_any_distribute(lowest_mask);
1910 if (cpu < nr_cpu_ids)
1916 /* Will lock the rq it finds */
1917 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1919 struct rq *lowest_rq = NULL;
1923 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1924 cpu = find_lowest_rq(task);
1926 if ((cpu == -1) || (cpu == rq->cpu))
1929 lowest_rq = cpu_rq(cpu);
1931 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1933 * Target rq has tasks of equal or higher priority,
1934 * retrying does not release any lock and is unlikely
1935 * to yield a different result.
1941 /* if the prio of this runqueue changed, try again */
1942 if (double_lock_balance(rq, lowest_rq)) {
1944 * We had to unlock the run queue. In
1945 * the meantime, the task could have
1946 * migrated already or had its affinity changed.
1947 * Also make sure that it wasn't scheduled on its rq.
1948 * It is possible the task was scheduled, set
1949 * "migrate_disabled" and then got preempted, so we must
1950 * check the task migration disable flag here too.
1952 if (unlikely(task_rq(task) != rq ||
1953 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1954 task_on_cpu(rq, task) ||
1956 is_migration_disabled(task) ||
1957 !task_on_rq_queued(task))) {
1959 double_unlock_balance(rq, lowest_rq);
1965 /* If this rq is still suitable use it. */
1966 if (lowest_rq->rt.highest_prio.curr > task->prio)
1970 double_unlock_balance(rq, lowest_rq);
1977 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1979 struct task_struct *p;
1981 if (!has_pushable_tasks(rq))
1984 p = plist_first_entry(&rq->rt.pushable_tasks,
1985 struct task_struct, pushable_tasks);
1987 BUG_ON(rq->cpu != task_cpu(p));
1988 BUG_ON(task_current(rq, p));
1989 BUG_ON(p->nr_cpus_allowed <= 1);
1991 BUG_ON(!task_on_rq_queued(p));
1992 BUG_ON(!rt_task(p));
1998 * If the current CPU has more than one RT task, see if the non-running
1999 * task can migrate over to a CPU that is running a task
2000 * of lesser priority.
2002 static int push_rt_task(struct rq *rq, bool pull)
2004 struct task_struct *next_task;
2005 struct rq *lowest_rq;
2008 if (!rq->rt.overloaded)
2011 next_task = pick_next_pushable_task(rq);
2017 * It's possible that the next_task slipped in with a
2018 * higher priority than current. If that's the case,
2019 * just reschedule current.
2021 if (unlikely(next_task->prio < rq->curr->prio)) {
2026 if (is_migration_disabled(next_task)) {
2027 struct task_struct *push_task = NULL;
2030 if (!pull || rq->push_busy)
2034 * Invoking find_lowest_rq() on anything but an RT task doesn't
2035 * make sense. Per the above priority check, curr has to
2036 * be of higher priority than next_task, so no need to
2037 * reschedule when bailing out.
2039 * Note that the stoppers are masqueraded as SCHED_FIFO
2040 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2042 if (rq->curr->sched_class != &rt_sched_class)
2045 cpu = find_lowest_rq(rq->curr);
2046 if (cpu == -1 || cpu == rq->cpu)
2050 * Given we found a CPU with lower priority than @next_task,
2051 * it should be running. However we cannot migrate it
2052 * to this other CPU; instead, attempt to push the current
2053 * running task on this CPU away.
2055 push_task = get_push_task(rq);
2058 raw_spin_rq_unlock(rq);
2059 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2060 push_task, &rq->push_work);
2062 raw_spin_rq_lock(rq);
2068 if (WARN_ON(next_task == rq->curr))
2071 /* We might release rq lock */
2072 get_task_struct(next_task);
2074 /* find_lock_lowest_rq locks the rq if found */
2075 lowest_rq = find_lock_lowest_rq(next_task, rq);
2077 struct task_struct *task;
2079 * find_lock_lowest_rq releases rq->lock
2080 * so it is possible that next_task has migrated.
2082 * We need to make sure that the task is still on the same
2083 * run-queue and is also still the next task eligible for
2086 task = pick_next_pushable_task(rq);
2087 if (task == next_task) {
2089 * The task hasn't migrated, and is still the next
2090 * eligible task, but we failed to find a run-queue
2091 * to push it to. Do not retry in this case, since
2092 * other CPUs will pull from us when ready.
2098 /* No more tasks, just exit */
2102 * Something has shifted, try again.
2104 put_task_struct(next_task);
2109 deactivate_task(rq, next_task, 0);
2110 set_task_cpu(next_task, lowest_rq->cpu);
2111 activate_task(lowest_rq, next_task, 0);
2112 resched_curr(lowest_rq);
2115 double_unlock_balance(rq, lowest_rq);
2117 put_task_struct(next_task);
2122 static void push_rt_tasks(struct rq *rq)
2124 /* push_rt_task will return true if it moved an RT task */
2125 while (push_rt_task(rq, false))
2129 #ifdef HAVE_RT_PUSH_IPI
2132 * When a high priority task schedules out from a CPU and a lower priority
2133 * task is scheduled in, a check is made to see if there are any RT tasks
2134 * on other CPUs that are waiting to run because a higher priority RT task
2135 * is currently running on its CPU. In this case, the CPU with multiple RT
2136 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2137 * up that may be able to run one of its non-running queued RT tasks.
2139 * All CPUs with overloaded RT tasks need to be notified as there is currently
2140 * no way to know which of these CPUs have the highest priority task waiting
2141 * to run. Instead of trying to take a spinlock on each of these CPUs,
2142 * which has been shown to cause large latency when done on machines with many
2143 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
2144 * RT tasks waiting to run.
2146 * Just sending an IPI to each of the CPUs is also an issue, as on large
2147 * CPU count machines, this can cause an IPI storm on a CPU, especially
2148 * if it's the only CPU with multiple RT tasks queued, and a large number
2149 * of CPUs scheduling a lower priority task at the same time.
2151 * Each root domain has its own irq work function that can iterate over
2152 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2153 * tasks must be checked whenever one or many CPUs lower their
2154 * priority, there's a single irq work iterator that will try to
2155 * push off RT tasks that are waiting to run.
2157 * When a CPU schedules a lower priority task, it will kick off the
2158 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2159 * As it only takes the first CPU that schedules a lower priority task
2160 * to start the process, the rto_start variable is incremented and if
2161 * the atomic result is one, then that CPU will try to take the rto_lock.
2162 * This prevents high contention on the lock as the process handles all
2163 * CPUs scheduling lower priority tasks.
2165 * All CPUs that are scheduling a lower priority task will increment the
2166 * rto_loop_next variable. This will make sure that the irq work iterator
2167 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2168 * priority task, even if the iterator is in the middle of a scan. Incrementing
2169 * the rto_loop_next will cause the iterator to perform another scan.
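/*
 * A concrete walk-through of the scheme above (CPU numbers are illustrative):
 * CPUs 0 and 2 are RT overloaded, so both are set in rto_mask. CPU 5 then
 * schedules a lower priority task, increments rto_loop_next and, being the
 * first to do so, wins rto_loop_start and queues rto_push_work on CPU 0.
 * CPU 0 pushes what it can and forwards the IPI to CPU 2 via rto_next_cpu().
 * If CPU 7 lowers its priority while the IPI is still in flight, rto_loop_next
 * changes again and the same iterator simply performs one more pass instead
 * of a second IPI chain being started.
 */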
2172 static int rto_next_cpu(struct root_domain *rd)
2178 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2179 * rto_next_cpu() will simply return the first CPU found in the rto_mask.
2182 * If rto_next_cpu() is called when rto_cpu is a valid CPU, it
2183 * will return the next CPU found in the rto_mask.
2185 * If there are no more CPUs left in the rto_mask, then a check is made
2186 * against rto_loop and rto_loop_next. rto_loop is only updated with
2187 * the rto_lock held, but any CPU may increment the rto_loop_next
2188 * without any locking.
2192 /* When rto_cpu is -1 this acts like cpumask_first() */
2193 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2197 if (cpu < nr_cpu_ids)
2203 * ACQUIRE ensures we see the @rto_mask changes
2204 * made prior to the @next value observed.
2206 * Matches WMB in rt_set_overload().
2208 next = atomic_read_acquire(&rd->rto_loop_next);
2210 if (rd->rto_loop == next)
2213 rd->rto_loop = next;
2219 static inline bool rto_start_trylock(atomic_t *v)
2221 return !atomic_cmpxchg_acquire(v, 0, 1);
2224 static inline void rto_start_unlock(atomic_t *v)
2226 atomic_set_release(v, 0);
2229 static void tell_cpu_to_push(struct rq *rq)
2233 /* Keep the loop going if the IPI is currently active */
2234 atomic_inc(&rq->rd->rto_loop_next);
2236 /* Only one CPU can initiate a loop at a time */
2237 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2240 raw_spin_lock(&rq->rd->rto_lock);
2243 * The rto_cpu is updated under the lock, if it has a valid CPU
2244 * then the IPI is still running and will continue due to the
2245 * update to loop_next, and nothing needs to be done here.
2246 * Otherwise it is finishing up and an IPI needs to be sent.
2248 if (rq->rd->rto_cpu < 0)
2249 cpu = rto_next_cpu(rq->rd);
2251 raw_spin_unlock(&rq->rd->rto_lock);
2253 rto_start_unlock(&rq->rd->rto_loop_start);
2256 /* Make sure the rd does not get freed while pushing */
2257 sched_get_rd(rq->rd);
2258 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2262 /* Called from hardirq context */
2263 void rto_push_irq_work_func(struct irq_work *work)
2265 struct root_domain *rd =
2266 container_of(work, struct root_domain, rto_push_work);
2273 * We do not need to grab the lock to check for has_pushable_tasks.
2274 * When it gets updated, a check is made if a push is possible.
2276 if (has_pushable_tasks(rq)) {
2277 raw_spin_rq_lock(rq);
2278 while (push_rt_task(rq, true))
2280 raw_spin_rq_unlock(rq);
2283 raw_spin_lock(&rd->rto_lock);
2285 /* Pass the IPI to the next rt overloaded queue */
2286 cpu = rto_next_cpu(rd);
2288 raw_spin_unlock(&rd->rto_lock);
2295 /* Try the next RT overloaded CPU */
2296 irq_work_queue_on(&rd->rto_push_work, cpu);
2298 #endif /* HAVE_RT_PUSH_IPI */
2300 static void pull_rt_task(struct rq *this_rq)
2302 int this_cpu = this_rq->cpu, cpu;
2303 bool resched = false;
2304 struct task_struct *p, *push_task;
2306 int rt_overload_count = rt_overloaded(this_rq);
2308 if (likely(!rt_overload_count))
2312 * Match the barrier from rt_set_overload(); this guarantees that if we
2313 * see overloaded we must also see the rto_mask bit.
2317 /* If we are the only overloaded CPU do nothing */
2318 if (rt_overload_count == 1 &&
2319 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2322 #ifdef HAVE_RT_PUSH_IPI
2323 if (sched_feat(RT_PUSH_IPI)) {
2324 tell_cpu_to_push(this_rq);
2329 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2330 if (this_cpu == cpu)
2333 src_rq = cpu_rq(cpu);
2336 * Don't bother taking the src_rq->lock if the next highest
2337 * task is known to be lower-priority than our current task.
2338 * This may look racy, but if this value is about to go
2339 * logically higher, the src_rq will push this task away.
2340 * And if it's going logically lower, we do not care
2342 if (src_rq->rt.highest_prio.next >=
2343 this_rq->rt.highest_prio.curr)
2347 * We can potentially drop this_rq's lock in
2348 * double_lock_balance, and another CPU could alter this_rq.
2352 double_lock_balance(this_rq, src_rq);
2355 * We can only pull a task that is pushable
2356 * on its rq, and no others.
2358 p = pick_highest_pushable_task(src_rq, this_cpu);
2361 * Do we have an RT task that preempts
2362 * the to-be-scheduled task?
2364 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2365 WARN_ON(p == src_rq->curr);
2366 WARN_ON(!task_on_rq_queued(p));
2369 * There's a chance that p is higher in priority
2370 * than what's currently running on its CPU.
2371 * This is just that p is waking up and hasn't
2372 * had a chance to schedule. We only pull
2373 * p if it is lower in priority than the
2374 * current task on the run queue
2376 if (p->prio < src_rq->curr->prio)
2379 if (is_migration_disabled(p)) {
2380 push_task = get_push_task(src_rq);
2382 deactivate_task(src_rq, p, 0);
2383 set_task_cpu(p, this_cpu);
2384 activate_task(this_rq, p, 0);
2388 * We continue with the search, just in
2389 * case there's an even higher prio task
2390 * in another runqueue. (low likelihood but possible)
2395 double_unlock_balance(this_rq, src_rq);
2399 raw_spin_rq_unlock(this_rq);
2400 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2401 push_task, &src_rq->push_work);
2403 raw_spin_rq_lock(this_rq);
2408 resched_curr(this_rq);
2412 * If we are not running and we are not going to reschedule soon, we should
2413 * try to push tasks away now
2415 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2417 bool need_to_push = !task_on_cpu(rq, p) &&
2418 !test_tsk_need_resched(rq->curr) &&
2419 p->nr_cpus_allowed > 1 &&
2420 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2421 (rq->curr->nr_cpus_allowed < 2 ||
2422 rq->curr->prio <= p->prio);
2428 /* Assumes rq->lock is held */
2429 static void rq_online_rt(struct rq *rq)
2431 if (rq->rt.overloaded)
2432 rt_set_overload(rq);
2434 __enable_runtime(rq);
2436 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2439 /* Assumes rq->lock is held */
2440 static void rq_offline_rt(struct rq *rq)
2442 if (rq->rt.overloaded)
2443 rt_clear_overload(rq);
2445 __disable_runtime(rq);
2447 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2451 * When switching from the rt queue, we bring ourselves to a position
2452 * that we might want to pull RT tasks from other runqueues.
2454 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2457 * If there are other RT tasks then we will reschedule
2458 * and the scheduling of the other RT tasks will handle
2459 * the balancing. But if we are the last RT task
2460 * we may need to handle the pulling of RT tasks now.
2463 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2466 rt_queue_pull_task(rq);
2469 void __init init_sched_rt_class(void)
2473 for_each_possible_cpu(i) {
2474 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2475 GFP_KERNEL, cpu_to_node(i));
2478 #endif /* CONFIG_SMP */
2481 * When switching a task to RT, we may overload the runqueue
2482 * with RT tasks. In this case we try to push them off to other runqueues.
2485 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2488 * If we are running, update the avg_rt tracking, as the running time
2489 * will from now on be accounted into the latter.
2491 if (task_current(rq, p)) {
2492 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2497 * If we are not running we may need to preempt the current
2498 * running task. If that current running task is also an RT task
2499 * then see if we can move to another run queue.
2501 if (task_on_rq_queued(p)) {
2503 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2504 rt_queue_push_tasks(rq);
2505 #endif /* CONFIG_SMP */
2506 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2512 * Priority of the task has changed. This may cause
2513 * us to initiate a push or pull.
2516 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2518 if (!task_on_rq_queued(p))
2521 if (task_current(rq, p)) {
2524 * If our priority decreases while running, we
2525 * may need to pull tasks to this runqueue.
2527 if (oldprio < p->prio)
2528 rt_queue_pull_task(rq);
2531 * If there's a higher priority task waiting to run, reschedule.
2534 if (p->prio > rq->rt.highest_prio.curr)
2537 /* For UP simply resched on drop of prio */
2538 if (oldprio < p->prio)
2540 #endif /* CONFIG_SMP */
2543 * This task is not running, but if its priority is
2544 * higher than that of the currently running task, reschedule.
2547 if (p->prio < rq->curr->prio)
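/*
 * Illustrative user-space sketch, not part of this file: changing the priority
 * of a thread that is already SCHED_FIFO or SCHED_RR, for instance with
 * pthread_setschedprio(), is one way prio_changed_rt() gets invoked. The
 * priority value 30 is an arbitrary example; the call fails unless the thread
 * is already in an RT class.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int err = pthread_setschedprio(pthread_self(), 30);

	if (err) {
		fprintf(stderr, "pthread_setschedprio: %s\n", strerror(err));
		return 1;
	}
	return 0;
}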
2552 #ifdef CONFIG_POSIX_TIMERS
2553 static void watchdog(struct rq *rq, struct task_struct *p)
2555 unsigned long soft, hard;
2557 /* the hard limit may change after the soft limit was read; this is fixed up on the next tick */
2558 soft = task_rlimit(p, RLIMIT_RTTIME);
2559 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2561 if (soft != RLIM_INFINITY) {
2564 if (p->rt.watchdog_stamp != jiffies) {
2566 p->rt.watchdog_stamp = jiffies;
2569 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2570 if (p->rt.timeout > next) {
2571 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2572 p->se.sum_exec_runtime);
2577 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
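/*
 * Illustrative user-space sketch, not part of this file: the watchdog above
 * enforces RLIMIT_RTTIME, the per-task cap on CPU time an RT task may consume
 * without blocking. The limits are in microseconds; the values below
 * (50 ms soft, 100 ms hard) are arbitrary examples. Crossing the soft limit
 * raises SIGXCPU, crossing the hard limit kills the task.
 */
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 50000,	/* soft limit: 50 ms of RT CPU time */
		.rlim_max = 100000,	/* hard limit: 100 ms of RT CPU time */
	};

	if (setrlimit(RLIMIT_RTTIME, &rl) == -1) {
		perror("setrlimit");
		return 1;
	}
	return 0;
}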
2581 * scheduler tick hitting a task of our scheduling class.
2583 * NOTE: This function can be called remotely by the tick offload that
2584 * goes along with full dynticks. Therefore no local assumption can be made,
2585 * and everything must be accessed through the @rq and @p parameters passed in.
2588 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2590 struct sched_rt_entity *rt_se = &p->rt;
2593 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2598 * RR tasks need a special form of timeslice management.
2599 * FIFO tasks have no timeslices.
2601 if (p->policy != SCHED_RR)
2604 if (--p->rt.time_slice)
2607 p->rt.time_slice = sched_rr_timeslice;
2610 * Requeue to the end of the queue if we (and all of our ancestors) are not
2611 * the only element on the queue.
2613 for_each_sched_rt_entity(rt_se) {
2614 if (rt_se->run_list.prev != rt_se->run_list.next) {
2615 requeue_task_rt(rq, p, 0);
2622 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2625 * Time slice is 0 for SCHED_FIFO tasks
2627 if (task->policy == SCHED_RR)
2628 return sched_rr_timeslice;
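/*
 * Illustrative user-space sketch, not part of this file: the value computed by
 * get_rr_interval_rt() is what sched_rr_get_interval() reports. For a SCHED_RR
 * task it is the round-robin quantum; for SCHED_FIFO it reads back as zero.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}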
2633 #ifdef CONFIG_SCHED_CORE
2634 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2636 struct rt_rq *rt_rq;
2638 #ifdef CONFIG_RT_GROUP_SCHED
2639 rt_rq = task_group(p)->rt_rq[cpu];
2641 rt_rq = &cpu_rq(cpu)->rt;
2644 return rt_rq_throttled(rt_rq);
2648 DEFINE_SCHED_CLASS(rt) = {
2650 .enqueue_task = enqueue_task_rt,
2651 .dequeue_task = dequeue_task_rt,
2652 .yield_task = yield_task_rt,
2654 .wakeup_preempt = wakeup_preempt_rt,
2656 .pick_next_task = pick_next_task_rt,
2657 .put_prev_task = put_prev_task_rt,
2658 .set_next_task = set_next_task_rt,
2661 .balance = balance_rt,
2662 .pick_task = pick_task_rt,
2663 .select_task_rq = select_task_rq_rt,
2664 .set_cpus_allowed = set_cpus_allowed_common,
2665 .rq_online = rq_online_rt,
2666 .rq_offline = rq_offline_rt,
2667 .task_woken = task_woken_rt,
2668 .switched_from = switched_from_rt,
2669 .find_lock_rq = find_lock_lowest_rq,
2672 .task_tick = task_tick_rt,
2674 .get_rr_interval = get_rr_interval_rt,
2676 .prio_changed = prio_changed_rt,
2677 .switched_to = switched_to_rt,
2679 .update_curr = update_curr_rt,
2681 #ifdef CONFIG_SCHED_CORE
2682 .task_is_throttled = task_is_throttled_rt,
2685 #ifdef CONFIG_UCLAMP_TASK
2686 .uclamp_enabled = 1,
2690 #ifdef CONFIG_RT_GROUP_SCHED
2692 * Ensure that the real time constraints are schedulable.
2694 static DEFINE_MUTEX(rt_constraints_mutex);
2696 static inline int tg_has_rt_tasks(struct task_group *tg)
2698 struct task_struct *task;
2699 struct css_task_iter it;
2703 * Autogroups do not have RT tasks; see autogroup_create().
2705 if (task_group_is_autogroup(tg))
2708 css_task_iter_start(&tg->css, 0, &it);
2709 while (!ret && (task = css_task_iter_next(&it)))
2710 ret |= rt_task(task);
2711 css_task_iter_end(&it);
2716 struct rt_schedulable_data {
2717 struct task_group *tg;
2722 static int tg_rt_schedulable(struct task_group *tg, void *data)
2724 struct rt_schedulable_data *d = data;
2725 struct task_group *child;
2726 unsigned long total, sum = 0;
2727 u64 period, runtime;
2729 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2730 runtime = tg->rt_bandwidth.rt_runtime;
2733 period = d->rt_period;
2734 runtime = d->rt_runtime;
2738 * Cannot have more runtime than the period.
2740 if (runtime > period && runtime != RUNTIME_INF)
2744 * Ensure we don't starve existing RT tasks if runtime turns zero.
2746 if (rt_bandwidth_enabled() && !runtime &&
2747 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2750 total = to_ratio(period, runtime);
2753 * Nobody can have more than the global setting allows.
2755 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2759 * The sum of our children's runtime should not exceed our own.
2761 list_for_each_entry_rcu(child, &tg->children, siblings) {
2762 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2763 runtime = child->rt_bandwidth.rt_runtime;
2765 if (child == d->tg) {
2766 period = d->rt_period;
2767 runtime = d->rt_runtime;
2770 sum += to_ratio(period, runtime);
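/*
 * Illustrative sketch, not part of this file: the schedulability test above
 * compares fixed-point utilization ratios. Assuming to_ratio() is the usual
 * (runtime << BW_SHIFT) / period with BW_SHIFT == 20, the default global
 * budget of 950000us runtime per 1000000us period maps to
 * example_to_ratio(1000000, 950000) == 996147, i.e. 95% of 1 << 20, and each
 * group's ratio (and the sum over its children) must stay within its parent's.
 */
static inline unsigned long long example_to_ratio(unsigned long long period,
						  unsigned long long runtime)
{
	/* Hypothetical stand-in for the kernel's to_ratio() helper. */
	if (!period)
		return 0;
	return (runtime << 20) / period;	/* BW_SHIFT == 20 assumed */
}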
2779 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2783 struct rt_schedulable_data data = {
2785 .rt_period = period,
2786 .rt_runtime = runtime,
2790 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2796 static int tg_set_rt_bandwidth(struct task_group *tg,
2797 u64 rt_period, u64 rt_runtime)
2802 * Disallowing RT runtime for the root group is BAD; it would prevent the
2803 * kernel from creating (and operating) RT threads.
2805 if (tg == &root_task_group && rt_runtime == 0)
2808 /* A zero period doesn't make any sense. */
2813 * Bound the quota to guard against overflow during the bandwidth shift.
2815 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2818 mutex_lock(&rt_constraints_mutex);
2819 err = __rt_schedulable(tg, rt_period, rt_runtime);
2823 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2824 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2825 tg->rt_bandwidth.rt_runtime = rt_runtime;
2827 for_each_possible_cpu(i) {
2828 struct rt_rq *rt_rq = tg->rt_rq[i];
2830 raw_spin_lock(&rt_rq->rt_runtime_lock);
2831 rt_rq->rt_runtime = rt_runtime;
2832 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2834 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2836 mutex_unlock(&rt_constraints_mutex);
2841 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2843 u64 rt_runtime, rt_period;
2845 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2846 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2847 if (rt_runtime_us < 0)
2848 rt_runtime = RUNTIME_INF;
2849 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2852 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2855 long sched_group_rt_runtime(struct task_group *tg)
2859 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2862 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2863 do_div(rt_runtime_us, NSEC_PER_USEC);
2864 return rt_runtime_us;
2867 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2869 u64 rt_runtime, rt_period;
2871 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2874 rt_period = rt_period_us * NSEC_PER_USEC;
2875 rt_runtime = tg->rt_bandwidth.rt_runtime;
2877 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2880 long sched_group_rt_period(struct task_group *tg)
2884 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2885 do_div(rt_period_us, NSEC_PER_USEC);
2886 return rt_period_us;
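/*
 * Illustrative user-space sketch, not part of this file: the setters above back
 * the cgroup-v1 cpu controller files cpu.rt_runtime_us and cpu.rt_period_us.
 * The mount point and group name below are assumptions; adjust them to the
 * local cgroup layout. Writing -1 to cpu.rt_runtime_us selects RUNTIME_INF.
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical group path; grant 300 ms of RT runtime per (default) 1 s period. */
	FILE *f = fopen("/sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 300000);
	fclose(f);
	return 0;
}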
2889 #ifdef CONFIG_SYSCTL
2890 static int sched_rt_global_constraints(void)
2894 mutex_lock(&rt_constraints_mutex);
2895 ret = __rt_schedulable(NULL, 0, 0);
2896 mutex_unlock(&rt_constraints_mutex);
2900 #endif /* CONFIG_SYSCTL */
2902 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2904 /* Don't accept realtime tasks when there is no way for them to run */
2905 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2911 #else /* !CONFIG_RT_GROUP_SCHED */
2913 #ifdef CONFIG_SYSCTL
2914 static int sched_rt_global_constraints(void)
2916 unsigned long flags;
2919 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2920 for_each_possible_cpu(i) {
2921 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2923 raw_spin_lock(&rt_rq->rt_runtime_lock);
2924 rt_rq->rt_runtime = global_rt_runtime();
2925 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2927 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2931 #endif /* CONFIG_SYSCTL */
2932 #endif /* CONFIG_RT_GROUP_SCHED */
2934 #ifdef CONFIG_SYSCTL
2935 static int sched_rt_global_validate(void)
2937 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2938 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2939 ((u64)sysctl_sched_rt_runtime *
2940 NSEC_PER_USEC > max_rt_runtime)))
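/*
 * Illustrative sketch, not part of this file: the validation above works in the
 * sysctl's microsecond units but bounds the runtime against max_rt_runtime,
 * which is in nanoseconds, hence the NSEC_PER_USEC scaling. A stand-alone
 * restatement, where RUNTIME_INF_US and MAX_RT_RUNTIME_NS are assumed values
 * (the latter roughly matching the "more than 4 hours" note near the top of
 * the file):
 */
static int example_rt_global_valid(long long runtime_us, long long period_us)
{
	const long long RUNTIME_INF_US = -1;		/* assumed sentinel */
	const long long MAX_RT_RUNTIME_NS = 1LL << 44;	/* ~4.9 hours, assumed */

	if (runtime_us == RUNTIME_INF_US)
		return 1;
	return runtime_us <= period_us &&
	       runtime_us * 1000LL <= MAX_RT_RUNTIME_NS;	/* us -> ns */
}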
2946 static void sched_rt_do_global(void)
2948 unsigned long flags;
2950 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2951 def_rt_bandwidth.rt_runtime = global_rt_runtime();
2952 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2953 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2956 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
2957 size_t *lenp, loff_t *ppos)
2959 int old_period, old_runtime;
2960 static DEFINE_MUTEX(mutex);
2964 old_period = sysctl_sched_rt_period;
2965 old_runtime = sysctl_sched_rt_runtime;
2967 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2969 if (!ret && write) {
2970 ret = sched_rt_global_validate();
2974 ret = sched_dl_global_validate();
2978 ret = sched_rt_global_constraints();
2982 sched_rt_do_global();
2983 sched_dl_do_global();
2987 sysctl_sched_rt_period = old_period;
2988 sysctl_sched_rt_runtime = old_runtime;
2990 mutex_unlock(&mutex);
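/*
 * Illustrative user-space sketch, not part of this file: this handler is
 * reached by writing /proc/sys/kernel/sched_rt_runtime_us (or
 * sched_rt_period_us). Writing -1 disables RT throttling; writing 950000
 * restores the default 95% budget.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", -1);	/* -1: no RT throttling; use with care */
	fclose(f);
	return 0;
}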
2995 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
2996 size_t *lenp, loff_t *ppos)
2999 static DEFINE_MUTEX(mutex);
3002 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3004 * Make sure that internally we keep jiffies.
3005 * Also, writing zero resets the timeslice to default:
3007 if (!ret && write) {
3008 sched_rr_timeslice =
3009 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3010 msecs_to_jiffies(sysctl_sched_rr_timeslice);
3012 if (sysctl_sched_rr_timeslice <= 0)
3013 sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
3015 mutex_unlock(&mutex);
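/*
 * Illustrative sketch, not part of this file: the handler above keeps
 * sched_rr_timeslice in jiffies while the sysctl is in milliseconds, and a
 * write of zero (or a negative value) falls back to the RR_TIMESLICE default.
 * A plain-C restatement of that conversion, assuming HZ == 250 and the usual
 * 100 ms RR_TIMESLICE, and ignoring the rounding msecs_to_jiffies() applies:
 */
static int example_rr_timeslice_jiffies(int timeslice_ms)
{
	const int hz_assumed = 250;				/* assumption */
	const int rr_default_ticks = 100 * hz_assumed / 1000;	/* 100 ms default */

	if (timeslice_ms <= 0)
		return rr_default_ticks;
	return timeslice_ms * hz_assumed / 1000;		/* ms -> jiffies */
}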
3019 #endif /* CONFIG_SYSCTL */
3021 #ifdef CONFIG_SCHED_DEBUG
3022 void print_rt_stats(struct seq_file *m, int cpu)
3025 struct rt_rq *rt_rq;
3028 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3029 print_rt_rq(m, cpu, rt_rq);
3032 #endif /* CONFIG_SCHED_DEBUG */