1 // SPDX-License-Identifier: GPL-2.0
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4 * policies)
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13 struct rt_bandwidth def_rt_bandwidth;
16 * period over which we measure -rt task CPU usage in us.
19 unsigned int sysctl_sched_rt_period = 1000000;
22 * part of the period that we allow rt tasks to run in us.
25 int sysctl_sched_rt_runtime = 950000;
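/*
 * Illustration (not from the original source): with the defaults above, RT
 * tasks may consume at most 950000 / 1000000 = 95% of each one-second
 * period, leaving 5% for non-RT work. Both knobs can be tuned at runtime:
 *
 *   echo 1000000 > /proc/sys/kernel/sched_rt_period_us
 *   echo  950000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * Writing -1 to sched_rt_runtime_us disables RT throttling altogether.
 */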
28 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
29 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
30 size_t *lenp, loff_t *ppos);
31 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
32 size_t *lenp, loff_t *ppos);
33 static struct ctl_table sched_rt_sysctls[] = {
35 .procname = "sched_rt_period_us",
36 .data = &sysctl_sched_rt_period,
37 .maxlen = sizeof(unsigned int),
39 .proc_handler = sched_rt_handler,
41 .extra2 = SYSCTL_INT_MAX,
44 .procname = "sched_rt_runtime_us",
45 .data = &sysctl_sched_rt_runtime,
46 .maxlen = sizeof(int),
48 .proc_handler = sched_rt_handler,
49 .extra1 = SYSCTL_NEG_ONE,
50 .extra2 = SYSCTL_INT_MAX,
53 .procname = "sched_rr_timeslice_ms",
54 .data = &sysctl_sched_rr_timeslice,
55 .maxlen = sizeof(int),
57 .proc_handler = sched_rr_handler,
62 static int __init sched_rt_sysctl_init(void)
64 register_sysctl_init("kernel", sched_rt_sysctls);
67 late_initcall(sched_rt_sysctl_init);
70 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
72 struct rt_bandwidth *rt_b =
73 container_of(timer, struct rt_bandwidth, rt_period_timer);
77 raw_spin_lock(&rt_b->rt_runtime_lock);
79 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
83 raw_spin_unlock(&rt_b->rt_runtime_lock);
84 idle = do_sched_rt_period_timer(rt_b, overrun);
85 raw_spin_lock(&rt_b->rt_runtime_lock);
88 rt_b->rt_period_active = 0;
89 raw_spin_unlock(&rt_b->rt_runtime_lock);
91 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
94 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
96 rt_b->rt_period = ns_to_ktime(period);
97 rt_b->rt_runtime = runtime;
99 raw_spin_lock_init(&rt_b->rt_runtime_lock);
101 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
102 HRTIMER_MODE_REL_HARD);
103 rt_b->rt_period_timer.function = sched_rt_period_timer;
106 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
108 raw_spin_lock(&rt_b->rt_runtime_lock);
109 if (!rt_b->rt_period_active) {
110 rt_b->rt_period_active = 1;
112 * SCHED_DEADLINE updates the bandwidth, as a runaway
113 * RT task with a DL task could hog a CPU. But DL does
114 * not reset the period. If a deadline task was running
115 * without an RT task running, it can cause RT tasks to
116 * throttle when they start up. Kick the timer right away
117 * to update the period.
119 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
120 hrtimer_start_expires(&rt_b->rt_period_timer,
121 HRTIMER_MODE_ABS_PINNED_HARD);
123 raw_spin_unlock(&rt_b->rt_runtime_lock);
126 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
128 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
131 do_start_rt_bandwidth(rt_b);
134 void init_rt_rq(struct rt_rq *rt_rq)
136 struct rt_prio_array *array;
139 array = &rt_rq->active;
140 for (i = 0; i < MAX_RT_PRIO; i++) {
141 INIT_LIST_HEAD(array->queue + i);
142 __clear_bit(i, array->bitmap);
144 /* delimiter for bitsearch: */
145 __set_bit(MAX_RT_PRIO, array->bitmap);
147 #if defined CONFIG_SMP
148 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
149 rt_rq->highest_prio.next = MAX_RT_PRIO-1;
150 rt_rq->rt_nr_migratory = 0;
151 rt_rq->overloaded = 0;
152 plist_head_init(&rt_rq->pushable_tasks);
153 #endif /* CONFIG_SMP */
154 /* We start in dequeued state, because no RT tasks are queued */
155 rt_rq->rt_queued = 0;
158 rt_rq->rt_throttled = 0;
159 rt_rq->rt_runtime = 0;
160 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
163 #ifdef CONFIG_RT_GROUP_SCHED
164 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
166 hrtimer_cancel(&rt_b->rt_period_timer);
169 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
171 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
173 #ifdef CONFIG_SCHED_DEBUG
174 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
176 return container_of(rt_se, struct task_struct, rt);
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
189 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
191 struct rt_rq *rt_rq = rt_se->rt_rq;
196 void unregister_rt_sched_group(struct task_group *tg)
199 destroy_rt_bandwidth(&tg->rt_bandwidth);
203 void free_rt_sched_group(struct task_group *tg)
207 for_each_possible_cpu(i) {
218 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
219 struct sched_rt_entity *rt_se, int cpu,
220 struct sched_rt_entity *parent)
222 struct rq *rq = cpu_rq(cpu);
224 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
225 rt_rq->rt_nr_boosted = 0;
229 tg->rt_rq[cpu] = rt_rq;
230 tg->rt_se[cpu] = rt_se;
236 rt_se->rt_rq = &rq->rt;
238 rt_se->rt_rq = parent->my_q;
241 rt_se->parent = parent;
242 INIT_LIST_HEAD(&rt_se->run_list);
245 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
248 struct sched_rt_entity *rt_se;
251 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
254 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
258 init_rt_bandwidth(&tg->rt_bandwidth,
259 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
261 for_each_possible_cpu(i) {
262 rt_rq = kzalloc_node(sizeof(struct rt_rq),
263 GFP_KERNEL, cpu_to_node(i));
267 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
268 GFP_KERNEL, cpu_to_node(i));
273 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
274 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
285 #else /* CONFIG_RT_GROUP_SCHED */
287 #define rt_entity_is_task(rt_se) (1)
289 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
291 return container_of(rt_se, struct task_struct, rt);
294 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
296 return container_of(rt_rq, struct rq, rt);
299 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
301 struct task_struct *p = rt_task_of(rt_se);
306 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
308 struct rq *rq = rq_of_rt_se(rt_se);
313 void unregister_rt_sched_group(struct task_group *tg) { }
315 void free_rt_sched_group(struct task_group *tg) { }
317 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
321 #endif /* CONFIG_RT_GROUP_SCHED */
325 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
327 /* Try to pull RT tasks here if we lower this rq's prio */
328 return rq->online && rq->rt.highest_prio.curr > prev->prio;
331 static inline int rt_overloaded(struct rq *rq)
333 return atomic_read(&rq->rd->rto_count);
336 static inline void rt_set_overload(struct rq *rq)
341 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
343 * Make sure the mask is visible before we set
344 * the overload count. That is checked to determine
345 * if we should look at the mask. It would be a shame
346 * if we looked at the mask, but the mask was not
347 * updated yet.
348 *
349 * Matched by the barrier in pull_rt_task().
350 */
351 smp_wmb();
352 atomic_inc(&rq->rd->rto_count);
355 static inline void rt_clear_overload(struct rq *rq)
360 /* the order here really doesn't matter */
361 atomic_dec(&rq->rd->rto_count);
362 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
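/*
 * Keep this rq's RT-overload state in sync: the rq counts as overloaded once
 * it has more than one queued RT task and at least one of them may migrate.
 */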
365 static void update_rt_migration(struct rt_rq *rt_rq)
367 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
368 if (!rt_rq->overloaded) {
369 rt_set_overload(rq_of_rt_rq(rt_rq));
370 rt_rq->overloaded = 1;
372 } else if (rt_rq->overloaded) {
373 rt_clear_overload(rq_of_rt_rq(rt_rq));
374 rt_rq->overloaded = 0;
378 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
380 struct task_struct *p;
382 if (!rt_entity_is_task(rt_se))
385 p = rt_task_of(rt_se);
386 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
388 rt_rq->rt_nr_total++;
389 if (p->nr_cpus_allowed > 1)
390 rt_rq->rt_nr_migratory++;
392 update_rt_migration(rt_rq);
395 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
397 struct task_struct *p;
399 if (!rt_entity_is_task(rt_se))
402 p = rt_task_of(rt_se);
403 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
405 rt_rq->rt_nr_total--;
406 if (p->nr_cpus_allowed > 1)
407 rt_rq->rt_nr_migratory--;
409 update_rt_migration(rt_rq);
412 static inline int has_pushable_tasks(struct rq *rq)
414 return !plist_head_empty(&rq->rt.pushable_tasks);
417 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
418 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
420 static void push_rt_tasks(struct rq *);
421 static void pull_rt_task(struct rq *);
423 static inline void rt_queue_push_tasks(struct rq *rq)
425 if (!has_pushable_tasks(rq))
428 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
431 static inline void rt_queue_pull_task(struct rq *rq)
433 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
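/*
 * Pushable-task bookkeeping: migratable RT tasks are kept on a
 * priority-ordered plist so the push/pull logic can locate them quickly.
 */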
436 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
438 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
439 plist_node_init(&p->pushable_tasks, p->prio);
440 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
442 /* Update the highest prio pushable task */
443 if (p->prio < rq->rt.highest_prio.next)
444 rq->rt.highest_prio.next = p->prio;
447 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
449 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
451 /* Update the new highest prio pushable task */
452 if (has_pushable_tasks(rq)) {
453 p = plist_first_entry(&rq->rt.pushable_tasks,
454 struct task_struct, pushable_tasks);
455 rq->rt.highest_prio.next = p->prio;
457 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
463 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
467 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
472 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
477 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
481 static inline void rt_queue_push_tasks(struct rq *rq)
484 #endif /* CONFIG_SMP */
486 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
487 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
489 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
494 #ifdef CONFIG_UCLAMP_TASK
496 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
497 * settings.
499 * This check is only important for heterogeneous systems where the uclamp_min
500 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
501 * this function will always return true.
503 * The function will return true if the capacity of the @cpu is >= the
504 * uclamp_min and false otherwise.
506 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
507 * > uclamp_max.
509 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
511 unsigned int min_cap;
512 unsigned int max_cap;
513 unsigned int cpu_cap;
515 /* Only heterogeneous systems can benefit from this check */
516 if (!sched_asym_cpucap_active())
519 min_cap = uclamp_eff_value(p, UCLAMP_MIN);
520 max_cap = uclamp_eff_value(p, UCLAMP_MAX);
522 cpu_cap = capacity_orig_of(cpu);
524 return cpu_cap >= min(min_cap, max_cap);
527 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
533 #ifdef CONFIG_RT_GROUP_SCHED
535 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
540 return rt_rq->rt_runtime;
543 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
545 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
548 typedef struct task_group *rt_rq_iter_t;
550 static inline struct task_group *next_task_group(struct task_group *tg)
553 tg = list_entry_rcu(tg->list.next,
554 typeof(struct task_group), list);
555 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
557 if (&tg->list == &task_groups)
563 #define for_each_rt_rq(rt_rq, iter, rq) \
564 for (iter = container_of(&task_groups, typeof(*iter), list); \
565 (iter = next_task_group(iter)) && \
566 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
568 #define for_each_sched_rt_entity(rt_se) \
569 for (; rt_se; rt_se = rt_se->parent)
571 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
576 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
577 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
579 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
581 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
582 struct rq *rq = rq_of_rt_rq(rt_rq);
583 struct sched_rt_entity *rt_se;
585 int cpu = cpu_of(rq);
587 rt_se = rt_rq->tg->rt_se[cpu];
589 if (rt_rq->rt_nr_running) {
591 enqueue_top_rt_rq(rt_rq);
592 else if (!on_rt_rq(rt_se))
593 enqueue_rt_entity(rt_se, 0);
595 if (rt_rq->highest_prio.curr < curr->prio)
600 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
602 struct sched_rt_entity *rt_se;
603 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
605 rt_se = rt_rq->tg->rt_se[cpu];
608 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
609 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
610 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
612 else if (on_rt_rq(rt_se))
613 dequeue_rt_entity(rt_se, 0);
616 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
618 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
621 static int rt_se_boosted(struct sched_rt_entity *rt_se)
623 struct rt_rq *rt_rq = group_rt_rq(rt_se);
624 struct task_struct *p;
627 return !!rt_rq->rt_nr_boosted;
629 p = rt_task_of(rt_se);
630 return p->prio != p->normal_prio;
634 static inline const struct cpumask *sched_rt_period_mask(void)
636 return this_rq()->rd->span;
639 static inline const struct cpumask *sched_rt_period_mask(void)
641 return cpu_online_mask;
646 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
648 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
651 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
653 return &rt_rq->tg->rt_bandwidth;
656 #else /* !CONFIG_RT_GROUP_SCHED */
658 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
660 return rt_rq->rt_runtime;
663 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
665 return ktime_to_ns(def_rt_bandwidth.rt_period);
668 typedef struct rt_rq *rt_rq_iter_t;
670 #define for_each_rt_rq(rt_rq, iter, rq) \
671 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
673 #define for_each_sched_rt_entity(rt_se) \
674 for (; rt_se; rt_se = NULL)
676 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
681 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
683 struct rq *rq = rq_of_rt_rq(rt_rq);
685 if (!rt_rq->rt_nr_running)
688 enqueue_top_rt_rq(rt_rq);
692 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
694 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
697 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
699 return rt_rq->rt_throttled;
702 static inline const struct cpumask *sched_rt_period_mask(void)
704 return cpu_online_mask;
708 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
710 return &cpu_rq(cpu)->rt;
713 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
715 return &def_rt_bandwidth;
718 #endif /* CONFIG_RT_GROUP_SCHED */
720 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
722 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
724 return (hrtimer_active(&rt_b->rt_period_timer) ||
725 rt_rq->rt_time < rt_b->rt_runtime);
730 * We ran out of runtime, see if we can borrow some from our neighbours.
732 static void do_balance_runtime(struct rt_rq *rt_rq)
734 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
735 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
739 weight = cpumask_weight(rd->span);
741 raw_spin_lock(&rt_b->rt_runtime_lock);
742 rt_period = ktime_to_ns(rt_b->rt_period);
743 for_each_cpu(i, rd->span) {
744 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
750 raw_spin_lock(&iter->rt_runtime_lock);
752 * Either all rqs have inf runtime and there's nothing to steal
753 * or __disable_runtime() below sets a specific rq to inf to
754 * indicate it's been disabled and disallow stealing.
756 if (iter->rt_runtime == RUNTIME_INF)
760 * From runqueues with spare time, take 1/n part of their
761 * spare time, but no more than our period.
763 diff = iter->rt_runtime - iter->rt_time;
765 diff = div_u64((u64)diff, weight);
766 if (rt_rq->rt_runtime + diff > rt_period)
767 diff = rt_period - rt_rq->rt_runtime;
768 iter->rt_runtime -= diff;
769 rt_rq->rt_runtime += diff;
770 if (rt_rq->rt_runtime == rt_period) {
771 raw_spin_unlock(&iter->rt_runtime_lock);
776 raw_spin_unlock(&iter->rt_runtime_lock);
778 raw_spin_unlock(&rt_b->rt_runtime_lock);
782 * Ensure this RQ takes back all the runtime it lent to its neighbours.
784 static void __disable_runtime(struct rq *rq)
786 struct root_domain *rd = rq->rd;
790 if (unlikely(!scheduler_running))
793 for_each_rt_rq(rt_rq, iter, rq) {
794 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
798 raw_spin_lock(&rt_b->rt_runtime_lock);
799 raw_spin_lock(&rt_rq->rt_runtime_lock);
801 * Either we're all inf and nobody needs to borrow, or we're
802 * already disabled and thus have nothing to do, or we have
803 * exactly the right amount of runtime to take out.
805 if (rt_rq->rt_runtime == RUNTIME_INF ||
806 rt_rq->rt_runtime == rt_b->rt_runtime)
808 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 * Calculate the difference between what we started out with
812 * and what we currently have; that's the amount of runtime
813 * we lent out and now have to reclaim.
815 want = rt_b->rt_runtime - rt_rq->rt_runtime;
818 * Greedy reclaim, take back as much as we can.
820 for_each_cpu(i, rd->span) {
821 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
825 * Can't reclaim from ourselves or disabled runqueues.
827 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
830 raw_spin_lock(&iter->rt_runtime_lock);
832 diff = min_t(s64, iter->rt_runtime, want);
833 iter->rt_runtime -= diff;
836 iter->rt_runtime -= want;
839 raw_spin_unlock(&iter->rt_runtime_lock);
845 raw_spin_lock(&rt_rq->rt_runtime_lock);
847 * We cannot be left wanting - that would mean some runtime
848 * leaked out of the system.
853 * Disable all the borrow logic by pretending we have inf
854 * runtime - in which case borrowing doesn't make sense.
856 rt_rq->rt_runtime = RUNTIME_INF;
857 rt_rq->rt_throttled = 0;
858 raw_spin_unlock(&rt_rq->rt_runtime_lock);
859 raw_spin_unlock(&rt_b->rt_runtime_lock);
861 /* Make rt_rq available for pick_next_task() */
862 sched_rt_rq_enqueue(rt_rq);
866 static void __enable_runtime(struct rq *rq)
871 if (unlikely(!scheduler_running))
875 * Reset each runqueue's bandwidth settings
877 for_each_rt_rq(rt_rq, iter, rq) {
878 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
880 raw_spin_lock(&rt_b->rt_runtime_lock);
881 raw_spin_lock(&rt_rq->rt_runtime_lock);
882 rt_rq->rt_runtime = rt_b->rt_runtime;
884 rt_rq->rt_throttled = 0;
885 raw_spin_unlock(&rt_rq->rt_runtime_lock);
886 raw_spin_unlock(&rt_b->rt_runtime_lock);
890 static void balance_runtime(struct rt_rq *rt_rq)
892 if (!sched_feat(RT_RUNTIME_SHARE))
895 if (rt_rq->rt_time > rt_rq->rt_runtime) {
896 raw_spin_unlock(&rt_rq->rt_runtime_lock);
897 do_balance_runtime(rt_rq);
898 raw_spin_lock(&rt_rq->rt_runtime_lock);
901 #else /* !CONFIG_SMP */
902 static inline void balance_runtime(struct rt_rq *rt_rq) {}
903 #endif /* CONFIG_SMP */
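/*
 * Periodic replenishment, driven by the rt_bandwidth timer: for each rt_rq
 * served by @rt_b, pay back up to @overrun periods worth of runtime and
 * unthrottle queues that have dropped back under their budget. Returns
 * whether the timer may go idle.
 */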
905 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
907 int i, idle = 1, throttled = 0;
908 const struct cpumask *span;
910 span = sched_rt_period_mask();
911 #ifdef CONFIG_RT_GROUP_SCHED
913 * FIXME: isolated CPUs should really leave the root task group,
914 * whether they are isolcpus or were isolated via cpusets, lest
915 * the timer run on a CPU which does not service all runqueues,
916 * potentially leaving other CPUs indefinitely throttled. If
917 * isolation is really required, the user will turn the throttle
918 * off to kill the perturbations it causes anyway. Meanwhile,
919 * this maintains functionality for boot and/or troubleshooting.
921 if (rt_b == &root_task_group.rt_bandwidth)
922 span = cpu_online_mask;
924 for_each_cpu(i, span) {
926 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
927 struct rq *rq = rq_of_rt_rq(rt_rq);
932 * When span == cpu_online_mask, taking each rq->lock
933 * can be time-consuming. Try to avoid it when possible.
935 raw_spin_lock(&rt_rq->rt_runtime_lock);
936 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
937 rt_rq->rt_runtime = rt_b->rt_runtime;
938 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
939 raw_spin_unlock(&rt_rq->rt_runtime_lock);
946 if (rt_rq->rt_time) {
949 raw_spin_lock(&rt_rq->rt_runtime_lock);
950 if (rt_rq->rt_throttled)
951 balance_runtime(rt_rq);
952 runtime = rt_rq->rt_runtime;
953 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
954 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
955 rt_rq->rt_throttled = 0;
959 * When we're idle and a woken (rt) task is
960 * throttled, check_preempt_curr() will set
961 * skip_update and the time between the wakeup
962 * and this unthrottle will get accounted as
963 * rt_time.
965 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
966 rq_clock_cancel_skipupdate(rq);
968 if (rt_rq->rt_time || rt_rq->rt_nr_running)
970 raw_spin_unlock(&rt_rq->rt_runtime_lock);
971 } else if (rt_rq->rt_nr_running) {
973 if (!rt_rq_throttled(rt_rq))
976 if (rt_rq->rt_throttled)
980 sched_rt_rq_enqueue(rt_rq);
984 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
990 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
992 #ifdef CONFIG_RT_GROUP_SCHED
993 struct rt_rq *rt_rq = group_rt_rq(rt_se);
996 return rt_rq->highest_prio.curr;
999 return rt_task_of(rt_se)->prio;
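/*
 * Called with rt_rq->rt_runtime_lock held after runtime has been charged to
 * rt_rq->rt_time: throttle (dequeue) the rt_rq once its budget is exceeded.
 * Returns nonzero when the currently running task must reschedule.
 */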
1002 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
1004 u64 runtime = sched_rt_runtime(rt_rq);
1006 if (rt_rq->rt_throttled)
1007 return rt_rq_throttled(rt_rq);
1009 if (runtime >= sched_rt_period(rt_rq))
1012 balance_runtime(rt_rq);
1013 runtime = sched_rt_runtime(rt_rq);
1014 if (runtime == RUNTIME_INF)
1017 if (rt_rq->rt_time > runtime) {
1018 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
1021 * Don't actually throttle groups that have no runtime assigned
1022 * but accrue some time due to boosting.
1024 if (likely(rt_b->rt_runtime)) {
1025 rt_rq->rt_throttled = 1;
1026 printk_deferred_once("sched: RT throttling activated\n");
1029 * In case we did anyway, make it go away,
1030 * replenishment is a joke, since it will replenish us
1031 * with exactly 0 ns.
1036 if (rt_rq_throttled(rt_rq)) {
1037 sched_rt_rq_dequeue(rt_rq);
1046 * Update the current task's runtime statistics. Skip current tasks that
1047 * are not in our scheduling class.
1049 static void update_curr_rt(struct rq *rq)
1051 struct task_struct *curr = rq->curr;
1052 struct sched_rt_entity *rt_se = &curr->rt;
1056 if (curr->sched_class != &rt_sched_class)
1059 now = rq_clock_task(rq);
1060 delta_exec = now - curr->se.exec_start;
1061 if (unlikely((s64)delta_exec <= 0))
1064 schedstat_set(curr->stats.exec_max,
1065 max(curr->stats.exec_max, delta_exec));
1067 trace_sched_stat_runtime(curr, delta_exec, 0);
1069 update_current_exec_runtime(curr, now, delta_exec);
1071 if (!rt_bandwidth_enabled())
1074 for_each_sched_rt_entity(rt_se) {
1075 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1078 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1079 raw_spin_lock(&rt_rq->rt_runtime_lock);
1080 rt_rq->rt_time += delta_exec;
1081 exceeded = sched_rt_runtime_exceeded(rt_rq);
1084 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1086 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1092 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1094 struct rq *rq = rq_of_rt_rq(rt_rq);
1096 BUG_ON(&rq->rt != rt_rq);
1098 if (!rt_rq->rt_queued)
1101 BUG_ON(!rq->nr_running);
1103 sub_nr_running(rq, count);
1104 rt_rq->rt_queued = 0;
1109 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1111 struct rq *rq = rq_of_rt_rq(rt_rq);
1113 BUG_ON(&rq->rt != rt_rq);
1115 if (rt_rq->rt_queued)
1118 if (rt_rq_throttled(rt_rq))
1121 if (rt_rq->rt_nr_running) {
1122 add_nr_running(rq, rt_rq->rt_nr_running);
1123 rt_rq->rt_queued = 1;
1126 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1127 cpufreq_update_util(rq, 0);
1130 #if defined CONFIG_SMP
1133 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1135 struct rq *rq = rq_of_rt_rq(rt_rq);
1137 #ifdef CONFIG_RT_GROUP_SCHED
1139 * Change rq's cpupri only if rt_rq is the top queue.
1141 if (&rq->rt != rt_rq)
1144 if (rq->online && prio < prev_prio)
1145 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1149 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1151 struct rq *rq = rq_of_rt_rq(rt_rq);
1153 #ifdef CONFIG_RT_GROUP_SCHED
1155 * Change rq's cpupri only if rt_rq is the top queue.
1157 if (&rq->rt != rt_rq)
1160 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1161 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1164 #else /* CONFIG_SMP */
1167 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1169 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1171 #endif /* CONFIG_SMP */
1173 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1175 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1177 int prev_prio = rt_rq->highest_prio.curr;
1179 if (prio < prev_prio)
1180 rt_rq->highest_prio.curr = prio;
1182 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1186 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1188 int prev_prio = rt_rq->highest_prio.curr;
1190 if (rt_rq->rt_nr_running) {
1192 WARN_ON(prio < prev_prio);
1195 * This may have been our highest task, and therefore
1196 * we may have some recomputation to do
1198 if (prio == prev_prio) {
1199 struct rt_prio_array *array = &rt_rq->active;
1201 rt_rq->highest_prio.curr =
1202 sched_find_first_bit(array->bitmap);
1206 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1209 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1214 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1215 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1217 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1219 #ifdef CONFIG_RT_GROUP_SCHED
1222 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224 if (rt_se_boosted(rt_se))
1225 rt_rq->rt_nr_boosted++;
1228 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1232 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1234 if (rt_se_boosted(rt_se))
1235 rt_rq->rt_nr_boosted--;
1237 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1240 #else /* CONFIG_RT_GROUP_SCHED */
1243 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1245 start_rt_bandwidth(&def_rt_bandwidth);
1249 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1251 #endif /* CONFIG_RT_GROUP_SCHED */
1254 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1256 struct rt_rq *group_rq = group_rt_rq(rt_se);
1259 return group_rq->rt_nr_running;
1265 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1267 struct rt_rq *group_rq = group_rt_rq(rt_se);
1268 struct task_struct *tsk;
1271 return group_rq->rr_nr_running;
1273 tsk = rt_task_of(rt_se);
1275 return (tsk->policy == SCHED_RR) ? 1 : 0;
1279 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1281 int prio = rt_se_prio(rt_se);
1283 WARN_ON(!rt_prio(prio));
1284 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1285 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1287 inc_rt_prio(rt_rq, prio);
1288 inc_rt_migration(rt_se, rt_rq);
1289 inc_rt_group(rt_se, rt_rq);
1293 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1295 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1296 WARN_ON(!rt_rq->rt_nr_running);
1297 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1298 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1300 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1301 dec_rt_migration(rt_se, rt_rq);
1302 dec_rt_group(rt_se, rt_rq);
1306 * Change rt_se->run_list location unless SAVE && !MOVE
1308 * assumes ENQUEUE/DEQUEUE flags match
1310 static inline bool move_entity(unsigned int flags)
1312 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1318 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1320 list_del_init(&rt_se->run_list);
1322 if (list_empty(array->queue + rt_se_prio(rt_se)))
1323 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1328 static inline struct sched_statistics *
1329 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1331 #ifdef CONFIG_RT_GROUP_SCHED
1332 /* schedstats is not supported for rt group. */
1333 if (!rt_entity_is_task(rt_se))
1337 return &rt_task_of(rt_se)->stats;
1341 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1343 struct sched_statistics *stats;
1344 struct task_struct *p = NULL;
1346 if (!schedstat_enabled())
1349 if (rt_entity_is_task(rt_se))
1350 p = rt_task_of(rt_se);
1352 stats = __schedstats_from_rt_se(rt_se);
1356 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1360 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1362 struct sched_statistics *stats;
1363 struct task_struct *p = NULL;
1365 if (!schedstat_enabled())
1368 if (rt_entity_is_task(rt_se))
1369 p = rt_task_of(rt_se);
1371 stats = __schedstats_from_rt_se(rt_se);
1375 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1379 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1382 if (!schedstat_enabled())
1385 if (flags & ENQUEUE_WAKEUP)
1386 update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1390 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1392 struct sched_statistics *stats;
1393 struct task_struct *p = NULL;
1395 if (!schedstat_enabled())
1398 if (rt_entity_is_task(rt_se))
1399 p = rt_task_of(rt_se);
1401 stats = __schedstats_from_rt_se(rt_se);
1405 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1409 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1412 struct task_struct *p = NULL;
1414 if (!schedstat_enabled())
1417 if (rt_entity_is_task(rt_se))
1418 p = rt_task_of(rt_se);
1420 if ((flags & DEQUEUE_SLEEP) && p) {
1423 state = READ_ONCE(p->__state);
1424 if (state & TASK_INTERRUPTIBLE)
1425 __schedstat_set(p->stats.sleep_start,
1426 rq_clock(rq_of_rt_rq(rt_rq)));
1428 if (state & TASK_UNINTERRUPTIBLE)
1429 __schedstat_set(p->stats.block_start,
1430 rq_clock(rq_of_rt_rq(rt_rq)));
1434 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1436 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1437 struct rt_prio_array *array = &rt_rq->active;
1438 struct rt_rq *group_rq = group_rt_rq(rt_se);
1439 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1442 * Don't enqueue the group if it's throttled, or when empty.
1443 * The latter is a consequence of the former when a child group
1444 * gets throttled and the current group doesn't have any other
1445 * active members.
1447 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1449 __delist_rt_entity(rt_se, array);
1453 if (move_entity(flags)) {
1454 WARN_ON_ONCE(rt_se->on_list);
1455 if (flags & ENQUEUE_HEAD)
1456 list_add(&rt_se->run_list, queue);
1458 list_add_tail(&rt_se->run_list, queue);
1460 __set_bit(rt_se_prio(rt_se), array->bitmap);
1465 inc_rt_tasks(rt_se, rt_rq);
1468 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1470 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1471 struct rt_prio_array *array = &rt_rq->active;
1473 if (move_entity(flags)) {
1474 WARN_ON_ONCE(!rt_se->on_list);
1475 __delist_rt_entity(rt_se, array);
1479 dec_rt_tasks(rt_se, rt_rq);
1483 * Because the prio of an upper entry depends on the lower
1484 * entries, we must remove entries top-down.
1486 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1488 struct sched_rt_entity *back = NULL;
1489 unsigned int rt_nr_running;
1491 for_each_sched_rt_entity(rt_se) {
1496 rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1498 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1499 if (on_rt_rq(rt_se))
1500 __dequeue_rt_entity(rt_se, flags);
1503 dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1506 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1508 struct rq *rq = rq_of_rt_se(rt_se);
1510 update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1512 dequeue_rt_stack(rt_se, flags);
1513 for_each_sched_rt_entity(rt_se)
1514 __enqueue_rt_entity(rt_se, flags);
1515 enqueue_top_rt_rq(&rq->rt);
1518 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1520 struct rq *rq = rq_of_rt_se(rt_se);
1522 update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1524 dequeue_rt_stack(rt_se, flags);
1526 for_each_sched_rt_entity(rt_se) {
1527 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1529 if (rt_rq && rt_rq->rt_nr_running)
1530 __enqueue_rt_entity(rt_se, flags);
1532 enqueue_top_rt_rq(&rq->rt);
1536 * Adding/removing a task to/from a priority array:
1539 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1541 struct sched_rt_entity *rt_se = &p->rt;
1543 if (flags & ENQUEUE_WAKEUP)
1546 check_schedstat_required();
1547 update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1549 enqueue_rt_entity(rt_se, flags);
1551 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1552 enqueue_pushable_task(rq, p);
1555 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1557 struct sched_rt_entity *rt_se = &p->rt;
1560 dequeue_rt_entity(rt_se, flags);
1562 dequeue_pushable_task(rq, p);
1566 * Put task to the head or the end of the run list without the overhead of
1567 * dequeue followed by enqueue.
1570 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1572 if (on_rt_rq(rt_se)) {
1573 struct rt_prio_array *array = &rt_rq->active;
1574 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1577 list_move(&rt_se->run_list, queue);
1579 list_move_tail(&rt_se->run_list, queue);
1583 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1585 struct sched_rt_entity *rt_se = &p->rt;
1586 struct rt_rq *rt_rq;
1588 for_each_sched_rt_entity(rt_se) {
1589 rt_rq = rt_rq_of_se(rt_se);
1590 requeue_rt_entity(rt_rq, rt_se, head);
1594 static void yield_task_rt(struct rq *rq)
1596 requeue_task_rt(rq, rq->curr, 0);
1600 static int find_lowest_rq(struct task_struct *task);
1603 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1605 struct task_struct *curr;
1609 /* For anything but wake ups, just return the task_cpu */
1610 if (!(flags & (WF_TTWU | WF_FORK)))
1616 curr = READ_ONCE(rq->curr); /* unlocked access */
1619 * If the current task on @p's runqueue is an RT task, then
1620 * try to see if we can wake this RT task up on another
1621 * runqueue. Otherwise simply start this RT task
1622 * on its current runqueue.
1624 * We want to avoid overloading runqueues. If the woken
1625 * task is a higher priority, then it will stay on this CPU
1626 * and the lower prio task should be moved to another CPU.
1627 * Even though this will probably make the lower prio task
1628 * lose its cache, we do not want to bounce a higher task
1629 * around just because it gave up its CPU, perhaps for a
1630 * lower prio task.
1632 * For equal prio tasks, we just let the scheduler sort it out.
1634 * Otherwise, just let it ride on the affined RQ and the
1635 * post-schedule router will push the preempted task away
1637 * This test is optimistic, if we get it wrong the load-balancer
1638 * will have to sort it out.
1640 * We take into account the capacity of the CPU to ensure it fits the
1641 * requirement of the task - which is only important on heterogeneous
1642 * systems like big.LITTLE.
1645 unlikely(rt_task(curr)) &&
1646 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1648 if (test || !rt_task_fits_capacity(p, cpu)) {
1649 int target = find_lowest_rq(p);
1652 * Bail out if we were forcing a migration to find a better
1653 * fitting CPU but our search failed.
1655 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1659 * Don't bother moving it if the destination CPU is
1660 * not running a lower priority task.
1663 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1674 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1677 * Current can't be migrated, useless to reschedule,
1678 * let's hope p can move out.
1680 if (rq->curr->nr_cpus_allowed == 1 ||
1681 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1685 * p is migratable, so let's not schedule it and
1686 * see if it is pushed or pulled somewhere else.
1688 if (p->nr_cpus_allowed != 1 &&
1689 cpupri_find(&rq->rd->cpupri, p, NULL))
1693 * There appear to be other CPUs that can accept
1694 * the current task but none can run 'p', so lets reschedule
1695 * to try and push the current task away:
1697 requeue_task_rt(rq, p, 1);
1701 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1703 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1705 * This is OK, because current is on_cpu, which avoids it being
1706 * picked for load-balance; preemption/IRQs are still
1707 * disabled, avoiding further scheduler activity on it, and we've
1708 * not yet started the picking loop.
1710 rq_unpin_lock(rq, rf);
1712 rq_repin_lock(rq, rf);
1715 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1717 #endif /* CONFIG_SMP */
1720 * Preempt the current task with a newly woken task if needed:
1722 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1724 if (p->prio < rq->curr->prio) {
1733 * - the newly woken task is of equal priority to the current task
1734 * - the newly woken task is non-migratable while current is migratable
1735 * - current will be preempted on the next reschedule
1737 * we should check to see if current can readily move to a different
1738 * cpu. If so, we will reschedule to allow the push logic to try
1739 * to move current somewhere else, making room for our non-migratable
1742 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1743 check_preempt_equal_prio(rq, p);
1747 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1749 struct sched_rt_entity *rt_se = &p->rt;
1750 struct rt_rq *rt_rq = &rq->rt;
1752 p->se.exec_start = rq_clock_task(rq);
1753 if (on_rt_rq(&p->rt))
1754 update_stats_wait_end_rt(rt_rq, rt_se);
1756 /* The running task is never eligible for pushing */
1757 dequeue_pushable_task(rq, p);
1763 * If prev task was rt, put_prev_task() has already updated the
1764 * utilization. We only care about the case where we start to schedule an
1765 * RT task.
1767 if (rq->curr->sched_class != &rt_sched_class)
1768 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1770 rt_queue_push_tasks(rq);
1773 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1775 struct rt_prio_array *array = &rt_rq->active;
1776 struct sched_rt_entity *next = NULL;
1777 struct list_head *queue;
1780 idx = sched_find_first_bit(array->bitmap);
1781 BUG_ON(idx >= MAX_RT_PRIO);
1783 queue = array->queue + idx;
1784 if (SCHED_WARN_ON(list_empty(queue)))
1786 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1791 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1793 struct sched_rt_entity *rt_se;
1794 struct rt_rq *rt_rq = &rq->rt;
1797 rt_se = pick_next_rt_entity(rt_rq);
1798 if (unlikely(!rt_se))
1800 rt_rq = group_rt_rq(rt_se);
1803 return rt_task_of(rt_se);
1806 static struct task_struct *pick_task_rt(struct rq *rq)
1808 struct task_struct *p;
1810 if (!sched_rt_runnable(rq))
1813 p = _pick_next_task_rt(rq);
1818 static struct task_struct *pick_next_task_rt(struct rq *rq)
1820 struct task_struct *p = pick_task_rt(rq);
1823 set_next_task_rt(rq, p, true);
1828 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1830 struct sched_rt_entity *rt_se = &p->rt;
1831 struct rt_rq *rt_rq = &rq->rt;
1833 if (on_rt_rq(&p->rt))
1834 update_stats_wait_start_rt(rt_rq, rt_se);
1838 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1841 * The previous task needs to be made eligible for pushing
1842 * if it is still active
1844 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1845 enqueue_pushable_task(rq, p);
1850 /* Only try algorithms three times */
1851 #define RT_MAX_TRIES 3
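/* Is @p a migration candidate: allowed to run on @cpu and not running now? */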
1853 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1855 if (!task_on_cpu(rq, p) &&
1856 cpumask_test_cpu(cpu, &p->cpus_mask))
1863 * Return the highest-priority pushable task on @rq that can run on the
1864 * given CPU, or NULL if there is none.
1866 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1868 struct plist_head *head = &rq->rt.pushable_tasks;
1869 struct task_struct *p;
1871 if (!has_pushable_tasks(rq))
1874 plist_for_each_entry(p, head, pushable_tasks) {
1875 if (pick_rt_task(rq, p, cpu))
1882 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1884 static int find_lowest_rq(struct task_struct *task)
1886 struct sched_domain *sd;
1887 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1888 int this_cpu = smp_processor_id();
1889 int cpu = task_cpu(task);
1892 /* Make sure the mask is initialized first */
1893 if (unlikely(!lowest_mask))
1896 if (task->nr_cpus_allowed == 1)
1897 return -1; /* No other targets possible */
1900 * If we're on an asymmetric-capacity system, ensure we consider the
1901 * different capacities of the CPUs when searching for the lowest_mask.
1903 if (sched_asym_cpucap_active()) {
1905 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1907 rt_task_fits_capacity);
1910 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1915 return -1; /* No targets found */
1918 * At this point we have built a mask of CPUs representing the
1919 * lowest priority tasks in the system. Now we want to elect
1920 * the best one based on our affinity and topology.
1922 * We prioritize the last CPU that the task executed on since
1923 * it is most likely cache-hot in that location.
1925 if (cpumask_test_cpu(cpu, lowest_mask))
1929 * Otherwise, we consult the sched_domains span maps to figure
1930 * out which CPU is logically closest to our hot cache data.
1932 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1933 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1936 for_each_domain(cpu, sd) {
1937 if (sd->flags & SD_WAKE_AFFINE) {
1941 * "this_cpu" is cheaper to preempt than a
1942 * remote processor.
1944 if (this_cpu != -1 &&
1945 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1950 best_cpu = cpumask_any_and_distribute(lowest_mask,
1951 sched_domain_span(sd));
1952 if (best_cpu < nr_cpu_ids) {
1961 * And finally, if there were no matches within the domains
1962 * just give the caller *something* to work with from the compatible
1963 * locations.
1968 cpu = cpumask_any_distribute(lowest_mask);
1969 if (cpu < nr_cpu_ids)
1975 /* Will lock the rq it finds */
1976 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1978 struct rq *lowest_rq = NULL;
1982 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1983 cpu = find_lowest_rq(task);
1985 if ((cpu == -1) || (cpu == rq->cpu))
1988 lowest_rq = cpu_rq(cpu);
1990 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1992 * Target rq has tasks of equal or higher priority,
1993 * retrying does not release any lock and is unlikely
1994 * to yield a different result.
2000 /* if the prio of this runqueue changed, try again */
2001 if (double_lock_balance(rq, lowest_rq)) {
2003 * We had to unlock the run queue. In
2004 * the meantime, the task could have
2005 * migrated already or had its affinity changed.
2006 * Also make sure that it wasn't scheduled on its rq.
2007 * It is possible the task was scheduled, set
2008 * "migrate_disabled" and then got preempted, so we must
2009 * check the task migration disable flag here too.
2011 if (unlikely(task_rq(task) != rq ||
2012 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
2013 task_on_cpu(rq, task) ||
2015 is_migration_disabled(task) ||
2016 !task_on_rq_queued(task))) {
2018 double_unlock_balance(rq, lowest_rq);
2024 /* If this rq is still suitable use it. */
2025 if (lowest_rq->rt.highest_prio.curr > task->prio)
2029 double_unlock_balance(rq, lowest_rq);
2036 static struct task_struct *pick_next_pushable_task(struct rq *rq)
2038 struct task_struct *p;
2040 if (!has_pushable_tasks(rq))
2043 p = plist_first_entry(&rq->rt.pushable_tasks,
2044 struct task_struct, pushable_tasks);
2046 BUG_ON(rq->cpu != task_cpu(p));
2047 BUG_ON(task_current(rq, p));
2048 BUG_ON(p->nr_cpus_allowed <= 1);
2050 BUG_ON(!task_on_rq_queued(p));
2051 BUG_ON(!rt_task(p));
2057 * If the current CPU has more than one RT task, see if the
2058 * non-running task can migrate over to a CPU that is running a task
2059 * of lesser priority.
2061 static int push_rt_task(struct rq *rq, bool pull)
2063 struct task_struct *next_task;
2064 struct rq *lowest_rq;
2067 if (!rq->rt.overloaded)
2070 next_task = pick_next_pushable_task(rq);
2076 * It's possible that the next_task slipped in with a
2077 * higher priority than current. If that's the case
2078 * just reschedule current.
2080 if (unlikely(next_task->prio < rq->curr->prio)) {
2085 if (is_migration_disabled(next_task)) {
2086 struct task_struct *push_task = NULL;
2089 if (!pull || rq->push_busy)
2093 * Invoking find_lowest_rq() on anything but an RT task doesn't
2094 * make sense. Per the above priority check, curr has to
2095 * be of higher priority than next_task, so no need to
2096 * reschedule when bailing out.
2098 * Note that the stoppers are masqueraded as SCHED_FIFO
2099 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2101 if (rq->curr->sched_class != &rt_sched_class)
2104 cpu = find_lowest_rq(rq->curr);
2105 if (cpu == -1 || cpu == rq->cpu)
2109 * Since we found a CPU with lower priority than @next_task,
2110 * @next_task ought to be running. However we cannot migrate it
2111 * to this other CPU because it is migration-disabled; instead
2112 * attempt to push the current running task on this CPU away.
2114 push_task = get_push_task(rq);
2117 raw_spin_rq_unlock(rq);
2118 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2119 push_task, &rq->push_work);
2121 raw_spin_rq_lock(rq);
2127 if (WARN_ON(next_task == rq->curr))
2130 /* We might release rq lock */
2131 get_task_struct(next_task);
2133 /* find_lock_lowest_rq locks the rq if found */
2134 lowest_rq = find_lock_lowest_rq(next_task, rq);
2136 struct task_struct *task;
2138 * find_lock_lowest_rq releases rq->lock
2139 * so it is possible that next_task has migrated.
2141 * We need to make sure that the task is still on the same
2142 * run-queue and is also still the next task eligible for
2143 * pushing.
2145 task = pick_next_pushable_task(rq);
2146 if (task == next_task) {
2148 * The task hasn't migrated, and is still the next
2149 * eligible task, but we failed to find a run-queue
2150 * to push it to. Do not retry in this case, since
2151 * other CPUs will pull from us when ready.
2157 /* No more tasks, just exit */
2161 * Something has shifted, try again.
2163 put_task_struct(next_task);
2168 deactivate_task(rq, next_task, 0);
2169 set_task_cpu(next_task, lowest_rq->cpu);
2170 activate_task(lowest_rq, next_task, 0);
2171 resched_curr(lowest_rq);
2174 double_unlock_balance(rq, lowest_rq);
2176 put_task_struct(next_task);
2181 static void push_rt_tasks(struct rq *rq)
2183 /* push_rt_task() will return true if it moved an RT task */
2184 while (push_rt_task(rq, false))
2188 #ifdef HAVE_RT_PUSH_IPI
2191 * When a high priority task schedules out from a CPU and a lower priority
2192 * task is scheduled in, a check is made to see if there are any RT tasks
2193 * on other CPUs that are waiting to run because a higher priority RT task
2194 * is currently running on its CPU. In this case, the CPU with multiple RT
2195 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2196 * up that may be able to run one of its non-running queued RT tasks.
2198 * All CPUs with overloaded RT tasks need to be notified as there is currently
2199 * no way to know which of these CPUs have the highest priority task waiting
2200 * to run. Instead of trying to take a spinlock on each of these CPUs,
2201 * which has been shown to cause large latency when done on machines with
2202 * many CPUs, an IPI is sent to the CPUs to have them push off the
2203 * overloaded RT tasks waiting to run.
2205 * Just sending an IPI to each of the CPUs is also an issue, as on large
2206 * count CPU machines, this can cause an IPI storm on a CPU, especially
2207 * if it's the only CPU with multiple RT tasks queued, and a large number
2208 * of CPUs scheduling a lower priority task at the same time.
2210 * Each root domain has its own irq work function that can iterate over
2211 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
2212 * tasks must be checked whether one or many CPUs are lowering their
2213 * priority, there's a single irq work iterator that will try to
2214 * push off RT tasks that are waiting to run.
2216 * When a CPU schedules a lower priority task, it will kick off the
2217 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2218 * As it only takes the first CPU that schedules a lower priority task
2219 * to start the process, the rto_start variable is incremented and if
2220 * the atomic result is one, then that CPU will try to take the rto_lock.
2221 * This prevents high contention on the lock as the process handles all
2222 * CPUs scheduling lower priority tasks.
2224 * All CPUs that are scheduling a lower priority task will increment the
2225 * rt_loop_next variable. This will make sure that the irq work iterator
2226 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2227 * priority task, even if the iterator is in the middle of a scan. Incrementing
2228 * the rt_loop_next will cause the iterator to perform another scan.
2231 static int rto_next_cpu(struct root_domain *rd)
2237 * When starting the IPI RT pushing, the rto_cpu is set to -1, and
2238 * rto_next_cpu() will simply return the first CPU found in
2239 * the rto_mask.
2241 * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
2242 * will return the next CPU found in the rto_mask.
2244 * If there are no more CPUs left in the rto_mask, then a check is made
2245 * against rto_loop and rto_loop_next. rto_loop is only updated with
2246 * the rto_lock held, but any CPU may increment the rto_loop_next
2247 * without any locking.
2251 /* When rto_cpu is -1 this acts like cpumask_first() */
2252 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2256 if (cpu < nr_cpu_ids)
2262 * ACQUIRE ensures we see the @rto_mask changes
2263 * made prior to the @next value observed.
2265 * Matches WMB in rt_set_overload().
2267 next = atomic_read_acquire(&rd->rto_loop_next);
2269 if (rd->rto_loop == next)
2272 rd->rto_loop = next;
2278 static inline bool rto_start_trylock(atomic_t *v)
2280 return !atomic_cmpxchg_acquire(v, 0, 1);
2283 static inline void rto_start_unlock(atomic_t *v)
2285 atomic_set_release(v, 0);
2288 static void tell_cpu_to_push(struct rq *rq)
2292 /* Keep the loop going if the IPI is currently active */
2293 atomic_inc(&rq->rd->rto_loop_next);
2295 /* Only one CPU can initiate a loop at a time */
2296 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2299 raw_spin_lock(&rq->rd->rto_lock);
2302 * The rto_cpu is updated under the lock; if it has a valid CPU
2303 * then the IPI is still running and will continue due to the
2304 * update to loop_next, and nothing needs to be done here.
2305 * Otherwise it is finishing up and an IPI needs to be sent.
2307 if (rq->rd->rto_cpu < 0)
2308 cpu = rto_next_cpu(rq->rd);
2310 raw_spin_unlock(&rq->rd->rto_lock);
2312 rto_start_unlock(&rq->rd->rto_loop_start);
2315 /* Make sure the rd does not get freed while pushing */
2316 sched_get_rd(rq->rd);
2317 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2321 /* Called from hardirq context */
2322 void rto_push_irq_work_func(struct irq_work *work)
2324 struct root_domain *rd =
2325 container_of(work, struct root_domain, rto_push_work);
2332 * We do not need to grab the lock to check for has_pushable_tasks.
2333 * When it gets updated, a check is made if a push is possible.
2335 if (has_pushable_tasks(rq)) {
2336 raw_spin_rq_lock(rq);
2337 while (push_rt_task(rq, true))
2339 raw_spin_rq_unlock(rq);
2342 raw_spin_lock(&rd->rto_lock);
2344 /* Pass the IPI to the next rt overloaded queue */
2345 cpu = rto_next_cpu(rd);
2347 raw_spin_unlock(&rd->rto_lock);
2354 /* Try the next RT overloaded CPU */
2355 irq_work_queue_on(&rd->rto_push_work, cpu);
2357 #endif /* HAVE_RT_PUSH_IPI */
2359 static void pull_rt_task(struct rq *this_rq)
2361 int this_cpu = this_rq->cpu, cpu;
2362 bool resched = false;
2363 struct task_struct *p, *push_task;
2365 int rt_overload_count = rt_overloaded(this_rq);
2367 if (likely(!rt_overload_count))
2371 * Match the barrier from rt_set_overload(); this guarantees that if we
2372 * see overloaded we must also see the rto_mask bit.
2373 */
2374 smp_rmb();
2376 /* If we are the only overloaded CPU do nothing */
2377 if (rt_overload_count == 1 &&
2378 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2381 #ifdef HAVE_RT_PUSH_IPI
2382 if (sched_feat(RT_PUSH_IPI)) {
2383 tell_cpu_to_push(this_rq);
2388 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2389 if (this_cpu == cpu)
2392 src_rq = cpu_rq(cpu);
2395 * Don't bother taking the src_rq->lock if the next highest
2396 * task is known to be lower-priority than our current task.
2397 * This may look racy, but if this value is about to go
2398 * logically higher, the src_rq will push this task away.
2399 * And if it's going logically lower, we do not care
2401 if (src_rq->rt.highest_prio.next >=
2402 this_rq->rt.highest_prio.curr)
2406 * We can potentially drop this_rq's lock in
2407 * double_lock_balance, and another CPU could
2408 * alter this_rq
2411 double_lock_balance(this_rq, src_rq);
2414 * We can only pull a task that is pushable
2415 * on its rq, and no others.
2417 p = pick_highest_pushable_task(src_rq, this_cpu);
2420 * Do we have an RT task that preempts
2421 * the to-be-scheduled task?
2423 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2424 WARN_ON(p == src_rq->curr);
2425 WARN_ON(!task_on_rq_queued(p));
2428 * There's a chance that p is higher in priority
2429 * than what's currently running on its CPU.
2430 * This is just that p is waking up and hasn't
2431 * had a chance to schedule. We only pull
2432 * p if it is lower in priority than the
2433 * current task on the run queue
2435 if (p->prio < src_rq->curr->prio)
2438 if (is_migration_disabled(p)) {
2439 push_task = get_push_task(src_rq);
2441 deactivate_task(src_rq, p, 0);
2442 set_task_cpu(p, this_cpu);
2443 activate_task(this_rq, p, 0);
2447 * We continue with the search, just in
2448 * case there's an even higher prio task
2449 * in another runqueue. (low likelihood
2454 double_unlock_balance(this_rq, src_rq);
2458 raw_spin_rq_unlock(this_rq);
2459 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2460 push_task, &src_rq->push_work);
2462 raw_spin_rq_lock(this_rq);
2467 resched_curr(this_rq);
2471 * If we are not running and we are not going to reschedule soon, we should
2472 * try to push tasks away now
2474 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2476 bool need_to_push = !task_on_cpu(rq, p) &&
2477 !test_tsk_need_resched(rq->curr) &&
2478 p->nr_cpus_allowed > 1 &&
2479 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2480 (rq->curr->nr_cpus_allowed < 2 ||
2481 rq->curr->prio <= p->prio);
2487 /* Assumes rq->lock is held */
2488 static void rq_online_rt(struct rq *rq)
2490 if (rq->rt.overloaded)
2491 rt_set_overload(rq);
2493 __enable_runtime(rq);
2495 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2498 /* Assumes rq->lock is held */
2499 static void rq_offline_rt(struct rq *rq)
2501 if (rq->rt.overloaded)
2502 rt_clear_overload(rq);
2504 __disable_runtime(rq);
2506 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2510 * When switching from the RT queue, we bring ourselves to a position
2511 * where we might want to pull RT tasks from other runqueues.
2513 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2516 * If there are other RT tasks then we will reschedule
2517 * and the scheduling of the other RT tasks will handle
2518 * the balancing. But if we are the last RT task
2519 * we may need to handle the pulling of RT tasks
2522 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2525 rt_queue_pull_task(rq);
2528 void __init init_sched_rt_class(void)
2532 for_each_possible_cpu(i) {
2533 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2534 GFP_KERNEL, cpu_to_node(i));
2537 #endif /* CONFIG_SMP */
2540 * When switching a task to RT, we may overload the runqueue
2541 * with RT tasks. In this case we try to push them off to other runqueues.
2544 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2547 * If we are running, update the avg_rt tracking, as the running time
2548 * will from now on be accounted to it.
2550 if (task_current(rq, p)) {
2551 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2556 * If we are not running we may need to preempt the currently
2557 * running task. If that task is also an RT task
2558 * then see if we can move to another run queue.
2560 if (task_on_rq_queued(p)) {
2562 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2563 rt_queue_push_tasks(rq);
2564 #endif /* CONFIG_SMP */
2565 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2571 * Priority of the task has changed. This may cause
2572 * us to initiate a push or pull.
2575 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2577 if (!task_on_rq_queued(p))
2580 if (task_current(rq, p)) {
2583 * If our priority decreases while running, we
2584 * may need to pull tasks to this runqueue.
2586 if (oldprio < p->prio)
2587 rt_queue_pull_task(rq);
2590 * If there's a higher priority task waiting to run, then reschedule.
2593 if (p->prio > rq->rt.highest_prio.curr)
2596 /* For UP simply resched on drop of prio */
2597 if (oldprio < p->prio)
2599 #endif /* CONFIG_SMP */
2602 * This task is not running, but if it is
2603 * higher in priority than the currently running task, then reschedule.
2606 if (p->prio < rq->curr->prio)
2611 #ifdef CONFIG_POSIX_TIMERS
2612 static void watchdog(struct rq *rq, struct task_struct *p)
2614 unsigned long soft, hard;
2616 /* hard may change after soft was read; this will be fixed up on the next tick */
2617 soft = task_rlimit(p, RLIMIT_RTTIME);
2618 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2620 if (soft != RLIM_INFINITY) {
2623 if (p->rt.watchdog_stamp != jiffies) {
2625 p->rt.watchdog_stamp = jiffies;
2628 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2629 if (p->rt.timeout > next) {
2630 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2631 p->se.sum_exec_runtime);
2636 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
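/*
 * Worked example for the RLIMIT_RTTIME path above (assuming HZ=1000):
 * a soft limit of 50000us gives next = DIV_ROUND_UP(50000, 1000) = 50.
 * p->rt.timeout is bumped at most once per jiffy of RT execution, so
 * after roughly 50ms of accumulated RT runtime the posix-cputimers
 * watchdog is armed and the RLIMIT_RTTIME signalling (SIGXCPU at the
 * soft limit) can fire.
 */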
2640 * scheduler tick hitting a task of our scheduling class.
2642 * NOTE: This function can be called remotely by the tick offload that
2643 * goes along with full dynticks. Therefore no local assumption can be made
2644 * and everything must be accessed through the @rq and @curr passed-in parameters.
2647 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2649 struct sched_rt_entity *rt_se = &p->rt;
2652 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2657 * RR tasks need a special form of timeslice management.
2658 * FIFO tasks have no timeslices.
2660 if (p->policy != SCHED_RR)
2663 if (--p->rt.time_slice)
2666 p->rt.time_slice = sched_rr_timeslice;
2669 * Requeue to the end of the queue if we (and all of our ancestors) are not
2670 * the only element on the queue.
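/*
 * run_list.prev != run_list.next is a cheap "not the only entry on this
 * priority list" test: for a single-entry list both pointers refer to
 * the list head.
 */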
2672 for_each_sched_rt_entity(rt_se) {
2673 if (rt_se->run_list.prev != rt_se->run_list.next) {
2674 requeue_task_rt(rq, p, 0);
2681 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2684 * Time slice is 0 for SCHED_FIFO tasks
2686 if (task->policy == SCHED_RR)
2687 return sched_rr_timeslice;
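/*
 * The timeslice returned here is in jiffies; the sched_rr_get_interval()
 * syscall converts it to a timespec for userspace.  Illustrative use,
 * assuming the default 100ms RR timeslice:
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(0, &ts);	yields ts = { 0, 100000000 }
 */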
2692 #ifdef CONFIG_SCHED_CORE
2693 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2695 struct rt_rq *rt_rq;
2697 #ifdef CONFIG_RT_GROUP_SCHED
2698 rt_rq = task_group(p)->rt_rq[cpu];
2700 rt_rq = &cpu_rq(cpu)->rt;
2703 return rt_rq_throttled(rt_rq);
2707 DEFINE_SCHED_CLASS(rt) = {
2709 .enqueue_task = enqueue_task_rt,
2710 .dequeue_task = dequeue_task_rt,
2711 .yield_task = yield_task_rt,
2713 .check_preempt_curr = check_preempt_curr_rt,
2715 .pick_next_task = pick_next_task_rt,
2716 .put_prev_task = put_prev_task_rt,
2717 .set_next_task = set_next_task_rt,
2720 .balance = balance_rt,
2721 .pick_task = pick_task_rt,
2722 .select_task_rq = select_task_rq_rt,
2723 .set_cpus_allowed = set_cpus_allowed_common,
2724 .rq_online = rq_online_rt,
2725 .rq_offline = rq_offline_rt,
2726 .task_woken = task_woken_rt,
2727 .switched_from = switched_from_rt,
2728 .find_lock_rq = find_lock_lowest_rq,
2731 .task_tick = task_tick_rt,
2733 .get_rr_interval = get_rr_interval_rt,
2735 .prio_changed = prio_changed_rt,
2736 .switched_to = switched_to_rt,
2738 .update_curr = update_curr_rt,
2740 #ifdef CONFIG_SCHED_CORE
2741 .task_is_throttled = task_is_throttled_rt,
2744 #ifdef CONFIG_UCLAMP_TASK
2745 .uclamp_enabled = 1,
2749 #ifdef CONFIG_RT_GROUP_SCHED
2751 * Ensure that the real time constraints are schedulable.
2753 static DEFINE_MUTEX(rt_constraints_mutex);
2755 static inline int tg_has_rt_tasks(struct task_group *tg)
2757 struct task_struct *task;
2758 struct css_task_iter it;
2762 * Autogroups do not have RT tasks; see autogroup_create().
2764 if (task_group_is_autogroup(tg))
2767 css_task_iter_start(&tg->css, 0, &it);
2768 while (!ret && (task = css_task_iter_next(&it)))
2769 ret |= rt_task(task);
2770 css_task_iter_end(&it);
2775 struct rt_schedulable_data {
2776 struct task_group *tg;
2781 static int tg_rt_schedulable(struct task_group *tg, void *data)
2783 struct rt_schedulable_data *d = data;
2784 struct task_group *child;
2785 unsigned long total, sum = 0;
2786 u64 period, runtime;
2788 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2789 runtime = tg->rt_bandwidth.rt_runtime;
2792 period = d->rt_period;
2793 runtime = d->rt_runtime;
2797 * Cannot have more runtime than the period.
2799 if (runtime > period && runtime != RUNTIME_INF)
2803 * Ensure we don't starve existing RT tasks if runtime turns zero.
2805 if (rt_bandwidth_enabled() && !runtime &&
2806 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2809 total = to_ratio(period, runtime);
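/*
 * to_ratio() expresses runtime/period as a fixed-point fraction scaled
 * by 2^BW_SHIFT.  With the default 950ms of runtime per 1s period this
 * is roughly 0.95 << 20 ~= 996147, which equals the global ratio, so
 * the "total > global" check below passes.
 */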
2812 * Nobody can have more than the global setting allows.
2814 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2818 * The sum of our children's runtime should not exceed our own.
2820 list_for_each_entry_rcu(child, &tg->children, siblings) {
2821 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2822 runtime = child->rt_bandwidth.rt_runtime;
2824 if (child == d->tg) {
2825 period = d->rt_period;
2826 runtime = d->rt_runtime;
2829 sum += to_ratio(period, runtime);
2838 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2842 struct rt_schedulable_data data = {
2844 .rt_period = period,
2845 .rt_runtime = runtime,
2849 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2855 static int tg_set_rt_bandwidth(struct task_group *tg,
2856 u64 rt_period, u64 rt_runtime)
2861 * Disallowing the root group RT runtime is BAD; it would disallow the
2862 * kernel from creating (and/or operating) RT threads.
2864 if (tg == &root_task_group && rt_runtime == 0)
2867 /* A period of zero doesn't make any sense. */
2872 * Bound the quota to defend against overflow during the bandwidth shift.
2874 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2877 mutex_lock(&rt_constraints_mutex);
2878 err = __rt_schedulable(tg, rt_period, rt_runtime);
2882 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2883 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2884 tg->rt_bandwidth.rt_runtime = rt_runtime;
2886 for_each_possible_cpu(i) {
2887 struct rt_rq *rt_rq = tg->rt_rq[i];
2889 raw_spin_lock(&rt_rq->rt_runtime_lock);
2890 rt_rq->rt_runtime = rt_runtime;
2891 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2893 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2895 mutex_unlock(&rt_constraints_mutex);
2900 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2902 u64 rt_runtime, rt_period;
2904 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2905 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2906 if (rt_runtime_us < 0)
2907 rt_runtime = RUNTIME_INF;
2908 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2911 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2914 long sched_group_rt_runtime(struct task_group *tg)
2918 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2921 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2922 do_div(rt_runtime_us, NSEC_PER_USEC);
2923 return rt_runtime_us;
2926 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2928 u64 rt_runtime, rt_period;
2930 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2933 rt_period = rt_period_us * NSEC_PER_USEC;
2934 rt_runtime = tg->rt_bandwidth.rt_runtime;
2936 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2939 long sched_group_rt_period(struct task_group *tg)
2943 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2944 do_div(rt_period_us, NSEC_PER_USEC);
2945 return rt_period_us;
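/*
 * These per-group knobs are exposed by the cgroup-v1 cpu controller as
 * cpu.rt_runtime_us and cpu.rt_period_us.  For example (the mount point
 * and group name are illustrative):
 *
 *	echo 300000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us
 *
 * grants that group 300ms of RT runtime per (default 1s) period, subject
 * to the __rt_schedulable() checks above.
 */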
2948 #ifdef CONFIG_SYSCTL
2949 static int sched_rt_global_constraints(void)
2953 mutex_lock(&rt_constraints_mutex);
2954 ret = __rt_schedulable(NULL, 0, 0);
2955 mutex_unlock(&rt_constraints_mutex);
2959 #endif /* CONFIG_SYSCTL */
2961 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2963 /* Don't accept realtime tasks when there is no way for them to run */
2964 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2970 #else /* !CONFIG_RT_GROUP_SCHED */
2972 #ifdef CONFIG_SYSCTL
2973 static int sched_rt_global_constraints(void)
2975 unsigned long flags;
2978 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2979 for_each_possible_cpu(i) {
2980 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2982 raw_spin_lock(&rt_rq->rt_runtime_lock);
2983 rt_rq->rt_runtime = global_rt_runtime();
2984 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2986 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2990 #endif /* CONFIG_SYSCTL */
2991 #endif /* CONFIG_RT_GROUP_SCHED */
2993 #ifdef CONFIG_SYSCTL
2994 static int sched_rt_global_validate(void)
2996 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2997 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2998 ((u64)sysctl_sched_rt_runtime *
2999 NSEC_PER_USEC > max_rt_runtime)))
3005 static void sched_rt_do_global(void)
3007 unsigned long flags;
3009 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
3010 def_rt_bandwidth.rt_runtime = global_rt_runtime();
3011 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
3012 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
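/*
 * The corresponding global knobs live under /proc/sys/kernel/, e.g.:
 *
 *	echo -1     > /proc/sys/kernel/sched_rt_runtime_us	(disable RT throttling)
 *	echo 950000 > /proc/sys/kernel/sched_rt_runtime_us	(default: 950ms per 1s period)
 *
 * sched_rt_global_validate() above rejects a runtime larger than the
 * period or larger than max_rt_runtime.
 */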
3015 static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
3016 size_t *lenp, loff_t *ppos)
3018 int old_period, old_runtime;
3019 static DEFINE_MUTEX(mutex);
3023 old_period = sysctl_sched_rt_period;
3024 old_runtime = sysctl_sched_rt_runtime;
3026 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
3028 if (!ret && write) {
3029 ret = sched_rt_global_validate();
3033 ret = sched_dl_global_validate();
3037 ret = sched_rt_global_constraints();
3041 sched_rt_do_global();
3042 sched_dl_do_global();
3046 sysctl_sched_rt_period = old_period;
3047 sysctl_sched_rt_runtime = old_runtime;
3049 mutex_unlock(&mutex);
3054 static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
3055 size_t *lenp, loff_t *ppos)
3058 static DEFINE_MUTEX(mutex);
3061 ret = proc_dointvec(table, write, buffer, lenp, ppos);
3063 * Make sure that internally we keep jiffies.
3064 * Also, writing zero resets the timeslice to the default:
3066 if (!ret && write) {
3067 sched_rr_timeslice =
3068 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3069 msecs_to_jiffies(sysctl_sched_rr_timeslice);
3071 if (sysctl_sched_rr_timeslice <= 0)
3072 sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
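/*
 * Example (assuming HZ=1000): "echo 10 > /proc/sys/kernel/sched_rr_timeslice_ms"
 * sets sched_rr_timeslice to msecs_to_jiffies(10) = 10 ticks, while writing
 * zero or a negative value restores RR_TIMESLICE and reports the default
 * back to userspace in milliseconds.
 */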
3074 mutex_unlock(&mutex);
3078 #endif /* CONFIG_SYSCTL */
3080 #ifdef CONFIG_SCHED_DEBUG
3081 void print_rt_stats(struct seq_file *m, int cpu)
3084 struct rt_rq *rt_rq;
3087 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3088 print_rt_rq(m, cpu, rt_rq);
3091 #endif /* CONFIG_SCHED_DEBUG */