1 // SPDX-License-Identifier: GPL-2.0
3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR policies)
10 int sched_rr_timeslice = RR_TIMESLICE;
11 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
12 /* More than 4 hours if BW_SHIFT equals 20. */
13 static const u64 max_rt_runtime = MAX_BW;
15 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
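/*
 * def_rt_bandwidth holds the global RT period/runtime pair (the
 * sched_rt_period_us/sched_rt_runtime_us sysctls); it is the bandwidth pool
 * returned by sched_rt_bandwidth() when CONFIG_RT_GROUP_SCHED is not enabled.
 */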
17 struct rt_bandwidth def_rt_bandwidth;
19 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
21 struct rt_bandwidth *rt_b =
22 container_of(timer, struct rt_bandwidth, rt_period_timer);
26 raw_spin_lock(&rt_b->rt_runtime_lock);
28 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
32 raw_spin_unlock(&rt_b->rt_runtime_lock);
33 idle = do_sched_rt_period_timer(rt_b, overrun);
34 raw_spin_lock(&rt_b->rt_runtime_lock);
37 rt_b->rt_period_active = 0;
38 raw_spin_unlock(&rt_b->rt_runtime_lock);
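/* Keep the timer armed until every rt_rq served by it reports idle. */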
40 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
43 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
45 rt_b->rt_period = ns_to_ktime(period);
46 rt_b->rt_runtime = runtime;
48 raw_spin_lock_init(&rt_b->rt_runtime_lock);
50 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
51 HRTIMER_MODE_REL_HARD);
52 rt_b->rt_period_timer.function = sched_rt_period_timer;
55 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
57 raw_spin_lock(&rt_b->rt_runtime_lock);
58 if (!rt_b->rt_period_active) {
59 rt_b->rt_period_active = 1;
61 * SCHED_DEADLINE updates the bandwidth, as a runaway
62 * RT task alongside a DL task could hog a CPU. But DL does
63 * not reset the period. If a deadline task was running
64 * without an RT task running, it can cause RT tasks to
65 * throttle when they start up. Kick the timer right away
66 * to update the period.
68 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
69 hrtimer_start_expires(&rt_b->rt_period_timer,
70 HRTIMER_MODE_ABS_PINNED_HARD);
72 raw_spin_unlock(&rt_b->rt_runtime_lock);
75 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
77 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
80 do_start_rt_bandwidth(rt_b);
83 void init_rt_rq(struct rt_rq *rt_rq)
85 struct rt_prio_array *array;
88 array = &rt_rq->active;
89 for (i = 0; i < MAX_RT_PRIO; i++) {
90 INIT_LIST_HEAD(array->queue + i);
91 __clear_bit(i, array->bitmap);
93 /* delimiter for bitsearch: */
94 __set_bit(MAX_RT_PRIO, array->bitmap);
96 #if defined CONFIG_SMP
97 rt_rq->highest_prio.curr = MAX_RT_PRIO;
98 rt_rq->highest_prio.next = MAX_RT_PRIO;
99 rt_rq->rt_nr_migratory = 0;
100 rt_rq->overloaded = 0;
101 plist_head_init(&rt_rq->pushable_tasks);
102 #endif /* CONFIG_SMP */
103 /* We start in dequeued state, because no RT tasks are queued */
104 rt_rq->rt_queued = 0;
107 rt_rq->rt_throttled = 0;
108 rt_rq->rt_runtime = 0;
109 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
112 #ifdef CONFIG_RT_GROUP_SCHED
113 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
115 hrtimer_cancel(&rt_b->rt_period_timer);
118 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
120 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
122 #ifdef CONFIG_SCHED_DEBUG
123 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
125 return container_of(rt_se, struct task_struct, rt);
128 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
133 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
138 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
140 struct rt_rq *rt_rq = rt_se->rt_rq;
145 void free_rt_sched_group(struct task_group *tg)
150 destroy_rt_bandwidth(&tg->rt_bandwidth);
152 for_each_possible_cpu(i) {
163 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
164 struct sched_rt_entity *rt_se, int cpu,
165 struct sched_rt_entity *parent)
167 struct rq *rq = cpu_rq(cpu);
169 rt_rq->highest_prio.curr = MAX_RT_PRIO;
170 rt_rq->rt_nr_boosted = 0;
174 tg->rt_rq[cpu] = rt_rq;
175 tg->rt_se[cpu] = rt_se;
181 rt_se->rt_rq = &rq->rt;
183 rt_se->rt_rq = parent->my_q;
186 rt_se->parent = parent;
187 INIT_LIST_HEAD(&rt_se->run_list);
190 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
193 struct sched_rt_entity *rt_se;
196 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
199 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
203 init_rt_bandwidth(&tg->rt_bandwidth,
204 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
206 for_each_possible_cpu(i) {
207 rt_rq = kzalloc_node(sizeof(struct rt_rq),
208 GFP_KERNEL, cpu_to_node(i));
212 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
213 GFP_KERNEL, cpu_to_node(i));
218 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
219 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
230 #else /* CONFIG_RT_GROUP_SCHED */
232 #define rt_entity_is_task(rt_se) (1)
234 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
236 return container_of(rt_se, struct task_struct, rt);
239 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
241 return container_of(rt_rq, struct rq, rt);
244 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
246 struct task_struct *p = rt_task_of(rt_se);
251 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
253 struct rq *rq = rq_of_rt_se(rt_se);
258 void free_rt_sched_group(struct task_group *tg) { }
260 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
264 #endif /* CONFIG_RT_GROUP_SCHED */
268 static void pull_rt_task(struct rq *this_rq);
270 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
272 /* Try to pull RT tasks here if we lower this rq's prio */
273 return rq->rt.highest_prio.curr > prev->prio;
276 static inline int rt_overloaded(struct rq *rq)
278 return atomic_read(&rq->rd->rto_count);
281 static inline void rt_set_overload(struct rq *rq)
286 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
288 * Make sure the mask is visible before we set
289 * the overload count. That is checked to determine
290 * if we should look at the mask. It would be a shame
291 * if we looked at the mask, but the mask was not updated yet.
294 * Matched by the barrier in pull_rt_task().
297 atomic_inc(&rq->rd->rto_count);
300 static inline void rt_clear_overload(struct rq *rq)
305 /* the order here really doesn't matter */
306 atomic_dec(&rq->rd->rto_count);
307 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
310 static void update_rt_migration(struct rt_rq *rt_rq)
312 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
313 if (!rt_rq->overloaded) {
314 rt_set_overload(rq_of_rt_rq(rt_rq));
315 rt_rq->overloaded = 1;
317 } else if (rt_rq->overloaded) {
318 rt_clear_overload(rq_of_rt_rq(rt_rq));
319 rt_rq->overloaded = 0;
323 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
325 struct task_struct *p;
327 if (!rt_entity_is_task(rt_se))
330 p = rt_task_of(rt_se);
331 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
333 rt_rq->rt_nr_total++;
334 if (p->nr_cpus_allowed > 1)
335 rt_rq->rt_nr_migratory++;
337 update_rt_migration(rt_rq);
340 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
342 struct task_struct *p;
344 if (!rt_entity_is_task(rt_se))
347 p = rt_task_of(rt_se);
348 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
350 rt_rq->rt_nr_total--;
351 if (p->nr_cpus_allowed > 1)
352 rt_rq->rt_nr_migratory--;
354 update_rt_migration(rt_rq);
357 static inline int has_pushable_tasks(struct rq *rq)
359 return !plist_head_empty(&rq->rt.pushable_tasks);
362 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
363 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
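/*
 * Per-CPU callback heads used with queue_balance_callback() to defer RT
 * push/pull balancing to a point where it is safe to drop the rq lock.
 */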
365 static void push_rt_tasks(struct rq *);
366 static void pull_rt_task(struct rq *);
368 static inline void rt_queue_push_tasks(struct rq *rq)
370 if (!has_pushable_tasks(rq))
373 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
376 static inline void rt_queue_pull_task(struct rq *rq)
378 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
381 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
383 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
384 plist_node_init(&p->pushable_tasks, p->prio);
385 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
387 /* Update the highest prio pushable task */
388 if (p->prio < rq->rt.highest_prio.next)
389 rq->rt.highest_prio.next = p->prio;
392 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
394 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
396 /* Update the new highest prio pushable task */
397 if (has_pushable_tasks(rq)) {
398 p = plist_first_entry(&rq->rt.pushable_tasks,
399 struct task_struct, pushable_tasks);
400 rq->rt.highest_prio.next = p->prio;
402 rq->rt.highest_prio.next = MAX_RT_PRIO;
407 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
411 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
416 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
421 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
425 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
430 static inline void pull_rt_task(struct rq *this_rq)
434 static inline void rt_queue_push_tasks(struct rq *rq)
437 #endif /* CONFIG_SMP */
439 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
440 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
442 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
447 #ifdef CONFIG_RT_GROUP_SCHED
449 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
454 return rt_rq->rt_runtime;
457 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
459 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
462 typedef struct task_group *rt_rq_iter_t;
464 static inline struct task_group *next_task_group(struct task_group *tg)
467 tg = list_entry_rcu(tg->list.next,
468 typeof(struct task_group), list);
469 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
471 if (&tg->list == &task_groups)
477 #define for_each_rt_rq(rt_rq, iter, rq) \
478 for (iter = container_of(&task_groups, typeof(*iter), list); \
479 (iter = next_task_group(iter)) && \
480 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
482 #define for_each_sched_rt_entity(rt_se) \
483 for (; rt_se; rt_se = rt_se->parent)
485 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
490 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
491 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
493 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
495 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
496 struct rq *rq = rq_of_rt_rq(rt_rq);
497 struct sched_rt_entity *rt_se;
499 int cpu = cpu_of(rq);
501 rt_se = rt_rq->tg->rt_se[cpu];
503 if (rt_rq->rt_nr_running) {
505 enqueue_top_rt_rq(rt_rq);
506 else if (!on_rt_rq(rt_se))
507 enqueue_rt_entity(rt_se, 0);
509 if (rt_rq->highest_prio.curr < curr->prio)
514 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
516 struct sched_rt_entity *rt_se;
517 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
519 rt_se = rt_rq->tg->rt_se[cpu];
522 dequeue_top_rt_rq(rt_rq);
523 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
524 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
526 else if (on_rt_rq(rt_se))
527 dequeue_rt_entity(rt_se, 0);
530 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
532 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
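/*
 * An rt_se is "boosted" when priority inheritance has raised its effective
 * priority above its normal one; rt_nr_boosted tracks such entities so a
 * throttled group that still holds boosted tasks is allowed to keep running.
 */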
535 static int rt_se_boosted(struct sched_rt_entity *rt_se)
537 struct rt_rq *rt_rq = group_rt_rq(rt_se);
538 struct task_struct *p;
541 return !!rt_rq->rt_nr_boosted;
543 p = rt_task_of(rt_se);
544 return p->prio != p->normal_prio;
548 static inline const struct cpumask *sched_rt_period_mask(void)
550 return this_rq()->rd->span;
553 static inline const struct cpumask *sched_rt_period_mask(void)
555 return cpu_online_mask;
560 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
562 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
565 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
567 return &rt_rq->tg->rt_bandwidth;
570 #else /* !CONFIG_RT_GROUP_SCHED */
572 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
574 return rt_rq->rt_runtime;
577 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
579 return ktime_to_ns(def_rt_bandwidth.rt_period);
582 typedef struct rt_rq *rt_rq_iter_t;
584 #define for_each_rt_rq(rt_rq, iter, rq) \
585 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
587 #define for_each_sched_rt_entity(rt_se) \
588 for (; rt_se; rt_se = NULL)
590 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
595 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
597 struct rq *rq = rq_of_rt_rq(rt_rq);
599 if (!rt_rq->rt_nr_running)
602 enqueue_top_rt_rq(rt_rq);
606 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
608 dequeue_top_rt_rq(rt_rq);
611 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
613 return rt_rq->rt_throttled;
616 static inline const struct cpumask *sched_rt_period_mask(void)
618 return cpu_online_mask;
622 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
624 return &cpu_rq(cpu)->rt;
627 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
629 return &def_rt_bandwidth;
632 #endif /* CONFIG_RT_GROUP_SCHED */
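/*
 * Report whether RT bandwidth is currently being accounted on this rt_rq:
 * either the period timer is active or there is runtime left to consume.
 */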
634 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
636 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
638 return (hrtimer_active(&rt_b->rt_period_timer) ||
639 rt_rq->rt_time < rt_b->rt_runtime);
644 * We ran out of runtime, see if we can borrow some from our neighbours.
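 * (Only with the RT_RUNTIME_SHARE feature enabled: each neighbour gives up at
 * most 1/n of its spare runtime, where n is the number of CPUs in the root
 * domain, and we never accumulate more than one full period.)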
646 static void do_balance_runtime(struct rt_rq *rt_rq)
648 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
649 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
653 weight = cpumask_weight(rd->span);
655 raw_spin_lock(&rt_b->rt_runtime_lock);
656 rt_period = ktime_to_ns(rt_b->rt_period);
657 for_each_cpu(i, rd->span) {
658 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
664 raw_spin_lock(&iter->rt_runtime_lock);
666 * Either all rqs have inf runtime and there's nothing to steal
667 * or __disable_runtime() below sets a specific rq to inf to
668 * indicate it's been disabled and disallow stealing.
670 if (iter->rt_runtime == RUNTIME_INF)
674 * From runqueues with spare time, take 1/n part of their
675 * spare time, but no more than our period.
677 diff = iter->rt_runtime - iter->rt_time;
679 diff = div_u64((u64)diff, weight);
680 if (rt_rq->rt_runtime + diff > rt_period)
681 diff = rt_period - rt_rq->rt_runtime;
682 iter->rt_runtime -= diff;
683 rt_rq->rt_runtime += diff;
684 if (rt_rq->rt_runtime == rt_period) {
685 raw_spin_unlock(&iter->rt_runtime_lock);
690 raw_spin_unlock(&iter->rt_runtime_lock);
692 raw_spin_unlock(&rt_b->rt_runtime_lock);
696 * Ensure this RQ takes back all the runtime it lent to its neighbours.
698 static void __disable_runtime(struct rq *rq)
700 struct root_domain *rd = rq->rd;
704 if (unlikely(!scheduler_running))
707 for_each_rt_rq(rt_rq, iter, rq) {
708 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
712 raw_spin_lock(&rt_b->rt_runtime_lock);
713 raw_spin_lock(&rt_rq->rt_runtime_lock);
715 * Either we're all inf and nobody needs to borrow, or we're
716 * already disabled and thus have nothing to do, or we have
717 * exactly the right amount of runtime to take out.
719 if (rt_rq->rt_runtime == RUNTIME_INF ||
720 rt_rq->rt_runtime == rt_b->rt_runtime)
722 raw_spin_unlock(&rt_rq->rt_runtime_lock);
725 * Calculate the difference between what we started out with
726 * and what we currently have; that's the amount of runtime
727 * we lent out and now have to reclaim.
729 want = rt_b->rt_runtime - rt_rq->rt_runtime;
732 * Greedy reclaim, take back as much as we can.
734 for_each_cpu(i, rd->span) {
735 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
739 * Can't reclaim from ourselves or disabled runqueues.
741 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
744 raw_spin_lock(&iter->rt_runtime_lock);
746 diff = min_t(s64, iter->rt_runtime, want);
747 iter->rt_runtime -= diff;
750 iter->rt_runtime -= want;
753 raw_spin_unlock(&iter->rt_runtime_lock);
759 raw_spin_lock(&rt_rq->rt_runtime_lock);
761 * We cannot be left wanting - that would mean some runtime
762 * leaked out of the system.
767 * Disable all the borrow logic by pretending we have inf
768 * runtime - in which case borrowing doesn't make sense.
770 rt_rq->rt_runtime = RUNTIME_INF;
771 rt_rq->rt_throttled = 0;
772 raw_spin_unlock(&rt_rq->rt_runtime_lock);
773 raw_spin_unlock(&rt_b->rt_runtime_lock);
775 /* Make rt_rq available for pick_next_task() */
776 sched_rt_rq_enqueue(rt_rq);
780 static void __enable_runtime(struct rq *rq)
785 if (unlikely(!scheduler_running))
789 * Reset each runqueue's bandwidth settings
791 for_each_rt_rq(rt_rq, iter, rq) {
792 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
794 raw_spin_lock(&rt_b->rt_runtime_lock);
795 raw_spin_lock(&rt_rq->rt_runtime_lock);
796 rt_rq->rt_runtime = rt_b->rt_runtime;
798 rt_rq->rt_throttled = 0;
799 raw_spin_unlock(&rt_rq->rt_runtime_lock);
800 raw_spin_unlock(&rt_b->rt_runtime_lock);
804 static void balance_runtime(struct rt_rq *rt_rq)
806 if (!sched_feat(RT_RUNTIME_SHARE))
809 if (rt_rq->rt_time > rt_rq->rt_runtime) {
810 raw_spin_unlock(&rt_rq->rt_runtime_lock);
811 do_balance_runtime(rt_rq);
812 raw_spin_lock(&rt_rq->rt_runtime_lock);
815 #else /* !CONFIG_SMP */
816 static inline void balance_runtime(struct rt_rq *rt_rq) {}
817 #endif /* CONFIG_SMP */
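/*
 * Periodic replenishment, run from the rt_period hrtimer: for each rt_rq in
 * the period mask, pay back up to 'overrun' periods worth of runtime,
 * unthrottle and re-enqueue runqueues that have budget again, and report
 * whether everything went idle so the timer can stop.
 */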
819 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
821 int i, idle = 1, throttled = 0;
822 const struct cpumask *span;
824 span = sched_rt_period_mask();
825 #ifdef CONFIG_RT_GROUP_SCHED
827 * FIXME: isolated CPUs should really leave the root task group,
828 * whether they are isolcpus or were isolated via cpusets, lest
829 * the timer run on a CPU which does not service all runqueues,
830 * potentially leaving other CPUs indefinitely throttled. If
831 * isolation is really required, the user will turn the throttle
832 * off to kill the perturbations it causes anyway. Meanwhile,
833 * this maintains functionality for boot and/or troubleshooting.
835 if (rt_b == &root_task_group.rt_bandwidth)
836 span = cpu_online_mask;
838 for_each_cpu(i, span) {
840 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
841 struct rq *rq = rq_of_rt_rq(rt_rq);
845 * When span == cpu_online_mask, taking each rq->lock
846 * can be time-consuming. Try to avoid it when possible.
848 raw_spin_lock(&rt_rq->rt_runtime_lock);
849 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
850 rt_rq->rt_runtime = rt_b->rt_runtime;
851 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
852 raw_spin_unlock(&rt_rq->rt_runtime_lock);
856 raw_spin_lock(&rq->lock);
859 if (rt_rq->rt_time) {
862 raw_spin_lock(&rt_rq->rt_runtime_lock);
863 if (rt_rq->rt_throttled)
864 balance_runtime(rt_rq);
865 runtime = rt_rq->rt_runtime;
866 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
867 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
868 rt_rq->rt_throttled = 0;
872 * When we're idle and a woken (rt) task is
873 * throttled, check_preempt_curr() will set
874 * skip_update and the time between the wakeup
875 * and this unthrottle will get accounted as idle time.
878 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
879 rq_clock_cancel_skipupdate(rq);
881 if (rt_rq->rt_time || rt_rq->rt_nr_running)
883 raw_spin_unlock(&rt_rq->rt_runtime_lock);
884 } else if (rt_rq->rt_nr_running) {
886 if (!rt_rq_throttled(rt_rq))
889 if (rt_rq->rt_throttled)
893 sched_rt_rq_enqueue(rt_rq);
894 raw_spin_unlock(&rq->lock);
897 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
903 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
905 #ifdef CONFIG_RT_GROUP_SCHED
906 struct rt_rq *rt_rq = group_rt_rq(rt_se);
909 return rt_rq->highest_prio.curr;
912 return rt_task_of(rt_se)->prio;
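/*
 * The caller has already charged the elapsed time to rt_rq->rt_time; decide
 * whether this rt_rq has exceeded its runtime for the current period. If so,
 * mark it throttled, dequeue it and return 1 so the caller can resched.
 */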
915 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
917 u64 runtime = sched_rt_runtime(rt_rq);
919 if (rt_rq->rt_throttled)
920 return rt_rq_throttled(rt_rq);
922 if (runtime >= sched_rt_period(rt_rq))
925 balance_runtime(rt_rq);
926 runtime = sched_rt_runtime(rt_rq);
927 if (runtime == RUNTIME_INF)
930 if (rt_rq->rt_time > runtime) {
931 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
934 * Don't actually throttle groups that have no runtime assigned
935 * but accrue some time due to boosting.
937 if (likely(rt_b->rt_runtime)) {
938 rt_rq->rt_throttled = 1;
939 printk_deferred_once("sched: RT throttling activated\n");
942 * In case we did anyway, make it go away:
943 * replenishment is a joke, since it will replenish us with exactly 0 ns.
949 if (rt_rq_throttled(rt_rq)) {
950 sched_rt_rq_dequeue(rt_rq);
959 * Update the current task's runtime statistics. Skip current tasks that
960 * are not in our scheduling class.
962 static void update_curr_rt(struct rq *rq)
964 struct task_struct *curr = rq->curr;
965 struct sched_rt_entity *rt_se = &curr->rt;
969 if (curr->sched_class != &rt_sched_class)
972 now = rq_clock_task(rq);
973 delta_exec = now - curr->se.exec_start;
974 if (unlikely((s64)delta_exec <= 0))
977 schedstat_set(curr->se.statistics.exec_max,
978 max(curr->se.statistics.exec_max, delta_exec));
980 curr->se.sum_exec_runtime += delta_exec;
981 account_group_exec_runtime(curr, delta_exec);
983 curr->se.exec_start = now;
984 cgroup_account_cputime(curr, delta_exec);
986 if (!rt_bandwidth_enabled())
989 for_each_sched_rt_entity(rt_se) {
990 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
993 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
994 raw_spin_lock(&rt_rq->rt_runtime_lock);
995 rt_rq->rt_time += delta_exec;
996 exceeded = sched_rt_runtime_exceeded(rt_rq);
999 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1001 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1007 dequeue_top_rt_rq(struct rt_rq *rt_rq)
1009 struct rq *rq = rq_of_rt_rq(rt_rq);
1011 BUG_ON(&rq->rt != rt_rq);
1013 if (!rt_rq->rt_queued)
1016 BUG_ON(!rq->nr_running);
1018 sub_nr_running(rq, rt_rq->rt_nr_running);
1019 rt_rq->rt_queued = 0;
1024 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1026 struct rq *rq = rq_of_rt_rq(rt_rq);
1028 BUG_ON(&rq->rt != rt_rq);
1030 if (rt_rq->rt_queued)
1033 if (rt_rq_throttled(rt_rq))
1036 if (rt_rq->rt_nr_running) {
1037 add_nr_running(rq, rt_rq->rt_nr_running);
1038 rt_rq->rt_queued = 1;
1041 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1042 cpufreq_update_util(rq, 0);
1045 #if defined CONFIG_SMP
1048 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1050 struct rq *rq = rq_of_rt_rq(rt_rq);
1052 #ifdef CONFIG_RT_GROUP_SCHED
1054 * Change rq's cpupri only if rt_rq is the top queue.
1056 if (&rq->rt != rt_rq)
1059 if (rq->online && prio < prev_prio)
1060 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1064 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1066 struct rq *rq = rq_of_rt_rq(rt_rq);
1068 #ifdef CONFIG_RT_GROUP_SCHED
1070 * Change rq's cpupri only if rt_rq is the top queue.
1072 if (&rq->rt != rt_rq)
1075 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1076 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1079 #else /* CONFIG_SMP */
1082 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1084 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1086 #endif /* CONFIG_SMP */
1088 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1090 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1092 int prev_prio = rt_rq->highest_prio.curr;
1094 if (prio < prev_prio)
1095 rt_rq->highest_prio.curr = prio;
1097 inc_rt_prio_smp(rt_rq, prio, prev_prio);
1101 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1103 int prev_prio = rt_rq->highest_prio.curr;
1105 if (rt_rq->rt_nr_running) {
1107 WARN_ON(prio < prev_prio);
1110 * This may have been our highest task, and therefore
1111 * we may have some recomputation to do
1113 if (prio == prev_prio) {
1114 struct rt_prio_array *array = &rt_rq->active;
1116 rt_rq->highest_prio.curr =
1117 sched_find_first_bit(array->bitmap);
1121 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1123 dec_rt_prio_smp(rt_rq, prio, prev_prio);
1128 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1129 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1131 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1133 #ifdef CONFIG_RT_GROUP_SCHED
1136 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1138 if (rt_se_boosted(rt_se))
1139 rt_rq->rt_nr_boosted++;
1142 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1146 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1148 if (rt_se_boosted(rt_se))
1149 rt_rq->rt_nr_boosted--;
1151 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1154 #else /* CONFIG_RT_GROUP_SCHED */
1157 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1159 start_rt_bandwidth(&def_rt_bandwidth);
1163 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1165 #endif /* CONFIG_RT_GROUP_SCHED */
1168 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1170 struct rt_rq *group_rq = group_rt_rq(rt_se);
1173 return group_rq->rt_nr_running;
1179 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1181 struct rt_rq *group_rq = group_rt_rq(rt_se);
1182 struct task_struct *tsk;
1185 return group_rq->rr_nr_running;
1187 tsk = rt_task_of(rt_se);
1189 return (tsk->policy == SCHED_RR) ? 1 : 0;
1193 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1195 int prio = rt_se_prio(rt_se);
1197 WARN_ON(!rt_prio(prio));
1198 rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1199 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1201 inc_rt_prio(rt_rq, prio);
1202 inc_rt_migration(rt_se, rt_rq);
1203 inc_rt_group(rt_se, rt_rq);
1207 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1209 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1210 WARN_ON(!rt_rq->rt_nr_running);
1211 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1212 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1214 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1215 dec_rt_migration(rt_se, rt_rq);
1216 dec_rt_group(rt_se, rt_rq);
1220 * Change rt_se->run_list location unless SAVE && !MOVE
1222 * assumes ENQUEUE/DEQUEUE flags match
1224 static inline bool move_entity(unsigned int flags)
1226 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1232 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1234 list_del_init(&rt_se->run_list);
1236 if (list_empty(array->queue + rt_se_prio(rt_se)))
1237 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1242 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1244 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1245 struct rt_prio_array *array = &rt_rq->active;
1246 struct rt_rq *group_rq = group_rt_rq(rt_se);
1247 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1250 * Don't enqueue the group if it's throttled, or when empty.
1251 * The latter is a consequence of the former when a child group
1252 * gets throttled and the current group doesn't have any other active members.
1255 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1257 __delist_rt_entity(rt_se, array);
1261 if (move_entity(flags)) {
1262 WARN_ON_ONCE(rt_se->on_list);
1263 if (flags & ENQUEUE_HEAD)
1264 list_add(&rt_se->run_list, queue);
1266 list_add_tail(&rt_se->run_list, queue);
1268 __set_bit(rt_se_prio(rt_se), array->bitmap);
1273 inc_rt_tasks(rt_se, rt_rq);
1276 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1278 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1279 struct rt_prio_array *array = &rt_rq->active;
1281 if (move_entity(flags)) {
1282 WARN_ON_ONCE(!rt_se->on_list);
1283 __delist_rt_entity(rt_se, array);
1287 dec_rt_tasks(rt_se, rt_rq);
1291 * Because the prio of an upper entry depends on the lower
1292 * entries, we must remove entries top-down.
1294 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1296 struct sched_rt_entity *back = NULL;
1298 for_each_sched_rt_entity(rt_se) {
1303 dequeue_top_rt_rq(rt_rq_of_se(back));
1305 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1306 if (on_rt_rq(rt_se))
1307 __dequeue_rt_entity(rt_se, flags);
1311 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1313 struct rq *rq = rq_of_rt_se(rt_se);
1315 dequeue_rt_stack(rt_se, flags);
1316 for_each_sched_rt_entity(rt_se)
1317 __enqueue_rt_entity(rt_se, flags);
1318 enqueue_top_rt_rq(&rq->rt);
1321 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1323 struct rq *rq = rq_of_rt_se(rt_se);
1325 dequeue_rt_stack(rt_se, flags);
1327 for_each_sched_rt_entity(rt_se) {
1328 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1330 if (rt_rq && rt_rq->rt_nr_running)
1331 __enqueue_rt_entity(rt_se, flags);
1333 enqueue_top_rt_rq(&rq->rt);
1337 * Adding/removing a task to/from a priority array:
1340 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1342 struct sched_rt_entity *rt_se = &p->rt;
1344 if (flags & ENQUEUE_WAKEUP)
1347 enqueue_rt_entity(rt_se, flags);
1349 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1350 enqueue_pushable_task(rq, p);
1353 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1355 struct sched_rt_entity *rt_se = &p->rt;
1358 dequeue_rt_entity(rt_se, flags);
1360 dequeue_pushable_task(rq, p);
1364 * Put task to the head or the end of the run list without the overhead of
1365 * dequeue followed by enqueue.
1368 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1370 if (on_rt_rq(rt_se)) {
1371 struct rt_prio_array *array = &rt_rq->active;
1372 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1375 list_move(&rt_se->run_list, queue);
1377 list_move_tail(&rt_se->run_list, queue);
1381 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1383 struct sched_rt_entity *rt_se = &p->rt;
1384 struct rt_rq *rt_rq;
1386 for_each_sched_rt_entity(rt_se) {
1387 rt_rq = rt_rq_of_se(rt_se);
1388 requeue_rt_entity(rt_rq, rt_se, head);
1392 static void yield_task_rt(struct rq *rq)
1394 requeue_task_rt(rq, rq->curr, 0);
1398 static int find_lowest_rq(struct task_struct *task);
1401 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1403 struct task_struct *curr;
1406 /* For anything but wake ups, just return the task_cpu */
1407 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1413 curr = READ_ONCE(rq->curr); /* unlocked access */
1416 * If the current task on @p's runqueue is an RT task, then
1417 * try to see if we can wake this RT task up on another
1418 * runqueue. Otherwise simply start this RT task
1419 * on its current runqueue.
1421 * We want to avoid overloading runqueues. If the woken
1422 * task is a higher priority, then it will stay on this CPU
1423 * and the lower prio task should be moved to another CPU.
1424 * Even though this will probably make the lower prio task
1425 * lose its cache, we do not want to bounce a higher task
1426 * around just because it gave up its CPU, perhaps for a lock?
1429 * For equal prio tasks, we just let the scheduler sort it out.
1431 * Otherwise, just let it ride on the affined RQ and the
1432 * post-schedule router will push the preempted task away
1434 * This test is optimistic, if we get it wrong the load-balancer
1435 * will have to sort it out.
1437 if (curr && unlikely(rt_task(curr)) &&
1438 (curr->nr_cpus_allowed < 2 ||
1439 curr->prio <= p->prio)) {
1440 int target = find_lowest_rq(p);
1443 * Don't bother moving it if the destination CPU is
1444 * not running a lower priority task.
1447 p->prio < cpu_rq(target)->rt.highest_prio.curr)
1456 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1459 * Current can't be migrated, useless to reschedule,
1460 * let's hope p can move out.
1462 if (rq->curr->nr_cpus_allowed == 1 ||
1463 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1467 * p is migratable, so let's not schedule it and
1468 * see if it is pushed or pulled somewhere else.
1470 if (p->nr_cpus_allowed != 1
1471 && cpupri_find(&rq->rd->cpupri, p, NULL))
1475 * There appear to be other CPUs that can accept
1476 * the current task but none can run 'p', so let's reschedule
1477 * to try and push the current task away:
1479 requeue_task_rt(rq, p, 1);
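/*
 * ->balance() callback for the RT class: if @p is about to leave us with a
 * lower-priority queue, try to pull RT work from other CPUs before picking,
 * then report whether anything of RT priority or above is runnable here.
 */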
1483 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1485 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1487 * This is OK, because current is on_cpu, which avoids it being
1488 * picked for load-balance and preemption/IRQs are still
1489 * disabled avoiding further scheduler activity on it and we've
1490 * not yet started the picking loop.
1492 rq_unpin_lock(rq, rf);
1494 rq_repin_lock(rq, rf);
1497 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1499 #endif /* CONFIG_SMP */
1502 * Preempt the current task with a newly woken task if needed:
1504 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1506 if (p->prio < rq->curr->prio) {
1515 * - the newly woken task is of equal priority to the current task
1516 * - the newly woken task is non-migratable while current is migratable
1517 * - current will be preempted on the next reschedule
1519 * we should check to see if current can readily move to a different
1520 * cpu. If so, we will reschedule to allow the push logic to try
1521 * to move current somewhere else, making room for our non-migratable task.
1524 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1525 check_preempt_equal_prio(rq, p);
1529 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1531 p->se.exec_start = rq_clock_task(rq);
1533 /* The running task is never eligible for pushing */
1534 dequeue_pushable_task(rq, p);
1540 * If prev task was rt, put_prev_task() has already updated the
1541 * utilization. We only care about the case where we start to schedule an RT task.
1544 if (rq->curr->sched_class != &rt_sched_class)
1545 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1547 rt_queue_push_tasks(rq);
1550 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1551 struct rt_rq *rt_rq)
1553 struct rt_prio_array *array = &rt_rq->active;
1554 struct sched_rt_entity *next = NULL;
1555 struct list_head *queue;
1558 idx = sched_find_first_bit(array->bitmap);
1559 BUG_ON(idx >= MAX_RT_PRIO);
1561 queue = array->queue + idx;
1562 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1567 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1569 struct sched_rt_entity *rt_se;
1570 struct rt_rq *rt_rq = &rq->rt;
1573 rt_se = pick_next_rt_entity(rq, rt_rq);
1575 rt_rq = group_rt_rq(rt_se);
1578 return rt_task_of(rt_se);
1581 static struct task_struct *
1582 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1584 struct task_struct *p;
1586 WARN_ON_ONCE(prev || rf);
1588 if (!sched_rt_runnable(rq))
1591 p = _pick_next_task_rt(rq);
1592 set_next_task_rt(rq, p, true);
1596 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1600 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1603 * The previous task needs to be made eligible for pushing
1604 * if it is still active
1606 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1607 enqueue_pushable_task(rq, p);
1612 /* Only try algorithms three times */
1613 #define RT_MAX_TRIES 3
1615 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1617 if (!task_running(rq, p) &&
1618 cpumask_test_cpu(cpu, p->cpus_ptr))
1625 * Return the highest pushable rq's task, which is suitable to be executed
1626 * on the CPU, NULL otherwise
1628 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1630 struct plist_head *head = &rq->rt.pushable_tasks;
1631 struct task_struct *p;
1633 if (!has_pushable_tasks(rq))
1636 plist_for_each_entry(p, head, pushable_tasks) {
1637 if (pick_rt_task(rq, p, cpu))
1644 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
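/* Per-CPU scratch mask that cpupri_find() fills with the lowest-priority CPUs. */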
1646 static int find_lowest_rq(struct task_struct *task)
1648 struct sched_domain *sd;
1649 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1650 int this_cpu = smp_processor_id();
1651 int cpu = task_cpu(task);
1653 /* Make sure the mask is initialized first */
1654 if (unlikely(!lowest_mask))
1657 if (task->nr_cpus_allowed == 1)
1658 return -1; /* No other targets possible */
1660 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1661 return -1; /* No targets found */
1664 * At this point we have built a mask of CPUs representing the
1665 * lowest priority tasks in the system. Now we want to elect
1666 * the best one based on our affinity and topology.
1668 * We prioritize the last CPU that the task executed on since
1669 * it is most likely cache-hot in that location.
1671 if (cpumask_test_cpu(cpu, lowest_mask))
1675 * Otherwise, we consult the sched_domains span maps to figure
1676 * out which CPU is logically closest to our hot cache data.
1678 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1679 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1682 for_each_domain(cpu, sd) {
1683 if (sd->flags & SD_WAKE_AFFINE) {
1687 * "this_cpu" is cheaper to preempt than a
1690 if (this_cpu != -1 &&
1691 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1696 best_cpu = cpumask_first_and(lowest_mask,
1697 sched_domain_span(sd));
1698 if (best_cpu < nr_cpu_ids) {
1707 * And finally, if there were no matches within the domains
1708 * just give the caller *something* to work with from the compatible locations.
1714 cpu = cpumask_any(lowest_mask);
1715 if (cpu < nr_cpu_ids)
1721 /* Will lock the rq it finds */
1722 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1724 struct rq *lowest_rq = NULL;
1728 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1729 cpu = find_lowest_rq(task);
1731 if ((cpu == -1) || (cpu == rq->cpu))
1734 lowest_rq = cpu_rq(cpu);
1736 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1738 * Target rq has tasks of equal or higher priority,
1739 * retrying does not release any lock and is unlikely
1740 * to yield a different result.
1746 /* if the prio of this runqueue changed, try again */
1747 if (double_lock_balance(rq, lowest_rq)) {
1749 * We had to unlock the run queue. In
1750 * the meantime, task could have
1751 * migrated already or had its affinity changed.
1752 * Also make sure that it wasn't scheduled on its rq.
1754 if (unlikely(task_rq(task) != rq ||
1755 !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
1756 task_running(rq, task) ||
1758 !task_on_rq_queued(task))) {
1760 double_unlock_balance(rq, lowest_rq);
1766 /* If this rq is still suitable use it. */
1767 if (lowest_rq->rt.highest_prio.curr > task->prio)
1771 double_unlock_balance(rq, lowest_rq);
1778 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1780 struct task_struct *p;
1782 if (!has_pushable_tasks(rq))
1785 p = plist_first_entry(&rq->rt.pushable_tasks,
1786 struct task_struct, pushable_tasks);
1788 BUG_ON(rq->cpu != task_cpu(p));
1789 BUG_ON(task_current(rq, p));
1790 BUG_ON(p->nr_cpus_allowed <= 1);
1792 BUG_ON(!task_on_rq_queued(p));
1793 BUG_ON(!rt_task(p));
1799 * If the current CPU has more than one RT task, see if the non
1800 * running task can migrate over to a CPU that is running a task
1801 * of lesser priority.
1803 static int push_rt_task(struct rq *rq)
1805 struct task_struct *next_task;
1806 struct rq *lowest_rq;
1809 if (!rq->rt.overloaded)
1812 next_task = pick_next_pushable_task(rq);
1817 if (WARN_ON(next_task == rq->curr))
1821 * It's possible that the next_task slipped in with a
1822 * higher priority than current. If that's the case
1823 * just reschedule current.
1825 if (unlikely(next_task->prio < rq->curr->prio)) {
1830 /* We might release rq lock */
1831 get_task_struct(next_task);
1833 /* find_lock_lowest_rq locks the rq if found */
1834 lowest_rq = find_lock_lowest_rq(next_task, rq);
1836 struct task_struct *task;
1838 * find_lock_lowest_rq releases rq->lock
1839 * so it is possible that next_task has migrated.
1841 * We need to make sure that the task is still on the same
1842 * run-queue and is also still the next task eligible for pushing.
1845 task = pick_next_pushable_task(rq);
1846 if (task == next_task) {
1848 * The task hasn't migrated, and is still the next
1849 * eligible task, but we failed to find a run-queue
1850 * to push it to. Do not retry in this case, since
1851 * other CPUs will pull from us when ready.
1857 /* No more tasks, just exit */
1861 * Something has shifted, try again.
1863 put_task_struct(next_task);
1868 deactivate_task(rq, next_task, 0);
1869 set_task_cpu(next_task, lowest_rq->cpu);
1870 activate_task(lowest_rq, next_task, 0);
1873 resched_curr(lowest_rq);
1875 double_unlock_balance(rq, lowest_rq);
1878 put_task_struct(next_task);
1883 static void push_rt_tasks(struct rq *rq)
1885 /* push_rt_task will return true if it moved an RT */
1886 while (push_rt_task(rq))
1890 #ifdef HAVE_RT_PUSH_IPI
1893 * When a high priority task schedules out from a CPU and a lower priority
1894 * task is scheduled in, a check is made to see if there's any RT tasks
1895 * on other CPUs that are waiting to run because a higher priority RT task
1896 * is currently running on its CPU. In this case, the CPU with multiple RT
1897 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1898 * up that may be able to run one of its non-running queued RT tasks.
1900 * All CPUs with overloaded RT tasks need to be notified as there is currently
1901 * no way to know which of these CPUs have the highest priority task waiting
1902 * to run. Instead of trying to take a spinlock on each of these CPUs,
1903 * which has shown to cause large latency when done on machines with many
1904 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
1905 * RT tasks waiting to run.
1907 * Just sending an IPI to each of the CPUs is also an issue, as on large
1908 * count CPU machines, this can cause an IPI storm on a CPU, especially
1909 * if it's the only CPU with multiple RT tasks queued, and a large number
1910 * of CPUs scheduling a lower priority task at the same time.
1912 * Each root domain has its own irq work function that can iterate over
1913 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1914 * tasks must be checked if there's one or many CPUs that are lowering
1915 * their priority, there's a single irq work iterator that will try to
1916 * push off RT tasks that are waiting to run.
1918 * When a CPU schedules a lower priority task, it will kick off the
1919 * irq work iterator that will jump to each CPU with overloaded RT tasks.
1920 * As it only takes the first CPU that schedules a lower priority task
1921 * to start the process, the rto_start variable is incremented and if
1922 * the atomic result is one, then that CPU will try to take the rto_lock.
1923 * This prevents high contention on the lock as the process handles all
1924 * CPUs scheduling lower priority tasks.
1926 * All CPUs that are scheduling a lower priority task will increment the
1927 * rt_loop_next variable. This will make sure that the irq work iterator
1928 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1929 * priority task, even if the iterator is in the middle of a scan. Incrementing
1930 * the rt_loop_next will cause the iterator to perform another scan.
1933 static int rto_next_cpu(struct root_domain *rd)
1939 * When starting the IPI RT pushing, the rto_cpu is set to -1,
1940 * rto_next_cpu() will simply return the first CPU found in the rto_mask.
1943 * If rto_next_cpu() is called when rto_cpu is a valid CPU, it
1944 * will return the next CPU found in the rto_mask.
1946 * If there are no more CPUs left in the rto_mask, then a check is made
1947 * against rto_loop and rto_loop_next. rto_loop is only updated with
1948 * the rto_lock held, but any CPU may increment the rto_loop_next
1949 * without any locking.
1953 /* When rto_cpu is -1 this acts like cpumask_first() */
1954 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1958 if (cpu < nr_cpu_ids)
1964 * ACQUIRE ensures we see the @rto_mask changes
1965 * made prior to the @next value observed.
1967 * Matches WMB in rt_set_overload().
1969 next = atomic_read_acquire(&rd->rto_loop_next);
1971 if (rd->rto_loop == next)
1974 rd->rto_loop = next;
1980 static inline bool rto_start_trylock(atomic_t *v)
1982 return !atomic_cmpxchg_acquire(v, 0, 1);
1985 static inline void rto_start_unlock(atomic_t *v)
1987 atomic_set_release(v, 0);
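/*
 * Kick off (or keep alive) the IPI push chain for this root domain: bump
 * rto_loop_next so an in-flight iterator rescans, and if no iterator is
 * currently running, queue rto_push_work on the first RT-overloaded CPU.
 */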
1990 static void tell_cpu_to_push(struct rq *rq)
1994 /* Keep the loop going if the IPI is currently active */
1995 atomic_inc(&rq->rd->rto_loop_next);
1997 /* Only one CPU can initiate a loop at a time */
1998 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2001 raw_spin_lock(&rq->rd->rto_lock);
2004 * The rto_cpu is updated under the lock, if it has a valid CPU
2005 * then the IPI is still running and will continue due to the
2006 * update to loop_next, and nothing needs to be done here.
2007 * Otherwise it is finishing up and an IPI needs to be sent.
2009 if (rq->rd->rto_cpu < 0)
2010 cpu = rto_next_cpu(rq->rd);
2012 raw_spin_unlock(&rq->rd->rto_lock);
2014 rto_start_unlock(&rq->rd->rto_loop_start);
2017 /* Make sure the rd does not get freed while pushing */
2018 sched_get_rd(rq->rd);
2019 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2023 /* Called from hardirq context */
2024 void rto_push_irq_work_func(struct irq_work *work)
2026 struct root_domain *rd =
2027 container_of(work, struct root_domain, rto_push_work);
2034 * We do not need to grab the lock to check for has_pushable_tasks.
2035 * When it gets updated, a check is made if a push is possible.
2037 if (has_pushable_tasks(rq)) {
2038 raw_spin_lock(&rq->lock);
2040 raw_spin_unlock(&rq->lock);
2043 raw_spin_lock(&rd->rto_lock);
2045 /* Pass the IPI to the next rt overloaded queue */
2046 cpu = rto_next_cpu(rd);
2048 raw_spin_unlock(&rd->rto_lock);
2055 /* Try the next RT overloaded CPU */
2056 irq_work_queue_on(&rd->rto_push_work, cpu);
2058 #endif /* HAVE_RT_PUSH_IPI */
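/*
 * Look at every RT-overloaded CPU in the root domain and pull over any
 * pushable task that would preempt what this rq is about to run (or hand the
 * work to the IPI chain when RT_PUSH_IPI is enabled).
 */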
2060 static void pull_rt_task(struct rq *this_rq)
2062 int this_cpu = this_rq->cpu, cpu;
2063 bool resched = false;
2064 struct task_struct *p;
2066 int rt_overload_count = rt_overloaded(this_rq);
2068 if (likely(!rt_overload_count))
2072 * Match the barrier from rt_set_overload(); this guarantees that if we
2073 * see overloaded we must also see the rto_mask bit.
2077 /* If we are the only overloaded CPU do nothing */
2078 if (rt_overload_count == 1 &&
2079 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2082 #ifdef HAVE_RT_PUSH_IPI
2083 if (sched_feat(RT_PUSH_IPI)) {
2084 tell_cpu_to_push(this_rq);
2089 for_each_cpu(cpu, this_rq->rd->rto_mask) {
2090 if (this_cpu == cpu)
2093 src_rq = cpu_rq(cpu);
2096 * Don't bother taking the src_rq->lock if the next highest
2097 * task is known to be lower-priority than our current task.
2098 * This may look racy, but if this value is about to go
2099 * logically higher, the src_rq will push this task away.
2100 * And if it's going logically lower, we do not care.
2102 if (src_rq->rt.highest_prio.next >=
2103 this_rq->rt.highest_prio.curr)
2107 * We can potentially drop this_rq's lock in
2108 * double_lock_balance, and another CPU could alter this_rq.
2111 double_lock_balance(this_rq, src_rq);
2114 * We can only pull a task that is pushable
2115 * on its rq, and no others.
2117 p = pick_highest_pushable_task(src_rq, this_cpu);
2120 * Do we have an RT task that preempts
2121 * the to-be-scheduled task?
2123 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2124 WARN_ON(p == src_rq->curr);
2125 WARN_ON(!task_on_rq_queued(p));
2128 * There's a chance that p is higher in priority
2129 * than what's currently running on its CPU.
2130 * This is just that p is waking up and hasn't
2131 * had a chance to schedule. We only pull
2132 * p if it is lower in priority than the
2133 * current task on the run queue
2135 if (p->prio < src_rq->curr->prio)
2140 deactivate_task(src_rq, p, 0);
2141 set_task_cpu(p, this_cpu);
2142 activate_task(this_rq, p, 0);
2144 * We continue with the search, just in
2145 * case there's an even higher prio task
2146 * in another runqueue. (low likelihood but possible)
2151 double_unlock_balance(this_rq, src_rq);
2155 resched_curr(this_rq);
2159 * If we are not running and we are not going to reschedule soon, we should
2160 * try to push tasks away now
2162 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2164 if (!task_running(rq, p) &&
2165 !test_tsk_need_resched(rq->curr) &&
2166 p->nr_cpus_allowed > 1 &&
2167 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2168 (rq->curr->nr_cpus_allowed < 2 ||
2169 rq->curr->prio <= p->prio))
2173 /* Assumes rq->lock is held */
2174 static void rq_online_rt(struct rq *rq)
2176 if (rq->rt.overloaded)
2177 rt_set_overload(rq);
2179 __enable_runtime(rq);
2181 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2184 /* Assumes rq->lock is held */
2185 static void rq_offline_rt(struct rq *rq)
2187 if (rq->rt.overloaded)
2188 rt_clear_overload(rq);
2190 __disable_runtime(rq);
2192 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2196 * When switching from the RT queue, we bring ourselves to a position
2197 * where we might want to pull RT tasks from other runqueues.
2199 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2202 * If there are other RT tasks then we will reschedule
2203 * and the scheduling of the other RT tasks will handle
2204 * the balancing. But if we are the last RT task
2205 * we may need to handle the pulling of RT tasks now.
2208 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2211 rt_queue_pull_task(rq);
2214 void __init init_sched_rt_class(void)
2218 for_each_possible_cpu(i) {
2219 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2220 GFP_KERNEL, cpu_to_node(i));
2223 #endif /* CONFIG_SMP */
2226 * When switching a task to RT, we may overload the runqueue
2227 * with RT tasks. In this case we try to push them off to other CPUs.
2230 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2233 * If we are running, update the avg_rt tracking, as the running time
2234 * will from now on be accounted into the latter.
2236 if (task_current(rq, p)) {
2237 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2242 * If we are not running we may need to preempt the current
2243 * running task. If that current running task is also an RT task
2244 * then see if we can move to another run queue.
2246 if (task_on_rq_queued(p)) {
2248 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2249 rt_queue_push_tasks(rq);
2250 #endif /* CONFIG_SMP */
2251 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2257 * Priority of the task has changed. This may cause
2258 * us to initiate a push or pull.
2261 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2263 if (!task_on_rq_queued(p))
2266 if (rq->curr == p) {
2269 * If our priority decreases while running, we
2270 * may need to pull tasks to this runqueue.
2272 if (oldprio < p->prio)
2273 rt_queue_pull_task(rq);
2276 * If there's a higher priority task waiting to run, then reschedule.
2279 if (p->prio > rq->rt.highest_prio.curr)
2282 /* For UP simply resched on drop of prio */
2283 if (oldprio < p->prio)
2285 #endif /* CONFIG_SMP */
2288 * This task is not running, but if it is
2289 * greater than the current running task, then reschedule.
2292 if (p->prio < rq->curr->prio)
2297 #ifdef CONFIG_POSIX_TIMERS
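/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout counts ticks of uninterrupted RT
 * runtime; once it passes the soft limit, the POSIX CPU timer code is told
 * to fire for this task.
 */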
2298 static void watchdog(struct rq *rq, struct task_struct *p)
2300 unsigned long soft, hard;
2302 /* max may change after cur was read, this will be fixed next tick */
2303 soft = task_rlimit(p, RLIMIT_RTTIME);
2304 hard = task_rlimit_max(p, RLIMIT_RTTIME);
2306 if (soft != RLIM_INFINITY) {
2309 if (p->rt.watchdog_stamp != jiffies) {
2311 p->rt.watchdog_stamp = jiffies;
2314 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2315 if (p->rt.timeout > next) {
2316 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2317 p->se.sum_exec_runtime);
2322 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2326 * scheduler tick hitting a task of our scheduling class.
2328 * NOTE: This function can be called remotely by the tick offload that
2329 * goes along full dynticks. Therefore no local assumption can be made
2330 * and everything must be accessed through the @rq and @curr passed in parameters.
2333 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2335 struct sched_rt_entity *rt_se = &p->rt;
2338 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2343 * RR tasks need a special form of timeslice management.
2344 * FIFO tasks have no timeslices.
2346 if (p->policy != SCHED_RR)
2349 if (--p->rt.time_slice)
2352 p->rt.time_slice = sched_rr_timeslice;
2355 * Requeue to the end of queue if we (and all of our ancestors) are not
2356 * the only element on the queue
2358 for_each_sched_rt_entity(rt_se) {
2359 if (rt_se->run_list.prev != rt_se->run_list.next) {
2360 requeue_task_rt(rq, p, 0);
2367 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2370 * Time slice is 0 for SCHED_FIFO tasks
2372 if (task->policy == SCHED_RR)
2373 return sched_rr_timeslice;
2378 const struct sched_class rt_sched_class = {
2379 .next = &fair_sched_class,
2380 .enqueue_task = enqueue_task_rt,
2381 .dequeue_task = dequeue_task_rt,
2382 .yield_task = yield_task_rt,
2384 .check_preempt_curr = check_preempt_curr_rt,
2386 .pick_next_task = pick_next_task_rt,
2387 .put_prev_task = put_prev_task_rt,
2388 .set_next_task = set_next_task_rt,
2391 .balance = balance_rt,
2392 .select_task_rq = select_task_rq_rt,
2393 .set_cpus_allowed = set_cpus_allowed_common,
2394 .rq_online = rq_online_rt,
2395 .rq_offline = rq_offline_rt,
2396 .task_woken = task_woken_rt,
2397 .switched_from = switched_from_rt,
2400 .task_tick = task_tick_rt,
2402 .get_rr_interval = get_rr_interval_rt,
2404 .prio_changed = prio_changed_rt,
2405 .switched_to = switched_to_rt,
2407 .update_curr = update_curr_rt,
2409 #ifdef CONFIG_UCLAMP_TASK
2410 .uclamp_enabled = 1,
2414 #ifdef CONFIG_RT_GROUP_SCHED
2416 * Ensure that the real time constraints are schedulable.
2418 static DEFINE_MUTEX(rt_constraints_mutex);
2420 /* Must be called with tasklist_lock held */
2421 static inline int tg_has_rt_tasks(struct task_group *tg)
2423 struct task_struct *g, *p;
2426 * Autogroups do not have RT tasks; see autogroup_create().
2428 if (task_group_is_autogroup(tg))
2431 for_each_process_thread(g, p) {
2432 if (rt_task(p) && task_group(p) == tg)
2439 struct rt_schedulable_data {
2440 struct task_group *tg;
2445 static int tg_rt_schedulable(struct task_group *tg, void *data)
2447 struct rt_schedulable_data *d = data;
2448 struct task_group *child;
2449 unsigned long total, sum = 0;
2450 u64 period, runtime;
2452 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2453 runtime = tg->rt_bandwidth.rt_runtime;
2456 period = d->rt_period;
2457 runtime = d->rt_runtime;
2461 * Cannot have more runtime than the period.
2463 if (runtime > period && runtime != RUNTIME_INF)
2467 * Ensure we don't starve existing RT tasks.
2469 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
2472 total = to_ratio(period, runtime);
2475 * Nobody can have more than the global setting allows.
2477 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2481 * The sum of our children's runtime should not exceed our own.
2483 list_for_each_entry_rcu(child, &tg->children, siblings) {
2484 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2485 runtime = child->rt_bandwidth.rt_runtime;
2487 if (child == d->tg) {
2488 period = d->rt_period;
2489 runtime = d->rt_runtime;
2492 sum += to_ratio(period, runtime);
2501 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2505 struct rt_schedulable_data data = {
2507 .rt_period = period,
2508 .rt_runtime = runtime,
2512 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2518 static int tg_set_rt_bandwidth(struct task_group *tg,
2519 u64 rt_period, u64 rt_runtime)
2524 * Disallowing the root group RT runtime is BAD; it would disallow the
2525 * kernel from creating (and/or operating) RT threads.
2527 if (tg == &root_task_group && rt_runtime == 0)
2530 /* A zero period doesn't make any sense. */
2535 * Bound quota to defend quota against overflow during bandwidth shift.
2537 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2540 mutex_lock(&rt_constraints_mutex);
2541 read_lock(&tasklist_lock);
2542 err = __rt_schedulable(tg, rt_period, rt_runtime);
2546 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2547 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2548 tg->rt_bandwidth.rt_runtime = rt_runtime;
2550 for_each_possible_cpu(i) {
2551 struct rt_rq *rt_rq = tg->rt_rq[i];
2553 raw_spin_lock(&rt_rq->rt_runtime_lock);
2554 rt_rq->rt_runtime = rt_runtime;
2555 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2557 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2559 read_unlock(&tasklist_lock);
2560 mutex_unlock(&rt_constraints_mutex);
2565 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2567 u64 rt_runtime, rt_period;
2569 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2570 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2571 if (rt_runtime_us < 0)
2572 rt_runtime = RUNTIME_INF;
2573 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2576 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2579 long sched_group_rt_runtime(struct task_group *tg)
2583 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2586 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2587 do_div(rt_runtime_us, NSEC_PER_USEC);
2588 return rt_runtime_us;
2591 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2593 u64 rt_runtime, rt_period;
2595 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2598 rt_period = rt_period_us * NSEC_PER_USEC;
2599 rt_runtime = tg->rt_bandwidth.rt_runtime;
2601 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2604 long sched_group_rt_period(struct task_group *tg)
2608 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2609 do_div(rt_period_us, NSEC_PER_USEC);
2610 return rt_period_us;
2613 static int sched_rt_global_constraints(void)
2617 mutex_lock(&rt_constraints_mutex);
2618 read_lock(&tasklist_lock);
2619 ret = __rt_schedulable(NULL, 0, 0);
2620 read_unlock(&tasklist_lock);
2621 mutex_unlock(&rt_constraints_mutex);
2626 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2628 /* Don't accept realtime tasks when there is no way for them to run */
2629 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2635 #else /* !CONFIG_RT_GROUP_SCHED */
2636 static int sched_rt_global_constraints(void)
2638 unsigned long flags;
2641 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2642 for_each_possible_cpu(i) {
2643 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2645 raw_spin_lock(&rt_rq->rt_runtime_lock);
2646 rt_rq->rt_runtime = global_rt_runtime();
2647 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2649 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2653 #endif /* CONFIG_RT_GROUP_SCHED */
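/*
 * Sanity-check the global sysctl values: the period must be positive and a
 * finite runtime may neither exceed the period nor max_rt_runtime.
 */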
2655 static int sched_rt_global_validate(void)
2657 if (sysctl_sched_rt_period <= 0)
2660 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2661 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2662 ((u64)sysctl_sched_rt_runtime *
2663 NSEC_PER_USEC > max_rt_runtime)))
2669 static void sched_rt_do_global(void)
2671 unsigned long flags;
2673 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2674 def_rt_bandwidth.rt_runtime = global_rt_runtime();
2675 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2676 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2679 int sched_rt_handler(struct ctl_table *table, int write,
2680 void __user *buffer, size_t *lenp,
2683 int old_period, old_runtime;
2684 static DEFINE_MUTEX(mutex);
2688 old_period = sysctl_sched_rt_period;
2689 old_runtime = sysctl_sched_rt_runtime;
2691 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2693 if (!ret && write) {
2694 ret = sched_rt_global_validate();
2698 ret = sched_dl_global_validate();
2702 ret = sched_rt_global_constraints();
2706 sched_rt_do_global();
2707 sched_dl_do_global();
2711 sysctl_sched_rt_period = old_period;
2712 sysctl_sched_rt_runtime = old_runtime;
2714 mutex_unlock(&mutex);
2719 int sched_rr_handler(struct ctl_table *table, int write,
2720 void __user *buffer, size_t *lenp,
2724 static DEFINE_MUTEX(mutex);
2727 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2729 * Make sure that internally we keep jiffies.
2730 * Also, writing zero resets the timeslice to default:
2732 if (!ret && write) {
2733 sched_rr_timeslice =
2734 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2735 msecs_to_jiffies(sysctl_sched_rr_timeslice);
2737 mutex_unlock(&mutex);
2742 #ifdef CONFIG_SCHED_DEBUG
2743 void print_rt_stats(struct seq_file *m, int cpu)
2746 struct rt_rq *rt_rq;
2749 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2750 print_rt_rq(m, cpu, rt_rq);
2753 #endif /* CONFIG_SCHED_DEBUG */