GNU Linux-libre 4.14.259-gnu1
kernel/sched/rt.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6
7 #include "sched.h"
8
9 #include <linux/slab.h>
10 #include <linux/irq_work.h>
11
12 int sched_rr_timeslice = RR_TIMESLICE;
13 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
14
15 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
16
17 struct rt_bandwidth def_rt_bandwidth;
18
19 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
20 {
21         struct rt_bandwidth *rt_b =
22                 container_of(timer, struct rt_bandwidth, rt_period_timer);
23         int idle = 0;
24         int overrun;
25
26         raw_spin_lock(&rt_b->rt_runtime_lock);
27         for (;;) {
28                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
29                 if (!overrun)
30                         break;
31
32                 raw_spin_unlock(&rt_b->rt_runtime_lock);
33                 idle = do_sched_rt_period_timer(rt_b, overrun);
34                 raw_spin_lock(&rt_b->rt_runtime_lock);
35         }
36         if (idle)
37                 rt_b->rt_period_active = 0;
38         raw_spin_unlock(&rt_b->rt_runtime_lock);
39
40         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
41 }
42
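/*
 * Set up an rt_bandwidth structure: record the period/runtime pair and
 * initialize the replenishment hrtimer (it is armed by start_rt_bandwidth()).
 */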
43 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
44 {
45         rt_b->rt_period = ns_to_ktime(period);
46         rt_b->rt_runtime = runtime;
47
48         raw_spin_lock_init(&rt_b->rt_runtime_lock);
49
50         hrtimer_init(&rt_b->rt_period_timer,
51                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
52         rt_b->rt_period_timer.function = sched_rt_period_timer;
53 }
54
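/*
 * Arm the replenishment timer, unless RT throttling is disabled or the
 * runtime is infinite, and only if it is not already active for this period.
 */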
55 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
56 {
57         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
58                 return;
59
60         raw_spin_lock(&rt_b->rt_runtime_lock);
61         if (!rt_b->rt_period_active) {
62                 rt_b->rt_period_active = 1;
63                 /*
64                  * SCHED_DEADLINE updates the bandwidth, as a run away
65                  * RT task with a DL task could hog a CPU. But DL does
66                  * not reset the period. If a deadline task was running
67                  * without an RT task running, it can cause RT tasks to
68                  * throttle when they start up. Kick the timer right away
69                  * to update the period.
70                  */
71                 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
72                 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
73         }
74         raw_spin_unlock(&rt_b->rt_runtime_lock);
75 }
76
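/*
 * Initialize an rt_rq: empty priority array, zero accounted runtime and no
 * throttling.
 */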
77 void init_rt_rq(struct rt_rq *rt_rq)
78 {
79         struct rt_prio_array *array;
80         int i;
81
82         array = &rt_rq->active;
83         for (i = 0; i < MAX_RT_PRIO; i++) {
84                 INIT_LIST_HEAD(array->queue + i);
85                 __clear_bit(i, array->bitmap);
86         }
87         /* delimiter for bitsearch: */
88         __set_bit(MAX_RT_PRIO, array->bitmap);
89
90 #if defined CONFIG_SMP
91         rt_rq->highest_prio.curr = MAX_RT_PRIO;
92         rt_rq->highest_prio.next = MAX_RT_PRIO;
93         rt_rq->rt_nr_migratory = 0;
94         rt_rq->overloaded = 0;
95         plist_head_init(&rt_rq->pushable_tasks);
96 #endif /* CONFIG_SMP */
97         /* We start in dequeued state, because no RT tasks are queued */
98         rt_rq->rt_queued = 0;
99
100         rt_rq->rt_time = 0;
101         rt_rq->rt_throttled = 0;
102         rt_rq->rt_runtime = 0;
103         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
104 }
105
106 #ifdef CONFIG_RT_GROUP_SCHED
107 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
108 {
109         hrtimer_cancel(&rt_b->rt_period_timer);
110 }
111
112 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
113
114 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
115 {
116 #ifdef CONFIG_SCHED_DEBUG
117         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
118 #endif
119         return container_of(rt_se, struct task_struct, rt);
120 }
121
122 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
123 {
124         return rt_rq->rq;
125 }
126
127 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
128 {
129         return rt_se->rt_rq;
130 }
131
132 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
133 {
134         struct rt_rq *rt_rq = rt_se->rt_rq;
135
136         return rt_rq->rq;
137 }
138
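/*
 * Free a task group's per-CPU rt_rq and sched_rt_entity arrays and cancel
 * its bandwidth replenishment timer.
 */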
139 void free_rt_sched_group(struct task_group *tg)
140 {
141         int i;
142
143         if (tg->rt_se)
144                 destroy_rt_bandwidth(&tg->rt_bandwidth);
145
146         for_each_possible_cpu(i) {
147                 if (tg->rt_rq)
148                         kfree(tg->rt_rq[i]);
149                 if (tg->rt_se)
150                         kfree(tg->rt_se[i]);
151         }
152
153         kfree(tg->rt_rq);
154         kfree(tg->rt_se);
155 }
156
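/*
 * Attach one CPU's rt_rq and sched_rt_entity to a task group, parenting the
 * entity to either the root rt_rq or the parent group's queue.
 */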
157 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
158                 struct sched_rt_entity *rt_se, int cpu,
159                 struct sched_rt_entity *parent)
160 {
161         struct rq *rq = cpu_rq(cpu);
162
163         rt_rq->highest_prio.curr = MAX_RT_PRIO;
164         rt_rq->rt_nr_boosted = 0;
165         rt_rq->rq = rq;
166         rt_rq->tg = tg;
167
168         tg->rt_rq[cpu] = rt_rq;
169         tg->rt_se[cpu] = rt_se;
170
171         if (!rt_se)
172                 return;
173
174         if (!parent)
175                 rt_se->rt_rq = &rq->rt;
176         else
177                 rt_se->rt_rq = parent->my_q;
178
179         rt_se->my_q = rt_rq;
180         rt_se->parent = parent;
181         INIT_LIST_HEAD(&rt_se->run_list);
182 }
183
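/*
 * Allocate and initialize the per-CPU rt_rq and sched_rt_entity arrays for a
 * new task group. Returns 1 on success, 0 on allocation failure.
 */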
184 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
185 {
186         struct rt_rq *rt_rq;
187         struct sched_rt_entity *rt_se;
188         int i;
189
190         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
191         if (!tg->rt_rq)
192                 goto err;
193         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
194         if (!tg->rt_se)
195                 goto err;
196
197         init_rt_bandwidth(&tg->rt_bandwidth,
198                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
199
200         for_each_possible_cpu(i) {
201                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
202                                      GFP_KERNEL, cpu_to_node(i));
203                 if (!rt_rq)
204                         goto err;
205
206                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
207                                      GFP_KERNEL, cpu_to_node(i));
208                 if (!rt_se)
209                         goto err_free_rq;
210
211                 init_rt_rq(rt_rq);
212                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
213                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
214         }
215
216         return 1;
217
218 err_free_rq:
219         kfree(rt_rq);
220 err:
221         return 0;
222 }
223
224 #else /* CONFIG_RT_GROUP_SCHED */
225
226 #define rt_entity_is_task(rt_se) (1)
227
228 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
229 {
230         return container_of(rt_se, struct task_struct, rt);
231 }
232
233 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
234 {
235         return container_of(rt_rq, struct rq, rt);
236 }
237
238 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
239 {
240         struct task_struct *p = rt_task_of(rt_se);
241
242         return task_rq(p);
243 }
244
245 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
246 {
247         struct rq *rq = rq_of_rt_se(rt_se);
248
249         return &rq->rt;
250 }
251
252 void free_rt_sched_group(struct task_group *tg) { }
253
254 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
255 {
256         return 1;
257 }
258 #endif /* CONFIG_RT_GROUP_SCHED */
259
260 #ifdef CONFIG_SMP
261
262 static void pull_rt_task(struct rq *this_rq);
263
264 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
265 {
266         /* Try to pull RT tasks here if we lower this rq's prio */
267         return rq->rt.highest_prio.curr > prev->prio;
268 }
269
270 static inline int rt_overloaded(struct rq *rq)
271 {
272         return atomic_read(&rq->rd->rto_count);
273 }
274
275 static inline void rt_set_overload(struct rq *rq)
276 {
277         if (!rq->online)
278                 return;
279
280         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
281         /*
282          * Make sure the mask is visible before we set
283          * the overload count. That is checked to determine
284          * if we should look at the mask. It would be a shame
285          * if we looked at the mask, but the mask was not
286          * updated yet.
287          *
288          * Matched by the barrier in pull_rt_task().
289          */
290         smp_wmb();
291         atomic_inc(&rq->rd->rto_count);
292 }
293
294 static inline void rt_clear_overload(struct rq *rq)
295 {
296         if (!rq->online)
297                 return;
298
299         /* the order here really doesn't matter */
300         atomic_dec(&rq->rd->rto_count);
301         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
302 }
303
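/*
 * Track runqueue overload: set the overload flag once the rq holds more than
 * one RT task and at least one of them may migrate, clear it otherwise.
 */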
304 static void update_rt_migration(struct rt_rq *rt_rq)
305 {
306         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
307                 if (!rt_rq->overloaded) {
308                         rt_set_overload(rq_of_rt_rq(rt_rq));
309                         rt_rq->overloaded = 1;
310                 }
311         } else if (rt_rq->overloaded) {
312                 rt_clear_overload(rq_of_rt_rq(rt_rq));
313                 rt_rq->overloaded = 0;
314         }
315 }
316
317 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
318 {
319         struct task_struct *p;
320
321         if (!rt_entity_is_task(rt_se))
322                 return;
323
324         p = rt_task_of(rt_se);
325         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
326
327         rt_rq->rt_nr_total++;
328         if (p->nr_cpus_allowed > 1)
329                 rt_rq->rt_nr_migratory++;
330
331         update_rt_migration(rt_rq);
332 }
333
334 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
335 {
336         struct task_struct *p;
337
338         if (!rt_entity_is_task(rt_se))
339                 return;
340
341         p = rt_task_of(rt_se);
342         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
343
344         rt_rq->rt_nr_total--;
345         if (p->nr_cpus_allowed > 1)
346                 rt_rq->rt_nr_migratory--;
347
348         update_rt_migration(rt_rq);
349 }
350
351 static inline int has_pushable_tasks(struct rq *rq)
352 {
353         return !plist_head_empty(&rq->rt.pushable_tasks);
354 }
355
356 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
357 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
358
359 static void push_rt_tasks(struct rq *);
360 static void pull_rt_task(struct rq *);
361
362 static inline void queue_push_tasks(struct rq *rq)
363 {
364         if (!has_pushable_tasks(rq))
365                 return;
366
367         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
368 }
369
370 static inline void queue_pull_task(struct rq *rq)
371 {
372         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
373 }
374
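/*
 * (Re)insert @p into the rq's plist of pushable tasks, keeping it ordered by
 * priority, and update the cached highest pushable priority.
 */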
375 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
376 {
377         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
378         plist_node_init(&p->pushable_tasks, p->prio);
379         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
380
381         /* Update the highest prio pushable task */
382         if (p->prio < rq->rt.highest_prio.next)
383                 rq->rt.highest_prio.next = p->prio;
384 }
385
386 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
387 {
388         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
389
390         /* Update the new highest prio pushable task */
391         if (has_pushable_tasks(rq)) {
392                 p = plist_first_entry(&rq->rt.pushable_tasks,
393                                       struct task_struct, pushable_tasks);
394                 rq->rt.highest_prio.next = p->prio;
395         } else
396                 rq->rt.highest_prio.next = MAX_RT_PRIO;
397 }
398
399 #else
400
401 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
402 {
403 }
404
405 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
406 {
407 }
408
409 static inline
410 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
411 {
412 }
413
414 static inline
415 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
416 {
417 }
418
419 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
420 {
421         return false;
422 }
423
424 static inline void pull_rt_task(struct rq *this_rq)
425 {
426 }
427
428 static inline void queue_push_tasks(struct rq *rq)
429 {
430 }
431 #endif /* CONFIG_SMP */
432
433 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
434 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
435
436 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
437 {
438         return rt_se->on_rq;
439 }
440
441 #ifdef CONFIG_RT_GROUP_SCHED
442
443 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
444 {
445         if (!rt_rq->tg)
446                 return RUNTIME_INF;
447
448         return rt_rq->rt_runtime;
449 }
450
451 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
452 {
453         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
454 }
455
456 typedef struct task_group *rt_rq_iter_t;
457
458 static inline struct task_group *next_task_group(struct task_group *tg)
459 {
460         do {
461                 tg = list_entry_rcu(tg->list.next,
462                         typeof(struct task_group), list);
463         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
464
465         if (&tg->list == &task_groups)
466                 tg = NULL;
467
468         return tg;
469 }
470
471 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
472         for (iter = container_of(&task_groups, typeof(*iter), list);    \
473                 (iter = next_task_group(iter)) &&                       \
474                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
475
476 #define for_each_sched_rt_entity(rt_se) \
477         for (; rt_se; rt_se = rt_se->parent)
478
479 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
480 {
481         return rt_se->my_q;
482 }
483
484 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
485 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
486
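/*
 * Re-enqueue a group rt_rq (e.g. after unthrottling) and reschedule if it
 * now holds a higher-priority task than the one currently running.
 */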
487 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
488 {
489         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
490         struct rq *rq = rq_of_rt_rq(rt_rq);
491         struct sched_rt_entity *rt_se;
492
493         int cpu = cpu_of(rq);
494
495         rt_se = rt_rq->tg->rt_se[cpu];
496
497         if (rt_rq->rt_nr_running) {
498                 if (!rt_se)
499                         enqueue_top_rt_rq(rt_rq);
500                 else if (!on_rt_rq(rt_se))
501                         enqueue_rt_entity(rt_se, 0);
502
503                 if (rt_rq->highest_prio.curr < curr->prio)
504                         resched_curr(rq);
505         }
506 }
507
508 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
509 {
510         struct sched_rt_entity *rt_se;
511         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
512
513         rt_se = rt_rq->tg->rt_se[cpu];
514
515         if (!rt_se)
516                 dequeue_top_rt_rq(rt_rq);
517         else if (on_rt_rq(rt_se))
518                 dequeue_rt_entity(rt_se, 0);
519 }
520
521 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
522 {
523         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
524 }
525
526 static int rt_se_boosted(struct sched_rt_entity *rt_se)
527 {
528         struct rt_rq *rt_rq = group_rt_rq(rt_se);
529         struct task_struct *p;
530
531         if (rt_rq)
532                 return !!rt_rq->rt_nr_boosted;
533
534         p = rt_task_of(rt_se);
535         return p->prio != p->normal_prio;
536 }
537
538 #ifdef CONFIG_SMP
539 static inline const struct cpumask *sched_rt_period_mask(void)
540 {
541         return this_rq()->rd->span;
542 }
543 #else
544 static inline const struct cpumask *sched_rt_period_mask(void)
545 {
546         return cpu_online_mask;
547 }
548 #endif
549
550 static inline
551 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
552 {
553         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
554 }
555
556 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
557 {
558         return &rt_rq->tg->rt_bandwidth;
559 }
560
561 #else /* !CONFIG_RT_GROUP_SCHED */
562
563 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
564 {
565         return rt_rq->rt_runtime;
566 }
567
568 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
569 {
570         return ktime_to_ns(def_rt_bandwidth.rt_period);
571 }
572
573 typedef struct rt_rq *rt_rq_iter_t;
574
575 #define for_each_rt_rq(rt_rq, iter, rq) \
576         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
577
578 #define for_each_sched_rt_entity(rt_se) \
579         for (; rt_se; rt_se = NULL)
580
581 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
582 {
583         return NULL;
584 }
585
586 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
587 {
588         struct rq *rq = rq_of_rt_rq(rt_rq);
589
590         if (!rt_rq->rt_nr_running)
591                 return;
592
593         enqueue_top_rt_rq(rt_rq);
594         resched_curr(rq);
595 }
596
597 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
598 {
599         dequeue_top_rt_rq(rt_rq);
600 }
601
602 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
603 {
604         return rt_rq->rt_throttled;
605 }
606
607 static inline const struct cpumask *sched_rt_period_mask(void)
608 {
609         return cpu_online_mask;
610 }
611
612 static inline
613 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
614 {
615         return &cpu_rq(cpu)->rt;
616 }
617
618 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
619 {
620         return &def_rt_bandwidth;
621 }
622
623 #endif /* CONFIG_RT_GROUP_SCHED */
624
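/*
 * True while the replenishment timer is active or the rt_rq still has
 * runtime left in the current period.
 */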
625 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
626 {
627         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
628
629         return (hrtimer_active(&rt_b->rt_period_timer) ||
630                 rt_rq->rt_time < rt_b->rt_runtime);
631 }
632
633 #ifdef CONFIG_SMP
634 /*
635  * We ran out of runtime, see if we can borrow some from our neighbours.
636  */
637 static void do_balance_runtime(struct rt_rq *rt_rq)
638 {
639         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
640         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
641         int i, weight;
642         u64 rt_period;
643
644         weight = cpumask_weight(rd->span);
645
646         raw_spin_lock(&rt_b->rt_runtime_lock);
647         rt_period = ktime_to_ns(rt_b->rt_period);
648         for_each_cpu(i, rd->span) {
649                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
650                 s64 diff;
651
652                 if (iter == rt_rq)
653                         continue;
654
655                 raw_spin_lock(&iter->rt_runtime_lock);
656                 /*
657                  * Either all rqs have inf runtime and there's nothing to steal
658                  * or __disable_runtime() below sets a specific rq to inf to
659                  * indicate it's been disabled and disallow stealing.
660                  */
661                 if (iter->rt_runtime == RUNTIME_INF)
662                         goto next;
663
664                 /*
665                  * From runqueues with spare time, take 1/n part of their
666                  * spare time, but no more than our period.
667                  */
668                 diff = iter->rt_runtime - iter->rt_time;
669                 if (diff > 0) {
670                         diff = div_u64((u64)diff, weight);
671                         if (rt_rq->rt_runtime + diff > rt_period)
672                                 diff = rt_period - rt_rq->rt_runtime;
673                         iter->rt_runtime -= diff;
674                         rt_rq->rt_runtime += diff;
675                         if (rt_rq->rt_runtime == rt_period) {
676                                 raw_spin_unlock(&iter->rt_runtime_lock);
677                                 break;
678                         }
679                 }
680 next:
681                 raw_spin_unlock(&iter->rt_runtime_lock);
682         }
683         raw_spin_unlock(&rt_b->rt_runtime_lock);
684 }
685
686 /*
687  * Ensure this RQ takes back all the runtime it lent to its neighbours.
688  */
689 static void __disable_runtime(struct rq *rq)
690 {
691         struct root_domain *rd = rq->rd;
692         rt_rq_iter_t iter;
693         struct rt_rq *rt_rq;
694
695         if (unlikely(!scheduler_running))
696                 return;
697
698         for_each_rt_rq(rt_rq, iter, rq) {
699                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
700                 s64 want;
701                 int i;
702
703                 raw_spin_lock(&rt_b->rt_runtime_lock);
704                 raw_spin_lock(&rt_rq->rt_runtime_lock);
705                 /*
706                  * Either we're all inf and nobody needs to borrow, or we're
707                  * already disabled and thus have nothing to do, or we have
708                  * exactly the right amount of runtime to take out.
709                  */
710                 if (rt_rq->rt_runtime == RUNTIME_INF ||
711                                 rt_rq->rt_runtime == rt_b->rt_runtime)
712                         goto balanced;
713                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
714
715                 /*
716                  * Calculate the difference between what we started out with
717                  * and what we currently have; that's the amount of runtime
718                  * we lent and now have to reclaim.
719                  */
720                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
721
722                 /*
723                  * Greedy reclaim, take back as much as we can.
724                  */
725                 for_each_cpu(i, rd->span) {
726                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
727                         s64 diff;
728
729                         /*
730                          * Can't reclaim from ourselves or disabled runqueues.
731                          */
732                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
733                                 continue;
734
735                         raw_spin_lock(&iter->rt_runtime_lock);
736                         if (want > 0) {
737                                 diff = min_t(s64, iter->rt_runtime, want);
738                                 iter->rt_runtime -= diff;
739                                 want -= diff;
740                         } else {
741                                 iter->rt_runtime -= want;
742                                 want -= want;
743                         }
744                         raw_spin_unlock(&iter->rt_runtime_lock);
745
746                         if (!want)
747                                 break;
748                 }
749
750                 raw_spin_lock(&rt_rq->rt_runtime_lock);
751                 /*
752                  * We cannot be left wanting - that would mean some runtime
753                  * leaked out of the system.
754                  */
755                 BUG_ON(want);
756 balanced:
757                 /*
758                  * Disable all the borrow logic by pretending we have inf
759                  * runtime - in which case borrowing doesn't make sense.
760                  */
761                 rt_rq->rt_runtime = RUNTIME_INF;
762                 rt_rq->rt_throttled = 0;
763                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
764                 raw_spin_unlock(&rt_b->rt_runtime_lock);
765
766                 /* Make rt_rq available for pick_next_task() */
767                 sched_rt_rq_enqueue(rt_rq);
768         }
769 }
770
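/*
 * Reset every rt_rq on this runqueue to its group's configured runtime and
 * clear throttling state; the counterpart of __disable_runtime().
 */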
771 static void __enable_runtime(struct rq *rq)
772 {
773         rt_rq_iter_t iter;
774         struct rt_rq *rt_rq;
775
776         if (unlikely(!scheduler_running))
777                 return;
778
779         /*
780          * Reset each runqueue's bandwidth settings
781          */
782         for_each_rt_rq(rt_rq, iter, rq) {
783                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
784
785                 raw_spin_lock(&rt_b->rt_runtime_lock);
786                 raw_spin_lock(&rt_rq->rt_runtime_lock);
787                 rt_rq->rt_runtime = rt_b->rt_runtime;
788                 rt_rq->rt_time = 0;
789                 rt_rq->rt_throttled = 0;
790                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
791                 raw_spin_unlock(&rt_b->rt_runtime_lock);
792         }
793 }
794
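/*
 * If this rt_rq has overrun its local runtime, try to borrow some from the
 * other CPUs in the root domain (RT_RUNTIME_SHARE). Called with the rt_rq's
 * rt_runtime_lock held; it is dropped around do_balance_runtime().
 */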
795 static void balance_runtime(struct rt_rq *rt_rq)
796 {
797         if (!sched_feat(RT_RUNTIME_SHARE))
798                 return;
799
800         if (rt_rq->rt_time > rt_rq->rt_runtime) {
801                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
802                 do_balance_runtime(rt_rq);
803                 raw_spin_lock(&rt_rq->rt_runtime_lock);
804         }
805 }
806 #else /* !CONFIG_SMP */
807 static inline void balance_runtime(struct rt_rq *rt_rq) {}
808 #endif /* CONFIG_SMP */
809
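/*
 * Per-period replenishment: for every CPU in the period mask, decay rt_time,
 * unthrottle runqueues that fit their budget again and re-enqueue them.
 * Returns 1 when the timer no longer needs to run.
 */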
810 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
811 {
812         int i, idle = 1, throttled = 0;
813         const struct cpumask *span;
814
815         span = sched_rt_period_mask();
816 #ifdef CONFIG_RT_GROUP_SCHED
817         /*
818          * FIXME: isolated CPUs should really leave the root task group,
819          * whether they are isolcpus or were isolated via cpusets, lest
820          * the timer run on a CPU which does not service all runqueues,
821          * potentially leaving other CPUs indefinitely throttled.  If
822          * isolation is really required, the user will turn the throttle
823          * off to kill the perturbations it causes anyway.  Meanwhile,
824          * this maintains functionality for boot and/or troubleshooting.
825          */
826         if (rt_b == &root_task_group.rt_bandwidth)
827                 span = cpu_online_mask;
828 #endif
829         for_each_cpu(i, span) {
830                 int enqueue = 0;
831                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
832                 struct rq *rq = rq_of_rt_rq(rt_rq);
833                 int skip;
834
835                 /*
836                  * When span == cpu_online_mask, taking each rq->lock
837                  * can be time-consuming. Try to avoid it when possible.
838                  */
839                 raw_spin_lock(&rt_rq->rt_runtime_lock);
840                 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
841                         rt_rq->rt_runtime = rt_b->rt_runtime;
842                 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
843                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
844                 if (skip)
845                         continue;
846
847                 raw_spin_lock(&rq->lock);
848                 update_rq_clock(rq);
849
850                 if (rt_rq->rt_time) {
851                         u64 runtime;
852
853                         raw_spin_lock(&rt_rq->rt_runtime_lock);
854                         if (rt_rq->rt_throttled)
855                                 balance_runtime(rt_rq);
856                         runtime = rt_rq->rt_runtime;
857                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
858                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
859                                 rt_rq->rt_throttled = 0;
860                                 enqueue = 1;
861
862                                 /*
863                                  * When we're idle and a woken (rt) task is
864                                  * throttled, check_preempt_curr() will set
865                                  * skip_update and the time between the wakeup
866                                  * and this unthrottle will get accounted as
867                                  * 'runtime'.
868                                  */
869                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
870                                         rq_clock_skip_update(rq, false);
871                         }
872                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
873                                 idle = 0;
874                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
875                 } else if (rt_rq->rt_nr_running) {
876                         idle = 0;
877                         if (!rt_rq_throttled(rt_rq))
878                                 enqueue = 1;
879                 }
880                 if (rt_rq->rt_throttled)
881                         throttled = 1;
882
883                 if (enqueue)
884                         sched_rt_rq_enqueue(rt_rq);
885                 raw_spin_unlock(&rq->lock);
886         }
887
888         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
889                 return 1;
890
891         return idle;
892 }
893
894 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
895 {
896 #ifdef CONFIG_RT_GROUP_SCHED
897         struct rt_rq *rt_rq = group_rt_rq(rt_se);
898
899         if (rt_rq)
900                 return rt_rq->highest_prio.curr;
901 #endif
902
903         return rt_task_of(rt_se)->prio;
904 }
905
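/*
 * Check whether this rt_rq has exhausted its runtime for the current period
 * and, if so, throttle and dequeue it. Returns 1 when the caller should
 * reschedule.
 */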
906 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
907 {
908         u64 runtime = sched_rt_runtime(rt_rq);
909
910         if (rt_rq->rt_throttled)
911                 return rt_rq_throttled(rt_rq);
912
913         if (runtime >= sched_rt_period(rt_rq))
914                 return 0;
915
916         balance_runtime(rt_rq);
917         runtime = sched_rt_runtime(rt_rq);
918         if (runtime == RUNTIME_INF)
919                 return 0;
920
921         if (rt_rq->rt_time > runtime) {
922                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
923
924                 /*
925                  * Don't actually throttle groups that have no runtime assigned
926                  * but accrue some time due to boosting.
927                  */
928                 if (likely(rt_b->rt_runtime)) {
929                         rt_rq->rt_throttled = 1;
930                         printk_deferred_once("sched: RT throttling activated\n");
931                 } else {
932                         /*
933                          * In case we did anyway, make it go away,
934                          * replenishment is a joke, since it will replenish us
935                          * with exactly 0 ns.
936                          */
937                         rt_rq->rt_time = 0;
938                 }
939
940                 if (rt_rq_throttled(rt_rq)) {
941                         sched_rt_rq_dequeue(rt_rq);
942                         return 1;
943                 }
944         }
945
946         return 0;
947 }
948
949 /*
950  * Update the current task's runtime statistics. Skip current tasks that
951  * are not in our scheduling class.
952  */
953 static void update_curr_rt(struct rq *rq)
954 {
955         struct task_struct *curr = rq->curr;
956         struct sched_rt_entity *rt_se = &curr->rt;
957         u64 delta_exec;
958
959         if (curr->sched_class != &rt_sched_class)
960                 return;
961
962         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
963         if (unlikely((s64)delta_exec <= 0))
964                 return;
965
966         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
967         cpufreq_update_util(rq, SCHED_CPUFREQ_RT);
968
969         schedstat_set(curr->se.statistics.exec_max,
970                       max(curr->se.statistics.exec_max, delta_exec));
971
972         curr->se.sum_exec_runtime += delta_exec;
973         account_group_exec_runtime(curr, delta_exec);
974
975         curr->se.exec_start = rq_clock_task(rq);
976         cpuacct_charge(curr, delta_exec);
977
978         sched_rt_avg_update(rq, delta_exec);
979
980         if (!rt_bandwidth_enabled())
981                 return;
982
983         for_each_sched_rt_entity(rt_se) {
984                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
985
986                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
987                         raw_spin_lock(&rt_rq->rt_runtime_lock);
988                         rt_rq->rt_time += delta_exec;
989                         if (sched_rt_runtime_exceeded(rt_rq))
990                                 resched_curr(rq);
991                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
992                 }
993         }
994 }
995
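/*
 * Remove the root rt_rq's tasks from the rq's nr_running count (e.g. when
 * the RT class gets throttled); enqueue_top_rt_rq() below is the inverse.
 */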
996 static void
997 dequeue_top_rt_rq(struct rt_rq *rt_rq)
998 {
999         struct rq *rq = rq_of_rt_rq(rt_rq);
1000
1001         BUG_ON(&rq->rt != rt_rq);
1002
1003         if (!rt_rq->rt_queued)
1004                 return;
1005
1006         BUG_ON(!rq->nr_running);
1007
1008         sub_nr_running(rq, rt_rq->rt_nr_running);
1009         rt_rq->rt_queued = 0;
1010 }
1011
1012 static void
1013 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1014 {
1015         struct rq *rq = rq_of_rt_rq(rt_rq);
1016
1017         BUG_ON(&rq->rt != rt_rq);
1018
1019         if (rt_rq->rt_queued)
1020                 return;
1021         if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1022                 return;
1023
1024         add_nr_running(rq, rt_rq->rt_nr_running);
1025         rt_rq->rt_queued = 1;
1026 }
1027
1028 #if defined CONFIG_SMP
1029
1030 static void
1031 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1032 {
1033         struct rq *rq = rq_of_rt_rq(rt_rq);
1034
1035 #ifdef CONFIG_RT_GROUP_SCHED
1036         /*
1037          * Change rq's cpupri only if rt_rq is the top queue.
1038          */
1039         if (&rq->rt != rt_rq)
1040                 return;
1041 #endif
1042         if (rq->online && prio < prev_prio)
1043                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1044 }
1045
1046 static void
1047 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1048 {
1049         struct rq *rq = rq_of_rt_rq(rt_rq);
1050
1051 #ifdef CONFIG_RT_GROUP_SCHED
1052         /*
1053          * Change rq's cpupri only if rt_rq is the top queue.
1054          */
1055         if (&rq->rt != rt_rq)
1056                 return;
1057 #endif
1058         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1059                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1060 }
1061
1062 #else /* CONFIG_SMP */
1063
1064 static inline
1065 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1066 static inline
1067 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1068
1069 #endif /* CONFIG_SMP */
1070
1071 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1072 static void
1073 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1074 {
1075         int prev_prio = rt_rq->highest_prio.curr;
1076
1077         if (prio < prev_prio)
1078                 rt_rq->highest_prio.curr = prio;
1079
1080         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1081 }
1082
1083 static void
1084 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1085 {
1086         int prev_prio = rt_rq->highest_prio.curr;
1087
1088         if (rt_rq->rt_nr_running) {
1089
1090                 WARN_ON(prio < prev_prio);
1091
1092                 /*
1093                  * This may have been our highest task, and therefore
1094                  * we may have some recomputation to do
1095                  */
1096                 if (prio == prev_prio) {
1097                         struct rt_prio_array *array = &rt_rq->active;
1098
1099                         rt_rq->highest_prio.curr =
1100                                 sched_find_first_bit(array->bitmap);
1101                 }
1102
1103         } else
1104                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1105
1106         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1107 }
1108
1109 #else
1110
1111 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1112 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1113
1114 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1115
1116 #ifdef CONFIG_RT_GROUP_SCHED
1117
1118 static void
1119 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1120 {
1121         if (rt_se_boosted(rt_se))
1122                 rt_rq->rt_nr_boosted++;
1123
1124         if (rt_rq->tg)
1125                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1126 }
1127
1128 static void
1129 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1130 {
1131         if (rt_se_boosted(rt_se))
1132                 rt_rq->rt_nr_boosted--;
1133
1134         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1135 }
1136
1137 #else /* CONFIG_RT_GROUP_SCHED */
1138
1139 static void
1140 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1141 {
1142         start_rt_bandwidth(&def_rt_bandwidth);
1143 }
1144
1145 static inline
1146 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1147
1148 #endif /* CONFIG_RT_GROUP_SCHED */
1149
1150 static inline
1151 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1152 {
1153         struct rt_rq *group_rq = group_rt_rq(rt_se);
1154
1155         if (group_rq)
1156                 return group_rq->rt_nr_running;
1157         else
1158                 return 1;
1159 }
1160
1161 static inline
1162 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1163 {
1164         struct rt_rq *group_rq = group_rt_rq(rt_se);
1165         struct task_struct *tsk;
1166
1167         if (group_rq)
1168                 return group_rq->rr_nr_running;
1169
1170         tsk = rt_task_of(rt_se);
1171
1172         return (tsk->policy == SCHED_RR) ? 1 : 0;
1173 }
1174
1175 static inline
1176 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1177 {
1178         int prio = rt_se_prio(rt_se);
1179
1180         WARN_ON(!rt_prio(prio));
1181         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1182         rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1183
1184         inc_rt_prio(rt_rq, prio);
1185         inc_rt_migration(rt_se, rt_rq);
1186         inc_rt_group(rt_se, rt_rq);
1187 }
1188
1189 static inline
1190 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1191 {
1192         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1193         WARN_ON(!rt_rq->rt_nr_running);
1194         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1195         rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1196
1197         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1198         dec_rt_migration(rt_se, rt_rq);
1199         dec_rt_group(rt_se, rt_rq);
1200 }
1201
1202 /*
1203  * Change rt_se->run_list location unless SAVE && !MOVE
1204  *
1205  * assumes ENQUEUE/DEQUEUE flags match
1206  */
1207 static inline bool move_entity(unsigned int flags)
1208 {
1209         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1210                 return false;
1211
1212         return true;
1213 }
1214
1215 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1216 {
1217         list_del_init(&rt_se->run_list);
1218
1219         if (list_empty(array->queue + rt_se_prio(rt_se)))
1220                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1221
1222         rt_se->on_list = 0;
1223 }
1224
1225 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1226 {
1227         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1228         struct rt_prio_array *array = &rt_rq->active;
1229         struct rt_rq *group_rq = group_rt_rq(rt_se);
1230         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1231
1232         /*
1233          * Don't enqueue the group if it's throttled, or when empty.
1234          * The latter is a consequence of the former when a child group
1235          * gets throttled and the current group doesn't have any other
1236          * active members.
1237          */
1238         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1239                 if (rt_se->on_list)
1240                         __delist_rt_entity(rt_se, array);
1241                 return;
1242         }
1243
1244         if (move_entity(flags)) {
1245                 WARN_ON_ONCE(rt_se->on_list);
1246                 if (flags & ENQUEUE_HEAD)
1247                         list_add(&rt_se->run_list, queue);
1248                 else
1249                         list_add_tail(&rt_se->run_list, queue);
1250
1251                 __set_bit(rt_se_prio(rt_se), array->bitmap);
1252                 rt_se->on_list = 1;
1253         }
1254         rt_se->on_rq = 1;
1255
1256         inc_rt_tasks(rt_se, rt_rq);
1257 }
1258
1259 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1260 {
1261         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1262         struct rt_prio_array *array = &rt_rq->active;
1263
1264         if (move_entity(flags)) {
1265                 WARN_ON_ONCE(!rt_se->on_list);
1266                 __delist_rt_entity(rt_se, array);
1267         }
1268         rt_se->on_rq = 0;
1269
1270         dec_rt_tasks(rt_se, rt_rq);
1271 }
1272
1273 /*
1274  * Because the prio of an upper entry depends on the lower
1275  * entries, we must remove entries top-down.
1276  */
1277 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1278 {
1279         struct sched_rt_entity *back = NULL;
1280
1281         for_each_sched_rt_entity(rt_se) {
1282                 rt_se->back = back;
1283                 back = rt_se;
1284         }
1285
1286         dequeue_top_rt_rq(rt_rq_of_se(back));
1287
1288         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1289                 if (on_rt_rq(rt_se))
1290                         __dequeue_rt_entity(rt_se, flags);
1291         }
1292 }
1293
1294 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1295 {
1296         struct rq *rq = rq_of_rt_se(rt_se);
1297
1298         dequeue_rt_stack(rt_se, flags);
1299         for_each_sched_rt_entity(rt_se)
1300                 __enqueue_rt_entity(rt_se, flags);
1301         enqueue_top_rt_rq(&rq->rt);
1302 }
1303
1304 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1305 {
1306         struct rq *rq = rq_of_rt_se(rt_se);
1307
1308         dequeue_rt_stack(rt_se, flags);
1309
1310         for_each_sched_rt_entity(rt_se) {
1311                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1312
1313                 if (rt_rq && rt_rq->rt_nr_running)
1314                         __enqueue_rt_entity(rt_se, flags);
1315         }
1316         enqueue_top_rt_rq(&rq->rt);
1317 }
1318
1319 /*
1320  * Adding/removing a task to/from a priority array:
1321  */
1322 static void
1323 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1324 {
1325         struct sched_rt_entity *rt_se = &p->rt;
1326
1327         if (flags & ENQUEUE_WAKEUP)
1328                 rt_se->timeout = 0;
1329
1330         enqueue_rt_entity(rt_se, flags);
1331
1332         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1333                 enqueue_pushable_task(rq, p);
1334 }
1335
1336 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1337 {
1338         struct sched_rt_entity *rt_se = &p->rt;
1339
1340         update_curr_rt(rq);
1341         dequeue_rt_entity(rt_se, flags);
1342
1343         dequeue_pushable_task(rq, p);
1344 }
1345
1346 /*
1347  * Put the task at the head or the end of the run list without the overhead of
1348  * dequeue followed by enqueue.
1349  */
1350 static void
1351 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1352 {
1353         if (on_rt_rq(rt_se)) {
1354                 struct rt_prio_array *array = &rt_rq->active;
1355                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1356
1357                 if (head)
1358                         list_move(&rt_se->run_list, queue);
1359                 else
1360                         list_move_tail(&rt_se->run_list, queue);
1361         }
1362 }
1363
1364 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1365 {
1366         struct sched_rt_entity *rt_se = &p->rt;
1367         struct rt_rq *rt_rq;
1368
1369         for_each_sched_rt_entity(rt_se) {
1370                 rt_rq = rt_rq_of_se(rt_se);
1371                 requeue_rt_entity(rt_rq, rt_se, head);
1372         }
1373 }
1374
1375 static void yield_task_rt(struct rq *rq)
1376 {
1377         requeue_task_rt(rq, rq->curr, 0);
1378 }
1379
1380 #ifdef CONFIG_SMP
1381 static int find_lowest_rq(struct task_struct *task);
1382
1383 static int
1384 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1385 {
1386         struct task_struct *curr;
1387         struct rq *rq;
1388
1389         /* For anything but wake ups, just return the task_cpu */
1390         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1391                 goto out;
1392
1393         rq = cpu_rq(cpu);
1394
1395         rcu_read_lock();
1396         curr = READ_ONCE(rq->curr); /* unlocked access */
1397
1398         /*
1399          * If the current task on @p's runqueue is an RT task, then
1400          * try to see if we can wake this RT task up on another
1401          * runqueue. Otherwise simply start this RT task
1402          * on its current runqueue.
1403          *
1404          * We want to avoid overloading runqueues. If the woken
1405          * task is a higher priority, then it will stay on this CPU
1406          * and the lower prio task should be moved to another CPU.
1407          * Even though this will probably make the lower prio task
1408          * lose its cache, we do not want to bounce a higher task
1409          * around just because it gave up its CPU, perhaps for a
1410          * lock?
1411          *
1412          * For equal prio tasks, we just let the scheduler sort it out.
1413          *
1414          * Otherwise, just let it ride on the affined RQ and the
1415          * post-schedule router will push the preempted task away
1416          *
1417          * This test is optimistic; if we get it wrong the load-balancer
1418          * will have to sort it out.
1419          */
1420         if (curr && unlikely(rt_task(curr)) &&
1421             (curr->nr_cpus_allowed < 2 ||
1422              curr->prio <= p->prio)) {
1423                 int target = find_lowest_rq(p);
1424
1425                 /*
1426                  * Don't bother moving it if the destination CPU is
1427                  * not running a lower priority task.
1428                  */
1429                 if (target != -1 &&
1430                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1431                         cpu = target;
1432         }
1433         rcu_read_unlock();
1434
1435 out:
1436         return cpu;
1437 }
1438
1439 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1440 {
1441         /*
1442          * Current can't be migrated, useless to reschedule,
1443          * let's hope p can move out.
1444          */
1445         if (rq->curr->nr_cpus_allowed == 1 ||
1446             !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1447                 return;
1448
1449         /*
1450          * p is migratable, so let's not schedule it and
1451          * see if it is pushed or pulled somewhere else.
1452          */
1453         if (p->nr_cpus_allowed != 1
1454             && cpupri_find(&rq->rd->cpupri, p, NULL))
1455                 return;
1456
1457         /*
1458          * There appear to be other CPUs that can accept
1459          * current and none to run 'p', so let's reschedule
1460          * to try and push current away:
1461          */
1462         requeue_task_rt(rq, p, 1);
1463         resched_curr(rq);
1464 }
1465
1466 #endif /* CONFIG_SMP */
1467
1468 /*
1469  * Preempt the current task with a newly woken task if needed:
1470  */
1471 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1472 {
1473         if (p->prio < rq->curr->prio) {
1474                 resched_curr(rq);
1475                 return;
1476         }
1477
1478 #ifdef CONFIG_SMP
1479         /*
1480          * If:
1481          *
1482          * - the newly woken task is of equal priority to the current task
1483          * - the newly woken task is non-migratable while current is migratable
1484          * - current will be preempted on the next reschedule
1485          *
1486          * we should check to see if current can readily move to a different
1487          * cpu.  If so, we will reschedule to allow the push logic to try
1488          * to move current somewhere else, making room for our non-migratable
1489          * task.
1490          */
1491         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1492                 check_preempt_equal_prio(rq, p);
1493 #endif
1494 }
1495
1496 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1497                                                    struct rt_rq *rt_rq)
1498 {
1499         struct rt_prio_array *array = &rt_rq->active;
1500         struct sched_rt_entity *next = NULL;
1501         struct list_head *queue;
1502         int idx;
1503
1504         idx = sched_find_first_bit(array->bitmap);
1505         BUG_ON(idx >= MAX_RT_PRIO);
1506
1507         queue = array->queue + idx;
1508         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1509
1510         return next;
1511 }
1512
1513 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1514 {
1515         struct sched_rt_entity *rt_se;
1516         struct task_struct *p;
1517         struct rt_rq *rt_rq  = &rq->rt;
1518
1519         do {
1520                 rt_se = pick_next_rt_entity(rq, rt_rq);
1521                 BUG_ON(!rt_se);
1522                 rt_rq = group_rt_rq(rt_se);
1523         } while (rt_rq);
1524
1525         p = rt_task_of(rt_se);
1526         p->se.exec_start = rq_clock_task(rq);
1527
1528         return p;
1529 }
1530
1531 static struct task_struct *
1532 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1533 {
1534         struct task_struct *p;
1535         struct rt_rq *rt_rq = &rq->rt;
1536
1537         if (need_pull_rt_task(rq, prev)) {
1538                 /*
1539                  * This is OK, because current is on_cpu, which avoids it being
1540                  * picked for load-balance and preemption/IRQs are still
1541                  * disabled avoiding further scheduler activity on it and we're
1542                  * being very careful to re-start the picking loop.
1543                  */
1544                 rq_unpin_lock(rq, rf);
1545                 pull_rt_task(rq);
1546                 rq_repin_lock(rq, rf);
1547                 /*
1548                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1549                  * means a dl or stop task can slip in, in which case we need
1550                  * to re-start task selection.
1551                  */
1552                 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1553                              rq->dl.dl_nr_running))
1554                         return RETRY_TASK;
1555         }
1556
1557         /*
1558          * We may dequeue prev's rt_rq in put_prev_task().
1559          * So, we update time before rt_nr_running check.
1560          * So, we update time before the rt_nr_running check.
1561         if (prev->sched_class == &rt_sched_class)
1562                 update_curr_rt(rq);
1563
1564         if (!rt_rq->rt_queued)
1565                 return NULL;
1566
1567         put_prev_task(rq, prev);
1568
1569         p = _pick_next_task_rt(rq);
1570
1571         /* The running task is never eligible for pushing */
1572         dequeue_pushable_task(rq, p);
1573
1574         queue_push_tasks(rq);
1575
1576         return p;
1577 }
1578
1579 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1580 {
1581         update_curr_rt(rq);
1582
1583         /*
1584          * The previous task needs to be made eligible for pushing
1585          * if it is still active
1586          */
1587         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1588                 enqueue_pushable_task(rq, p);
1589 }
1590
1591 #ifdef CONFIG_SMP
1592
1593 /* Only try algorithms three times */
1594 #define RT_MAX_TRIES 3
1595
1596 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1597 {
1598         if (!task_running(rq, p) &&
1599             cpumask_test_cpu(cpu, &p->cpus_allowed))
1600                 return 1;
1601         return 0;
1602 }
1603
1604 /*
1605  * Return the highest-priority pushable task on this rq that can run on the
1606  * given cpu, or NULL if there is none.
1607  */
1608 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1609 {
1610         struct plist_head *head = &rq->rt.pushable_tasks;
1611         struct task_struct *p;
1612
1613         if (!has_pushable_tasks(rq))
1614                 return NULL;
1615
1616         plist_for_each_entry(p, head, pushable_tasks) {
1617                 if (pick_rt_task(rq, p, cpu))
1618                         return p;
1619         }
1620
1621         return NULL;
1622 }
1623
1624 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1625
1626 static int find_lowest_rq(struct task_struct *task)
1627 {
1628         struct sched_domain *sd;
1629         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1630         int this_cpu = smp_processor_id();
1631         int cpu      = task_cpu(task);
1632
1633         /* Make sure the mask is initialized first */
1634         if (unlikely(!lowest_mask))
1635                 return -1;
1636
1637         if (task->nr_cpus_allowed == 1)
1638                 return -1; /* No other targets possible */
1639
1640         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1641                 return -1; /* No targets found */
1642
1643         /*
1644          * At this point we have built a mask of cpus representing the
1645          * lowest priority tasks in the system.  Now we want to elect
1646          * the best one based on our affinity and topology.
1647          *
1648          * We prioritize the last cpu that the task executed on since
1649          * it is most likely cache-hot in that location.
1650          */
1651         if (cpumask_test_cpu(cpu, lowest_mask))
1652                 return cpu;
1653
1654         /*
1655          * Otherwise, we consult the sched_domains span maps to figure
1656          * out which cpu is logically closest to our hot cache data.
1657          */
1658         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1659                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1660
1661         rcu_read_lock();
1662         for_each_domain(cpu, sd) {
1663                 if (sd->flags & SD_WAKE_AFFINE) {
1664                         int best_cpu;
1665
1666                         /*
1667                          * "this_cpu" is cheaper to preempt than a
1668                          * remote processor.
1669                          */
1670                         if (this_cpu != -1 &&
1671                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1672                                 rcu_read_unlock();
1673                                 return this_cpu;
1674                         }
1675
1676                         best_cpu = cpumask_first_and(lowest_mask,
1677                                                      sched_domain_span(sd));
1678                         if (best_cpu < nr_cpu_ids) {
1679                                 rcu_read_unlock();
1680                                 return best_cpu;
1681                         }
1682                 }
1683         }
1684         rcu_read_unlock();
1685
1686         /*
1687          * And finally, if there were no matches within the domains
1688          * just give the caller *something* to work with from the compatible
1689          * locations.
1690          */
1691         if (this_cpu != -1)
1692                 return this_cpu;
1693
1694         cpu = cpumask_any(lowest_mask);
1695         if (cpu < nr_cpu_ids)
1696                 return cpu;
1697         return -1;
1698 }
1699
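/*
 * Editorial sketch (not part of the original source): a worked example of the
 * selection order in find_lowest_rq() above. Suppose the task last ran on
 * CPU 2 and cpupri_find() fills lowest_mask with { 1, 3 }. CPU 2 is not in
 * the mask, so the cache-hot shortcut does not apply. If the caller runs on
 * CPU 3 (which is in the mask) and CPU 3 lies in an SD_WAKE_AFFINE domain
 * span of CPU 2, CPU 3 is returned; otherwise the first mask CPU within each
 * of CPU 2's domain spans is tried, then this_cpu as a fallback, and finally
 * cpumask_any(lowest_mask) as a last resort.
 */
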
1700 /* Will lock the rq it finds */
1701 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1702 {
1703         struct rq *lowest_rq = NULL;
1704         int tries;
1705         int cpu;
1706
1707         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1708                 cpu = find_lowest_rq(task);
1709
1710                 if ((cpu == -1) || (cpu == rq->cpu))
1711                         break;
1712
1713                 lowest_rq = cpu_rq(cpu);
1714
1715                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1716                         /*
1717                          * Target rq has tasks of equal or higher priority,
1718                          * retrying does not release any lock and is unlikely
1719                          * to yield a different result.
1720                          */
1721                         lowest_rq = NULL;
1722                         break;
1723                 }
1724
1725                 /* if the prio of this runqueue changed, try again */
1726                 if (double_lock_balance(rq, lowest_rq)) {
1727                         /*
1728                          * We had to unlock the run queue. In the
1729                          * meantime, the task could have migrated
1730                          * already or had its affinity changed.
1731                          * Also make sure that it wasn't scheduled on its rq.
1732                          */
1733                         if (unlikely(task_rq(task) != rq ||
1734                                      !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
1735                                      task_running(rq, task) ||
1736                                      !rt_task(task) ||
1737                                      !task_on_rq_queued(task))) {
1738
1739                                 double_unlock_balance(rq, lowest_rq);
1740                                 lowest_rq = NULL;
1741                                 break;
1742                         }
1743                 }
1744
1745                 /* If this rq is still suitable use it. */
1746                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1747                         break;
1748
1749                 /* try again */
1750                 double_unlock_balance(rq, lowest_rq);
1751                 lowest_rq = NULL;
1752         }
1753
1754         return lowest_rq;
1755 }
1756
1757 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1758 {
1759         struct task_struct *p;
1760
1761         if (!has_pushable_tasks(rq))
1762                 return NULL;
1763
1764         p = plist_first_entry(&rq->rt.pushable_tasks,
1765                               struct task_struct, pushable_tasks);
1766
1767         BUG_ON(rq->cpu != task_cpu(p));
1768         BUG_ON(task_current(rq, p));
1769         BUG_ON(p->nr_cpus_allowed <= 1);
1770
1771         BUG_ON(!task_on_rq_queued(p));
1772         BUG_ON(!rt_task(p));
1773
1774         return p;
1775 }
1776
1777 /*
1778  * If the current CPU has more than one RT task, see if the
1779  * non-running task can migrate over to a CPU that is running a task
1780  * of lesser priority.
1781  */
1782 static int push_rt_task(struct rq *rq)
1783 {
1784         struct task_struct *next_task;
1785         struct rq *lowest_rq;
1786         int ret = 0;
1787
1788         if (!rq->rt.overloaded)
1789                 return 0;
1790
1791         next_task = pick_next_pushable_task(rq);
1792         if (!next_task)
1793                 return 0;
1794
1795 retry:
1796         if (unlikely(next_task == rq->curr)) {
1797                 WARN_ON(1);
1798                 return 0;
1799         }
1800
1801         /*
1802          * It's possible that next_task slipped in with a
1803          * higher priority than current. If that's the case,
1804          * just reschedule current.
1805          */
1806         if (unlikely(next_task->prio < rq->curr->prio)) {
1807                 resched_curr(rq);
1808                 return 0;
1809         }
1810
1811         /* We might release rq lock */
1812         get_task_struct(next_task);
1813
1814         /* find_lock_lowest_rq locks the rq if found */
1815         lowest_rq = find_lock_lowest_rq(next_task, rq);
1816         if (!lowest_rq) {
1817                 struct task_struct *task;
1818                 /*
1819                  * find_lock_lowest_rq releases rq->lock
1820                  * so it is possible that next_task has migrated.
1821                  *
1822                  * We need to make sure that the task is still on the same
1823                  * run-queue and is also still the next task eligible for
1824                  * pushing.
1825                  */
1826                 task = pick_next_pushable_task(rq);
1827                 if (task == next_task) {
1828                         /*
1829                          * The task hasn't migrated, and is still the next
1830                          * eligible task, but we failed to find a run-queue
1831                          * to push it to.  Do not retry in this case, since
1832                          * other cpus will pull from us when ready.
1833                          */
1834                         goto out;
1835                 }
1836
1837                 if (!task)
1838                         /* No more tasks, just exit */
1839                         goto out;
1840
1841                 /*
1842                  * Something has shifted, try again.
1843                  */
1844                 put_task_struct(next_task);
1845                 next_task = task;
1846                 goto retry;
1847         }
1848
1849         deactivate_task(rq, next_task, 0);
1850         set_task_cpu(next_task, lowest_rq->cpu);
1851         activate_task(lowest_rq, next_task, 0);
1852         ret = 1;
1853
1854         resched_curr(lowest_rq);
1855
1856         double_unlock_balance(rq, lowest_rq);
1857
1858 out:
1859         put_task_struct(next_task);
1860
1861         return ret;
1862 }
1863
1864 static void push_rt_tasks(struct rq *rq)
1865 {
1866         /* push_rt_task() will return true if it moved an RT task */
1867         while (push_rt_task(rq))
1868                 ;
1869 }
1870
1871 #ifdef HAVE_RT_PUSH_IPI
1872
1873 /*
1874  * When a high priority task schedules out from a CPU and a lower priority
1875  * task is scheduled in, a check is made to see if there's any RT tasks
1876  * on other CPUs that are waiting to run because a higher priority RT task
1877  * is currently running on its CPU. In this case, the CPU with multiple RT
1878  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1879  * up that may be able to run one of its non-running queued RT tasks.
1880  *
1881  * All CPUs with overloaded RT tasks need to be notified, as there is
1882  * currently no way to know which of these CPUs has the highest priority
1883  * task waiting to run. Instead of trying to take a spinlock on each of
1884  * these CPUs, which has been shown to cause large latencies on machines
1885  * with many CPUs, an IPI is sent to the CPUs to have them push off the
1886  * overloaded RT tasks waiting to run.
1887  *
1888  * Just sending an IPI to each of the CPUs is also an issue, as on machines
1889  * with a large CPU count this can cause an IPI storm on a CPU, especially
1890  * if it's the only CPU with multiple RT tasks queued and a large number
1891  * of CPUs are scheduling a lower priority task at the same time.
1892  *
1893  * Each root domain has its own irq work function that can iterate over
1894  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1895  * tasks must be checked whether one or many CPUs are lowering their
1896  * priority, there's a single irq work iterator that will try to
1897  * push off RT tasks that are waiting to run.
1898  *
1899  * When a CPU schedules a lower priority task, it will kick off the
1900  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1901  * As it only takes the first CPU that schedules a lower priority task
1902  * to start the process, the rto_loop_start variable is claimed with an
1903  * atomic test-and-set, and only the CPU that wins it takes the rto_lock.
1904  * This prevents high contention on the lock as the process handles all
1905  * CPUs scheduling lower priority tasks.
1906  *
1907  * All CPUs that are scheduling a lower priority task will increment the
1908  * rto_loop_next variable. This will make sure that the irq work iterator
1909  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1910  * priority task, even if the iterator is in the middle of a scan. Incrementing
1911  * the rto_loop_next will cause the iterator to perform another scan.
1912  *
1913  */
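/*
 * Editorial sketch (not part of the original source): one possible sequence,
 * assuming CPU 0 is the only overloaded CPU and CPUs 1-3 all drop to a lower
 * priority task at roughly the same time:
 *
 *   - CPUs 1-3 each call tell_cpu_to_push(): all bump rto_loop_next, but
 *     only the first one to win rto_loop_start and find rto_cpu unset queues
 *     rto_push_work on CPU 0 (the first CPU in rto_mask).
 *   - CPU 0 runs rto_push_irq_work_func(): it pushes what it can and asks
 *     rto_next_cpu() for the next overloaded CPU to pass the irq work to.
 *   - Because the other CPUs bumped rto_loop_next, rto_next_cpu() restarts
 *     the scan instead of returning -1, so newly opened CPUs are not missed;
 *     once rto_loop catches up with rto_loop_next and the mask is exhausted,
 *     the irq work chain stops.
 */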
1914 static int rto_next_cpu(struct root_domain *rd)
1915 {
1916         int next;
1917         int cpu;
1918
1919         /*
1920          * When starting the IPI RT pushing, the rto_cpu is set to -1,
1921          * and rto_next_cpu() will simply return the first CPU found in
1922          * the rto_mask.
1923          *
1924          * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
1925          * will return the next CPU found in the rto_mask.
1926          *
1927          * If there are no more CPUs left in the rto_mask, then a check is made
1928          * against rto_loop and rto_loop_next. rto_loop is only updated with
1929          * the rto_lock held, but any CPU may increment the rto_loop_next
1930          * without any locking.
1931          */
1932         for (;;) {
1933
1934                 /* When rto_cpu is -1 this acts like cpumask_first() */
1935                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1936
1937                 rd->rto_cpu = cpu;
1938
1939                 if (cpu < nr_cpu_ids)
1940                         return cpu;
1941
1942                 rd->rto_cpu = -1;
1943
1944                 /*
1945                  * ACQUIRE ensures we see the @rto_mask changes
1946                  * made prior to the @next value observed.
1947                  *
1948                  * Matches WMB in rt_set_overload().
1949                  */
1950                 next = atomic_read_acquire(&rd->rto_loop_next);
1951
1952                 if (rd->rto_loop == next)
1953                         break;
1954
1955                 rd->rto_loop = next;
1956         }
1957
1958         return -1;
1959 }
1960
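/*
 * Editorial note (not part of the original source): the two helpers below
 * form a minimal test-and-set style lock around rd->rto_loop_start. The
 * cmpxchg with ACQUIRE semantics in rto_start_trylock() pairs with the
 * RELEASE store in rto_start_unlock(), so the CPU that wins the 0 -> 1
 * transition observes the prior holder's updates, while every other caller
 * simply backs off.
 */
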
1961 static inline bool rto_start_trylock(atomic_t *v)
1962 {
1963         return !atomic_cmpxchg_acquire(v, 0, 1);
1964 }
1965
1966 static inline void rto_start_unlock(atomic_t *v)
1967 {
1968         atomic_set_release(v, 0);
1969 }
1970
1971 static void tell_cpu_to_push(struct rq *rq)
1972 {
1973         int cpu = -1;
1974
1975         /* Keep the loop going if the IPI is currently active */
1976         atomic_inc(&rq->rd->rto_loop_next);
1977
1978         /* Only one CPU can initiate a loop at a time */
1979         if (!rto_start_trylock(&rq->rd->rto_loop_start))
1980                 return;
1981
1982         raw_spin_lock(&rq->rd->rto_lock);
1983
1984         /*
1985          * The rto_cpu is updated under the lock; if it has a valid CPU
1986          * then the IPI is still running and will continue due to the
1987          * update to loop_next, and nothing needs to be done here.
1988          * Otherwise it is finishing up and an IPI needs to be sent.
1989          */
1990         if (rq->rd->rto_cpu < 0)
1991                 cpu = rto_next_cpu(rq->rd);
1992
1993         raw_spin_unlock(&rq->rd->rto_lock);
1994
1995         rto_start_unlock(&rq->rd->rto_loop_start);
1996
1997         if (cpu >= 0) {
1998                 /* Make sure the rd does not get freed while pushing */
1999                 sched_get_rd(rq->rd);
2000                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2001         }
2002 }
2003
2004 /* Called from hardirq context */
2005 void rto_push_irq_work_func(struct irq_work *work)
2006 {
2007         struct root_domain *rd =
2008                 container_of(work, struct root_domain, rto_push_work);
2009         struct rq *rq;
2010         int cpu;
2011
2012         rq = this_rq();
2013
2014         /*
2015          * We do not need to grab the lock to check for has_pushable_tasks.
2016          * When it gets updated, a check is made whether a push is possible.
2017          */
2018         if (has_pushable_tasks(rq)) {
2019                 raw_spin_lock(&rq->lock);
2020                 push_rt_tasks(rq);
2021                 raw_spin_unlock(&rq->lock);
2022         }
2023
2024         raw_spin_lock(&rd->rto_lock);
2025
2026         /* Pass the IPI to the next rt overloaded queue */
2027         cpu = rto_next_cpu(rd);
2028
2029         raw_spin_unlock(&rd->rto_lock);
2030
2031         if (cpu < 0) {
2032                 sched_put_rd(rd);
2033                 return;
2034         }
2035
2036         /* Try the next RT overloaded CPU */
2037         irq_work_queue_on(&rd->rto_push_work, cpu);
2038 }
2039 #endif /* HAVE_RT_PUSH_IPI */
2040
2041 static void pull_rt_task(struct rq *this_rq)
2042 {
2043         int this_cpu = this_rq->cpu, cpu;
2044         bool resched = false;
2045         struct task_struct *p;
2046         struct rq *src_rq;
2047         int rt_overload_count = rt_overloaded(this_rq);
2048
2049         if (likely(!rt_overload_count))
2050                 return;
2051
2052         /*
2053          * Match the barrier from rt_set_overload(); this guarantees that if we
2054          * see overloaded we must also see the rto_mask bit.
2055          */
2056         smp_rmb();
2057
2058         /* If we are the only overloaded CPU do nothing */
2059         if (rt_overload_count == 1 &&
2060             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2061                 return;
2062
2063 #ifdef HAVE_RT_PUSH_IPI
2064         if (sched_feat(RT_PUSH_IPI)) {
2065                 tell_cpu_to_push(this_rq);
2066                 return;
2067         }
2068 #endif
2069
2070         for_each_cpu(cpu, this_rq->rd->rto_mask) {
2071                 if (this_cpu == cpu)
2072                         continue;
2073
2074                 src_rq = cpu_rq(cpu);
2075
2076                 /*
2077                  * Don't bother taking the src_rq->lock if the next highest
2078                  * task is known to be lower-priority than our current task.
2079                  * This may look racy, but if this value is about to go
2080                  * logically higher, the src_rq will push this task away.
2081                  * And if it's going logically lower, we do not care.
2082                  */
2083                 if (src_rq->rt.highest_prio.next >=
2084                     this_rq->rt.highest_prio.curr)
2085                         continue;
2086
2087                 /*
2088                  * We can potentially drop this_rq's lock in
2089                  * double_lock_balance, and another CPU could
2090                  * alter this_rq
2091                  */
2092                 double_lock_balance(this_rq, src_rq);
2093
2094                 /*
2095                  * We can only pull a task that is pushable
2096                  * on its rq, and no others.
2097                  */
2098                 p = pick_highest_pushable_task(src_rq, this_cpu);
2099
2100                 /*
2101                  * Do we have an RT task that preempts
2102                  * the to-be-scheduled task?
2103                  */
2104                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2105                         WARN_ON(p == src_rq->curr);
2106                         WARN_ON(!task_on_rq_queued(p));
2107
2108                         /*
2109                          * There's a chance that p is higher in priority
2110                          * than what's currently running on its cpu.
2111                          * This is just that p is waking up and hasn't
2112                          * had a chance to schedule. We only pull
2113                          * p if it is lower in priority than the
2114                          * current task on the run queue.
2115                          */
2116                         if (p->prio < src_rq->curr->prio)
2117                                 goto skip;
2118
2119                         resched = true;
2120
2121                         deactivate_task(src_rq, p, 0);
2122                         set_task_cpu(p, this_cpu);
2123                         activate_task(this_rq, p, 0);
2124                         /*
2125                          * We continue with the search, just in
2126                          * case there's an even higher prio task
2127                          * in another runqueue. (low likelihood
2128                          * but possible)
2129                          */
2130                 }
2131 skip:
2132                 double_unlock_balance(this_rq, src_rq);
2133         }
2134
2135         if (resched)
2136                 resched_curr(this_rq);
2137 }
2138
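/*
 * Editorial sketch (not part of the original source): a worked example of
 * pull_rt_task() above. Suppose this_rq's highest queued RT priority is 40
 * and CPU 5 is in rto_mask with highest_prio.next = 20 (lower numbers mean
 * higher priority). The cheap check passes (20 < 40), so both locks are
 * taken and the best pushable task p on CPU 5 is examined: if p->prio is 20
 * it is detached from CPU 5, activated here, and resched_curr() is invoked
 * once the scan finishes; if p turns out to be higher priority than CPU 5's
 * own current task, it is left alone, since CPU 5 is about to run it anyway.
 */
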
2139 /*
2140  * If we are not running and we are not going to reschedule soon, we should
2141  * try to push tasks away now.
2142  */
2143 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2144 {
2145         if (!task_running(rq, p) &&
2146             !test_tsk_need_resched(rq->curr) &&
2147             p->nr_cpus_allowed > 1 &&
2148             (dl_task(rq->curr) || rt_task(rq->curr)) &&
2149             (rq->curr->nr_cpus_allowed < 2 ||
2150              rq->curr->prio <= p->prio))
2151                 push_rt_tasks(rq);
2152 }
2153
2154 /* Assumes rq->lock is held */
2155 static void rq_online_rt(struct rq *rq)
2156 {
2157         if (rq->rt.overloaded)
2158                 rt_set_overload(rq);
2159
2160         __enable_runtime(rq);
2161
2162         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2163 }
2164
2165 /* Assumes rq->lock is held */
2166 static void rq_offline_rt(struct rq *rq)
2167 {
2168         if (rq->rt.overloaded)
2169                 rt_clear_overload(rq);
2170
2171         __disable_runtime(rq);
2172
2173         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2174 }
2175
2176 /*
2177  * When switch from the rt queue, we bring ourselves to a position
2178  * that we might want to pull RT tasks from other runqueues.
2179  */
2180 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2181 {
2182         /*
2183          * If there are other RT tasks then we will reschedule
2184          * and the scheduling of the other RT tasks will handle
2185          * the balancing. But if we are the last RT task
2186          * we may need to handle the pulling of RT tasks
2187          * now.
2188          */
2189         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2190                 return;
2191
2192         queue_pull_task(rq);
2193 }
2194
2195 void __init init_sched_rt_class(void)
2196 {
2197         unsigned int i;
2198
2199         for_each_possible_cpu(i) {
2200                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2201                                         GFP_KERNEL, cpu_to_node(i));
2202         }
2203 }
2204 #endif /* CONFIG_SMP */
2205
2206 /*
2207  * When switching a task to RT, we may overload the runqueue
2208  * with RT tasks. In this case we try to push them off to
2209  * other runqueues.
2210  */
2211 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2212 {
2213         /*
2214          * If we are already running, then there's nothing
2215          * that needs to be done. But if we are not running
2216          * we may need to preempt the currently running task.
2217          * If that currently running task is also an RT task
2218          * then see if we can move to another run queue.
2219          */
2220         if (task_on_rq_queued(p) && rq->curr != p) {
2221 #ifdef CONFIG_SMP
2222                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2223                         queue_push_tasks(rq);
2224 #endif /* CONFIG_SMP */
2225                 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2226                         resched_curr(rq);
2227         }
2228 }
2229
2230 /*
2231  * Priority of the task has changed. This may cause
2232  * us to initiate a push or pull.
2233  */
2234 static void
2235 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2236 {
2237         if (!task_on_rq_queued(p))
2238                 return;
2239
2240         if (rq->curr == p) {
2241 #ifdef CONFIG_SMP
2242                 /*
2243                  * If our priority decreases while running, we
2244                  * may need to pull tasks to this runqueue.
2245                  */
2246                 if (oldprio < p->prio)
2247                         queue_pull_task(rq);
2248
2249                 /*
2250                  * If there's a higher priority task waiting to run
2251                  * then reschedule.
2252                  */
2253                 if (p->prio > rq->rt.highest_prio.curr)
2254                         resched_curr(rq);
2255 #else
2256                 /* For UP simply resched on drop of prio */
2257                 if (oldprio < p->prio)
2258                         resched_curr(rq);
2259 #endif /* CONFIG_SMP */
2260         } else {
2261                 /*
2262                  * This task is not running, but if it is
2263                  * higher in priority than the currently running task,
2264                  * then reschedule.
2265                  */
2266                 if (p->prio < rq->curr->prio)
2267                         resched_curr(rq);
2268         }
2269 }
2270
2271 #ifdef CONFIG_POSIX_TIMERS
2272 static void watchdog(struct rq *rq, struct task_struct *p)
2273 {
2274         unsigned long soft, hard;
2275
2276         /* max may change after cur was read; this will be fixed next tick */
2277         soft = task_rlimit(p, RLIMIT_RTTIME);
2278         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2279
2280         if (soft != RLIM_INFINITY) {
2281                 unsigned long next;
2282
2283                 if (p->rt.watchdog_stamp != jiffies) {
2284                         p->rt.timeout++;
2285                         p->rt.watchdog_stamp = jiffies;
2286                 }
2287
2288                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2289                 if (p->rt.timeout > next)
2290                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2291         }
2292 }
2293 #else
2294 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2295 #endif
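
/*
 * Editorial sketch (not part of the original source): a worked example of
 * the RLIMIT_RTTIME conversion in watchdog() above, assuming HZ=250. Each
 * tick then covers USEC_PER_SEC/HZ = 4000us, so a soft limit of 950000us
 * becomes next = DIV_ROUND_UP(950000, 4000) = 238 ticks. p->rt.timeout is
 * bumped once per jiffy of RT execution, and once it exceeds 238 the task's
 * sched_exp expiry is armed so the POSIX CPU timer code can act on the
 * rlimit.
 */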
2296
2297 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2298 {
2299         struct sched_rt_entity *rt_se = &p->rt;
2300
2301         update_curr_rt(rq);
2302
2303         watchdog(rq, p);
2304
2305         /*
2306          * RR tasks need a special form of timeslice management.
2307          * FIFO tasks have no timeslices.
2308          */
2309         if (p->policy != SCHED_RR)
2310                 return;
2311
2312         if (--p->rt.time_slice)
2313                 return;
2314
2315         p->rt.time_slice = sched_rr_timeslice;
2316
2317         /*
2318          * Requeue to the end of the queue if we (and all of our ancestors) are
2319          * not the only element on the queue.
2320          */
2321         for_each_sched_rt_entity(rt_se) {
2322                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2323                         requeue_task_rt(rq, p, 0);
2324                         resched_curr(rq);
2325                         return;
2326                 }
2327         }
2328 }
2329
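/*
 * Editorial sketch (not part of the original source): for SCHED_RR,
 * task_tick_rt() above decrements p->rt.time_slice once per tick. With the
 * default sched_rr_timeslice of RR_TIMESLICE (nominally 100ms worth of
 * jiffies, e.g. 100 ticks at HZ=1000), a round-robin task that shares its
 * priority list with at least one sibling is moved to the tail of that list
 * and rescheduled roughly every 100ms; a SCHED_RR task alone on its list
 * just has its slice refilled and keeps running.
 */
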
2330 static void set_curr_task_rt(struct rq *rq)
2331 {
2332         struct task_struct *p = rq->curr;
2333
2334         p->se.exec_start = rq_clock_task(rq);
2335
2336         /* The running task is never eligible for pushing */
2337         dequeue_pushable_task(rq, p);
2338 }
2339
2340 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2341 {
2342         /*
2343          * Time slice is 0 for SCHED_FIFO tasks
2344          */
2345         if (task->policy == SCHED_RR)
2346                 return sched_rr_timeslice;
2347         else
2348                 return 0;
2349 }
2350
2351 const struct sched_class rt_sched_class = {
2352         .next                   = &fair_sched_class,
2353         .enqueue_task           = enqueue_task_rt,
2354         .dequeue_task           = dequeue_task_rt,
2355         .yield_task             = yield_task_rt,
2356
2357         .check_preempt_curr     = check_preempt_curr_rt,
2358
2359         .pick_next_task         = pick_next_task_rt,
2360         .put_prev_task          = put_prev_task_rt,
2361
2362 #ifdef CONFIG_SMP
2363         .select_task_rq         = select_task_rq_rt,
2364
2365         .set_cpus_allowed       = set_cpus_allowed_common,
2366         .rq_online              = rq_online_rt,
2367         .rq_offline             = rq_offline_rt,
2368         .task_woken             = task_woken_rt,
2369         .switched_from          = switched_from_rt,
2370 #endif
2371
2372         .set_curr_task          = set_curr_task_rt,
2373         .task_tick              = task_tick_rt,
2374
2375         .get_rr_interval        = get_rr_interval_rt,
2376
2377         .prio_changed           = prio_changed_rt,
2378         .switched_to            = switched_to_rt,
2379
2380         .update_curr            = update_curr_rt,
2381 };
2382
2383 #ifdef CONFIG_RT_GROUP_SCHED
2384 /*
2385  * Ensure that the real time constraints are schedulable.
2386  */
2387 static DEFINE_MUTEX(rt_constraints_mutex);
2388
2389 /* Must be called with tasklist_lock held */
2390 static inline int tg_has_rt_tasks(struct task_group *tg)
2391 {
2392         struct task_struct *g, *p;
2393
2394         /*
2395          * Autogroups do not have RT tasks; see autogroup_create().
2396          */
2397         if (task_group_is_autogroup(tg))
2398                 return 0;
2399
2400         for_each_process_thread(g, p) {
2401                 if (rt_task(p) && task_group(p) == tg)
2402                         return 1;
2403         }
2404
2405         return 0;
2406 }
2407
2408 struct rt_schedulable_data {
2409         struct task_group *tg;
2410         u64 rt_period;
2411         u64 rt_runtime;
2412 };
2413
2414 static int tg_rt_schedulable(struct task_group *tg, void *data)
2415 {
2416         struct rt_schedulable_data *d = data;
2417         struct task_group *child;
2418         unsigned long total, sum = 0;
2419         u64 period, runtime;
2420
2421         period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2422         runtime = tg->rt_bandwidth.rt_runtime;
2423
2424         if (tg == d->tg) {
2425                 period = d->rt_period;
2426                 runtime = d->rt_runtime;
2427         }
2428
2429         /*
2430          * Cannot have more runtime than the period.
2431          */
2432         if (runtime > period && runtime != RUNTIME_INF)
2433                 return -EINVAL;
2434
2435         /*
2436          * Ensure we don't starve existing RT tasks.
2437          */
2438         if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
2439                 return -EBUSY;
2440
2441         total = to_ratio(period, runtime);
2442
2443         /*
2444          * Nobody can have more than the global setting allows.
2445          */
2446         if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2447                 return -EINVAL;
2448
2449         /*
2450          * The sum of our children's runtime should not exceed our own.
2451          */
2452         list_for_each_entry_rcu(child, &tg->children, siblings) {
2453                 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2454                 runtime = child->rt_bandwidth.rt_runtime;
2455
2456                 if (child == d->tg) {
2457                         period = d->rt_period;
2458                         runtime = d->rt_runtime;
2459                 }
2460
2461                 sum += to_ratio(period, runtime);
2462         }
2463
2464         if (sum > total)
2465                 return -EINVAL;
2466
2467         return 0;
2468 }
2469
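/*
 * Editorial sketch (not part of the original source): to_ratio() reduces a
 * (period, runtime) pair to a fixed-point runtime/period fraction, so the
 * checks above amount to simple fraction comparisons. With the usual global
 * settings of a 1s period and 950ms runtime (95%), a group requesting 500ms
 * per 1s (50%) passes the global cap, and its children may then be granted
 * at most 50% combined, e.g. two children with 300ms and 200ms per 1s, but
 * not 300ms and 300ms.
 */
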
2470 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2471 {
2472         int ret;
2473
2474         struct rt_schedulable_data data = {
2475                 .tg = tg,
2476                 .rt_period = period,
2477                 .rt_runtime = runtime,
2478         };
2479
2480         rcu_read_lock();
2481         ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2482         rcu_read_unlock();
2483
2484         return ret;
2485 }
2486
2487 static int tg_set_rt_bandwidth(struct task_group *tg,
2488                 u64 rt_period, u64 rt_runtime)
2489 {
2490         int i, err = 0;
2491
2492         /*
2493          * Disallowing the root group RT runtime is BAD; it would disallow the
2494          * kernel from creating (and/or operating) RT threads.
2495          */
2496         if (tg == &root_task_group && rt_runtime == 0)
2497                 return -EINVAL;
2498
2499         /* A zero period doesn't make any sense. */
2500         if (rt_period == 0)
2501                 return -EINVAL;
2502
2503         mutex_lock(&rt_constraints_mutex);
2504         read_lock(&tasklist_lock);
2505         err = __rt_schedulable(tg, rt_period, rt_runtime);
2506         if (err)
2507                 goto unlock;
2508
2509         raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2510         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2511         tg->rt_bandwidth.rt_runtime = rt_runtime;
2512
2513         for_each_possible_cpu(i) {
2514                 struct rt_rq *rt_rq = tg->rt_rq[i];
2515
2516                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2517                 rt_rq->rt_runtime = rt_runtime;
2518                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2519         }
2520         raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2521 unlock:
2522         read_unlock(&tasklist_lock);
2523         mutex_unlock(&rt_constraints_mutex);
2524
2525         return err;
2526 }
2527
2528 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2529 {
2530         u64 rt_runtime, rt_period;
2531
2532         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2533         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2534         if (rt_runtime_us < 0)
2535                 rt_runtime = RUNTIME_INF;
2536         else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2537                 return -EINVAL;
2538
2539         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2540 }
2541
2542 long sched_group_rt_runtime(struct task_group *tg)
2543 {
2544         u64 rt_runtime_us;
2545
2546         if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2547                 return -1;
2548
2549         rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2550         do_div(rt_runtime_us, NSEC_PER_USEC);
2551         return rt_runtime_us;
2552 }
2553
2554 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2555 {
2556         u64 rt_runtime, rt_period;
2557
2558         if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2559                 return -EINVAL;
2560
2561         rt_period = rt_period_us * NSEC_PER_USEC;
2562         rt_runtime = tg->rt_bandwidth.rt_runtime;
2563
2564         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2565 }
2566
2567 long sched_group_rt_period(struct task_group *tg)
2568 {
2569         u64 rt_period_us;
2570
2571         rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2572         do_div(rt_period_us, NSEC_PER_USEC);
2573         return rt_period_us;
2574 }
2575
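/*
 * Editorial note (not part of the original source): the four helpers above
 * back the per-group bandwidth knobs. With the cgroup v1 cpu controller they
 * are typically driven through cpu.rt_runtime_us and cpu.rt_period_us, e.g.
 * writing 500000 to cpu.rt_runtime_us grants the group 500ms of RT time per
 * (default 1s) period, writing -1 selects RUNTIME_INF, and the read side
 * reports the values back in microseconds.
 */
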
2576 static int sched_rt_global_constraints(void)
2577 {
2578         int ret = 0;
2579
2580         mutex_lock(&rt_constraints_mutex);
2581         read_lock(&tasklist_lock);
2582         ret = __rt_schedulable(NULL, 0, 0);
2583         read_unlock(&tasklist_lock);
2584         mutex_unlock(&rt_constraints_mutex);
2585
2586         return ret;
2587 }
2588
2589 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2590 {
2591         /* Don't accept realtime tasks when there is no way for them to run */
2592         if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2593                 return 0;
2594
2595         return 1;
2596 }
2597
2598 #else /* !CONFIG_RT_GROUP_SCHED */
2599 static int sched_rt_global_constraints(void)
2600 {
2601         unsigned long flags;
2602         int i;
2603
2604         raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2605         for_each_possible_cpu(i) {
2606                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2607
2608                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2609                 rt_rq->rt_runtime = global_rt_runtime();
2610                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2611         }
2612         raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2613
2614         return 0;
2615 }
2616 #endif /* CONFIG_RT_GROUP_SCHED */
2617
2618 static int sched_rt_global_validate(void)
2619 {
2620         if (sysctl_sched_rt_period <= 0)
2621                 return -EINVAL;
2622
2623         if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2624                 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
2625                 return -EINVAL;
2626
2627         return 0;
2628 }
2629
2630 static void sched_rt_do_global(void)
2631 {
2632         def_rt_bandwidth.rt_runtime = global_rt_runtime();
2633         def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2634 }
2635
2636 int sched_rt_handler(struct ctl_table *table, int write,
2637                 void __user *buffer, size_t *lenp,
2638                 loff_t *ppos)
2639 {
2640         int old_period, old_runtime;
2641         static DEFINE_MUTEX(mutex);
2642         int ret;
2643
2644         mutex_lock(&mutex);
2645         old_period = sysctl_sched_rt_period;
2646         old_runtime = sysctl_sched_rt_runtime;
2647
2648         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2649
2650         if (!ret && write) {
2651                 ret = sched_rt_global_validate();
2652                 if (ret)
2653                         goto undo;
2654
2655                 ret = sched_dl_global_validate();
2656                 if (ret)
2657                         goto undo;
2658
2659                 ret = sched_rt_global_constraints();
2660                 if (ret)
2661                         goto undo;
2662
2663                 sched_rt_do_global();
2664                 sched_dl_do_global();
2665         }
2666         if (0) {
2667 undo:
2668                 sysctl_sched_rt_period = old_period;
2669                 sysctl_sched_rt_runtime = old_runtime;
2670         }
2671         mutex_unlock(&mutex);
2672
2673         return ret;
2674 }
2675
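/*
 * Editorial note (not part of the original source): sched_rt_handler() above
 * runs when /proc/sys/kernel/sched_rt_period_us or
 * /proc/sys/kernel/sched_rt_runtime_us is written. With the usual defaults
 * of 1000000 and 950000, RT tasks may consume at most 95% of each 1s period;
 * writing -1 to sched_rt_runtime_us lifts the throttle entirely, and any
 * combination that fails validation (e.g. runtime larger than period) is
 * rolled back to the previous values.
 */
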
2676 int sched_rr_handler(struct ctl_table *table, int write,
2677                 void __user *buffer, size_t *lenp,
2678                 loff_t *ppos)
2679 {
2680         int ret;
2681         static DEFINE_MUTEX(mutex);
2682
2683         mutex_lock(&mutex);
2684         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2685         /*
2686          * Make sure that internally we keep jiffies.
2687          * Also, writing zero resets the timeslice to default:
2688          */
2689         if (!ret && write) {
2690                 sched_rr_timeslice =
2691                         sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2692                         msecs_to_jiffies(sysctl_sched_rr_timeslice);
2693         }
2694         mutex_unlock(&mutex);
2695         return ret;
2696 }
2697
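/*
 * Editorial note (not part of the original source): sched_rr_handler() above
 * services /proc/sys/kernel/sched_rr_timeslice_ms. The sysctl value is kept
 * in milliseconds while the scheduler works in jiffies, so e.g. writing 30
 * sets sched_rr_timeslice = msecs_to_jiffies(30), and writing 0 (or a
 * negative value) restores the built-in RR_TIMESLICE default.
 */
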
2698 #ifdef CONFIG_SCHED_DEBUG
2699 void print_rt_stats(struct seq_file *m, int cpu)
2700 {
2701         rt_rq_iter_t iter;
2702         struct rt_rq *rt_rq;
2703
2704         rcu_read_lock();
2705         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2706                 print_rt_rq(m, cpu, rt_rq);
2707         rcu_read_unlock();
2708 }
2709 #endif /* CONFIG_SCHED_DEBUG */