GNU Linux-libre 4.9.317-gnu1
[releases.git] / kernel / sched / rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
10
11 int sched_rr_timeslice = RR_TIMESLICE;
12 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
13
14 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
15
16 struct rt_bandwidth def_rt_bandwidth;
17
18 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
19 {
20         struct rt_bandwidth *rt_b =
21                 container_of(timer, struct rt_bandwidth, rt_period_timer);
22         int idle = 0;
23         int overrun;
24
25         raw_spin_lock(&rt_b->rt_runtime_lock);
26         for (;;) {
27                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
28                 if (!overrun)
29                         break;
30
31                 raw_spin_unlock(&rt_b->rt_runtime_lock);
32                 idle = do_sched_rt_period_timer(rt_b, overrun);
33                 raw_spin_lock(&rt_b->rt_runtime_lock);
34         }
35         if (idle)
36                 rt_b->rt_period_active = 0;
37         raw_spin_unlock(&rt_b->rt_runtime_lock);
38
39         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
40 }
41
42 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
43 {
44         rt_b->rt_period = ns_to_ktime(period);
45         rt_b->rt_runtime = runtime;
46
47         raw_spin_lock_init(&rt_b->rt_runtime_lock);
48
49         hrtimer_init(&rt_b->rt_period_timer,
50                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
51         rt_b->rt_period_timer.function = sched_rt_period_timer;
52 }
53
54 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
55 {
56         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
57                 return;
58
59         raw_spin_lock(&rt_b->rt_runtime_lock);
60         if (!rt_b->rt_period_active) {
61                 rt_b->rt_period_active = 1;
62                 /*
63                  * SCHED_DEADLINE updates the bandwidth, as a run away
64                  * RT task with a DL task could hog a CPU. But DL does
65                  * not reset the period. If a deadline task was running
66                  * without an RT task running, it can cause RT tasks to
67                  * throttle when they start up. Kick the timer right away
68                  * to update the period.
69                  */
70                 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
71                 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
72         }
73         raw_spin_unlock(&rt_b->rt_runtime_lock);
74 }
75
76 void init_rt_rq(struct rt_rq *rt_rq)
77 {
78         struct rt_prio_array *array;
79         int i;
80
81         array = &rt_rq->active;
82         for (i = 0; i < MAX_RT_PRIO; i++) {
83                 INIT_LIST_HEAD(array->queue + i);
84                 __clear_bit(i, array->bitmap);
85         }
86         /* delimiter for bitsearch: */
87         __set_bit(MAX_RT_PRIO, array->bitmap);
88
89 #if defined CONFIG_SMP
90         rt_rq->highest_prio.curr = MAX_RT_PRIO;
91         rt_rq->highest_prio.next = MAX_RT_PRIO;
92         rt_rq->rt_nr_migratory = 0;
93         rt_rq->overloaded = 0;
94         plist_head_init(&rt_rq->pushable_tasks);
95 #endif /* CONFIG_SMP */
96         /* We start is dequeued state, because no RT tasks are queued */
97         rt_rq->rt_queued = 0;
98
99         rt_rq->rt_time = 0;
100         rt_rq->rt_throttled = 0;
101         rt_rq->rt_runtime = 0;
102         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
103 }
104
105 #ifdef CONFIG_RT_GROUP_SCHED
106 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
107 {
108         hrtimer_cancel(&rt_b->rt_period_timer);
109 }
110
111 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
112
113 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
114 {
115 #ifdef CONFIG_SCHED_DEBUG
116         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
117 #endif
118         return container_of(rt_se, struct task_struct, rt);
119 }
120
121 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
122 {
123         return rt_rq->rq;
124 }
125
126 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
127 {
128         return rt_se->rt_rq;
129 }
130
131 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
132 {
133         struct rt_rq *rt_rq = rt_se->rt_rq;
134
135         return rt_rq->rq;
136 }
137
138 void free_rt_sched_group(struct task_group *tg)
139 {
140         int i;
141
142         if (tg->rt_se)
143                 destroy_rt_bandwidth(&tg->rt_bandwidth);
144
145         for_each_possible_cpu(i) {
146                 if (tg->rt_rq)
147                         kfree(tg->rt_rq[i]);
148                 if (tg->rt_se)
149                         kfree(tg->rt_se[i]);
150         }
151
152         kfree(tg->rt_rq);
153         kfree(tg->rt_se);
154 }
155
156 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
157                 struct sched_rt_entity *rt_se, int cpu,
158                 struct sched_rt_entity *parent)
159 {
160         struct rq *rq = cpu_rq(cpu);
161
162         rt_rq->highest_prio.curr = MAX_RT_PRIO;
163         rt_rq->rt_nr_boosted = 0;
164         rt_rq->rq = rq;
165         rt_rq->tg = tg;
166
167         tg->rt_rq[cpu] = rt_rq;
168         tg->rt_se[cpu] = rt_se;
169
170         if (!rt_se)
171                 return;
172
173         if (!parent)
174                 rt_se->rt_rq = &rq->rt;
175         else
176                 rt_se->rt_rq = parent->my_q;
177
178         rt_se->my_q = rt_rq;
179         rt_se->parent = parent;
180         INIT_LIST_HEAD(&rt_se->run_list);
181 }
182
183 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
184 {
185         struct rt_rq *rt_rq;
186         struct sched_rt_entity *rt_se;
187         int i;
188
189         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
190         if (!tg->rt_rq)
191                 goto err;
192         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
193         if (!tg->rt_se)
194                 goto err;
195
196         init_rt_bandwidth(&tg->rt_bandwidth,
197                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
198
199         for_each_possible_cpu(i) {
200                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
201                                      GFP_KERNEL, cpu_to_node(i));
202                 if (!rt_rq)
203                         goto err;
204
205                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
206                                      GFP_KERNEL, cpu_to_node(i));
207                 if (!rt_se)
208                         goto err_free_rq;
209
210                 init_rt_rq(rt_rq);
211                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
212                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
213         }
214
215         return 1;
216
217 err_free_rq:
218         kfree(rt_rq);
219 err:
220         return 0;
221 }
222
223 #else /* CONFIG_RT_GROUP_SCHED */
224
225 #define rt_entity_is_task(rt_se) (1)
226
227 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
228 {
229         return container_of(rt_se, struct task_struct, rt);
230 }
231
232 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
233 {
234         return container_of(rt_rq, struct rq, rt);
235 }
236
237 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
238 {
239         struct task_struct *p = rt_task_of(rt_se);
240
241         return task_rq(p);
242 }
243
244 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
245 {
246         struct rq *rq = rq_of_rt_se(rt_se);
247
248         return &rq->rt;
249 }
250
251 void free_rt_sched_group(struct task_group *tg) { }
252
253 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
254 {
255         return 1;
256 }
257 #endif /* CONFIG_RT_GROUP_SCHED */
258
259 #ifdef CONFIG_SMP
260
261 static void pull_rt_task(struct rq *this_rq);
262
263 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
264 {
265         /* Try to pull RT tasks here if we lower this rq's prio */
266         return rq->rt.highest_prio.curr > prev->prio;
267 }
268
269 static inline int rt_overloaded(struct rq *rq)
270 {
271         return atomic_read(&rq->rd->rto_count);
272 }
273
274 static inline void rt_set_overload(struct rq *rq)
275 {
276         if (!rq->online)
277                 return;
278
279         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
280         /*
281          * Make sure the mask is visible before we set
282          * the overload count. That is checked to determine
283          * if we should look at the mask. It would be a shame
284          * if we looked at the mask, but the mask was not
285          * updated yet.
286          *
287          * Matched by the barrier in pull_rt_task().
288          */
289         smp_wmb();
290         atomic_inc(&rq->rd->rto_count);
291 }
292
293 static inline void rt_clear_overload(struct rq *rq)
294 {
295         if (!rq->online)
296                 return;
297
298         /* the order here really doesn't matter */
299         atomic_dec(&rq->rd->rto_count);
300         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
301 }
302
303 static void update_rt_migration(struct rt_rq *rt_rq)
304 {
305         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
306                 if (!rt_rq->overloaded) {
307                         rt_set_overload(rq_of_rt_rq(rt_rq));
308                         rt_rq->overloaded = 1;
309                 }
310         } else if (rt_rq->overloaded) {
311                 rt_clear_overload(rq_of_rt_rq(rt_rq));
312                 rt_rq->overloaded = 0;
313         }
314 }
315
316 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
317 {
318         struct task_struct *p;
319
320         if (!rt_entity_is_task(rt_se))
321                 return;
322
323         p = rt_task_of(rt_se);
324         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
325
326         rt_rq->rt_nr_total++;
327         if (tsk_nr_cpus_allowed(p) > 1)
328                 rt_rq->rt_nr_migratory++;
329
330         update_rt_migration(rt_rq);
331 }
332
333 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
334 {
335         struct task_struct *p;
336
337         if (!rt_entity_is_task(rt_se))
338                 return;
339
340         p = rt_task_of(rt_se);
341         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
342
343         rt_rq->rt_nr_total--;
344         if (tsk_nr_cpus_allowed(p) > 1)
345                 rt_rq->rt_nr_migratory--;
346
347         update_rt_migration(rt_rq);
348 }
349
350 static inline int has_pushable_tasks(struct rq *rq)
351 {
352         return !plist_head_empty(&rq->rt.pushable_tasks);
353 }
354
355 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
356 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
357
358 static void push_rt_tasks(struct rq *);
359 static void pull_rt_task(struct rq *);
360
361 static inline void queue_push_tasks(struct rq *rq)
362 {
363         if (!has_pushable_tasks(rq))
364                 return;
365
366         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
367 }
368
369 static inline void queue_pull_task(struct rq *rq)
370 {
371         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
372 }
373
374 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
375 {
376         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
377         plist_node_init(&p->pushable_tasks, p->prio);
378         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
379
380         /* Update the highest prio pushable task */
381         if (p->prio < rq->rt.highest_prio.next)
382                 rq->rt.highest_prio.next = p->prio;
383 }
384
385 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
386 {
387         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
388
389         /* Update the new highest prio pushable task */
390         if (has_pushable_tasks(rq)) {
391                 p = plist_first_entry(&rq->rt.pushable_tasks,
392                                       struct task_struct, pushable_tasks);
393                 rq->rt.highest_prio.next = p->prio;
394         } else
395                 rq->rt.highest_prio.next = MAX_RT_PRIO;
396 }
397
398 #else
399
400 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
401 {
402 }
403
404 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
405 {
406 }
407
408 static inline
409 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
410 {
411 }
412
413 static inline
414 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
415 {
416 }
417
418 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
419 {
420         return false;
421 }
422
423 static inline void pull_rt_task(struct rq *this_rq)
424 {
425 }
426
427 static inline void queue_push_tasks(struct rq *rq)
428 {
429 }
430 #endif /* CONFIG_SMP */
431
432 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
433 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
434
435 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
436 {
437         return rt_se->on_rq;
438 }
439
440 #ifdef CONFIG_RT_GROUP_SCHED
441
442 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
443 {
444         if (!rt_rq->tg)
445                 return RUNTIME_INF;
446
447         return rt_rq->rt_runtime;
448 }
449
450 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
451 {
452         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
453 }
454
455 typedef struct task_group *rt_rq_iter_t;
456
457 static inline struct task_group *next_task_group(struct task_group *tg)
458 {
459         do {
460                 tg = list_entry_rcu(tg->list.next,
461                         typeof(struct task_group), list);
462         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
463
464         if (&tg->list == &task_groups)
465                 tg = NULL;
466
467         return tg;
468 }
469
470 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
471         for (iter = container_of(&task_groups, typeof(*iter), list);    \
472                 (iter = next_task_group(iter)) &&                       \
473                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
474
475 #define for_each_sched_rt_entity(rt_se) \
476         for (; rt_se; rt_se = rt_se->parent)
477
478 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
479 {
480         return rt_se->my_q;
481 }
482
483 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
484 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
485
486 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
487 {
488         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
489         struct rq *rq = rq_of_rt_rq(rt_rq);
490         struct sched_rt_entity *rt_se;
491
492         int cpu = cpu_of(rq);
493
494         rt_se = rt_rq->tg->rt_se[cpu];
495
496         if (rt_rq->rt_nr_running) {
497                 if (!rt_se)
498                         enqueue_top_rt_rq(rt_rq);
499                 else if (!on_rt_rq(rt_se))
500                         enqueue_rt_entity(rt_se, 0);
501
502                 if (rt_rq->highest_prio.curr < curr->prio)
503                         resched_curr(rq);
504         }
505 }
506
507 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
508 {
509         struct sched_rt_entity *rt_se;
510         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
511
512         rt_se = rt_rq->tg->rt_se[cpu];
513
514         if (!rt_se)
515                 dequeue_top_rt_rq(rt_rq);
516         else if (on_rt_rq(rt_se))
517                 dequeue_rt_entity(rt_se, 0);
518 }
519
520 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
521 {
522         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
523 }
524
525 static int rt_se_boosted(struct sched_rt_entity *rt_se)
526 {
527         struct rt_rq *rt_rq = group_rt_rq(rt_se);
528         struct task_struct *p;
529
530         if (rt_rq)
531                 return !!rt_rq->rt_nr_boosted;
532
533         p = rt_task_of(rt_se);
534         return p->prio != p->normal_prio;
535 }
536
537 #ifdef CONFIG_SMP
538 static inline const struct cpumask *sched_rt_period_mask(void)
539 {
540         return this_rq()->rd->span;
541 }
542 #else
543 static inline const struct cpumask *sched_rt_period_mask(void)
544 {
545         return cpu_online_mask;
546 }
547 #endif
548
549 static inline
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
551 {
552         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
553 }
554
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
556 {
557         return &rt_rq->tg->rt_bandwidth;
558 }
559
560 #else /* !CONFIG_RT_GROUP_SCHED */
561
562 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
563 {
564         return rt_rq->rt_runtime;
565 }
566
567 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
568 {
569         return ktime_to_ns(def_rt_bandwidth.rt_period);
570 }
571
572 typedef struct rt_rq *rt_rq_iter_t;
573
574 #define for_each_rt_rq(rt_rq, iter, rq) \
575         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
576
577 #define for_each_sched_rt_entity(rt_se) \
578         for (; rt_se; rt_se = NULL)
579
580 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
581 {
582         return NULL;
583 }
584
585 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
586 {
587         struct rq *rq = rq_of_rt_rq(rt_rq);
588
589         if (!rt_rq->rt_nr_running)
590                 return;
591
592         enqueue_top_rt_rq(rt_rq);
593         resched_curr(rq);
594 }
595
596 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
597 {
598         dequeue_top_rt_rq(rt_rq);
599 }
600
601 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
602 {
603         return rt_rq->rt_throttled;
604 }
605
606 static inline const struct cpumask *sched_rt_period_mask(void)
607 {
608         return cpu_online_mask;
609 }
610
611 static inline
612 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
613 {
614         return &cpu_rq(cpu)->rt;
615 }
616
617 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
618 {
619         return &def_rt_bandwidth;
620 }
621
622 #endif /* CONFIG_RT_GROUP_SCHED */
623
624 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
625 {
626         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
627
628         return (hrtimer_active(&rt_b->rt_period_timer) ||
629                 rt_rq->rt_time < rt_b->rt_runtime);
630 }
631
632 #ifdef CONFIG_SMP
633 /*
634  * We ran out of runtime, see if we can borrow some from our neighbours.
635  */
636 static void do_balance_runtime(struct rt_rq *rt_rq)
637 {
638         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
639         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
640         int i, weight;
641         u64 rt_period;
642
643         weight = cpumask_weight(rd->span);
644
645         raw_spin_lock(&rt_b->rt_runtime_lock);
646         rt_period = ktime_to_ns(rt_b->rt_period);
647         for_each_cpu(i, rd->span) {
648                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
649                 s64 diff;
650
651                 if (iter == rt_rq)
652                         continue;
653
654                 raw_spin_lock(&iter->rt_runtime_lock);
655                 /*
656                  * Either all rqs have inf runtime and there's nothing to steal
657                  * or __disable_runtime() below sets a specific rq to inf to
658                  * indicate its been disabled and disalow stealing.
659                  */
660                 if (iter->rt_runtime == RUNTIME_INF)
661                         goto next;
662
663                 /*
664                  * From runqueues with spare time, take 1/n part of their
665                  * spare time, but no more than our period.
666                  */
667                 diff = iter->rt_runtime - iter->rt_time;
668                 if (diff > 0) {
669                         diff = div_u64((u64)diff, weight);
670                         if (rt_rq->rt_runtime + diff > rt_period)
671                                 diff = rt_period - rt_rq->rt_runtime;
672                         iter->rt_runtime -= diff;
673                         rt_rq->rt_runtime += diff;
674                         if (rt_rq->rt_runtime == rt_period) {
675                                 raw_spin_unlock(&iter->rt_runtime_lock);
676                                 break;
677                         }
678                 }
679 next:
680                 raw_spin_unlock(&iter->rt_runtime_lock);
681         }
682         raw_spin_unlock(&rt_b->rt_runtime_lock);
683 }
684
685 /*
686  * Ensure this RQ takes back all the runtime it lend to its neighbours.
687  */
688 static void __disable_runtime(struct rq *rq)
689 {
690         struct root_domain *rd = rq->rd;
691         rt_rq_iter_t iter;
692         struct rt_rq *rt_rq;
693
694         if (unlikely(!scheduler_running))
695                 return;
696
697         for_each_rt_rq(rt_rq, iter, rq) {
698                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
699                 s64 want;
700                 int i;
701
702                 raw_spin_lock(&rt_b->rt_runtime_lock);
703                 raw_spin_lock(&rt_rq->rt_runtime_lock);
704                 /*
705                  * Either we're all inf and nobody needs to borrow, or we're
706                  * already disabled and thus have nothing to do, or we have
707                  * exactly the right amount of runtime to take out.
708                  */
709                 if (rt_rq->rt_runtime == RUNTIME_INF ||
710                                 rt_rq->rt_runtime == rt_b->rt_runtime)
711                         goto balanced;
712                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
713
714                 /*
715                  * Calculate the difference between what we started out with
716                  * and what we current have, that's the amount of runtime
717                  * we lend and now have to reclaim.
718                  */
719                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
720
721                 /*
722                  * Greedy reclaim, take back as much as we can.
723                  */
724                 for_each_cpu(i, rd->span) {
725                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
726                         s64 diff;
727
728                         /*
729                          * Can't reclaim from ourselves or disabled runqueues.
730                          */
731                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
732                                 continue;
733
734                         raw_spin_lock(&iter->rt_runtime_lock);
735                         if (want > 0) {
736                                 diff = min_t(s64, iter->rt_runtime, want);
737                                 iter->rt_runtime -= diff;
738                                 want -= diff;
739                         } else {
740                                 iter->rt_runtime -= want;
741                                 want -= want;
742                         }
743                         raw_spin_unlock(&iter->rt_runtime_lock);
744
745                         if (!want)
746                                 break;
747                 }
748
749                 raw_spin_lock(&rt_rq->rt_runtime_lock);
750                 /*
751                  * We cannot be left wanting - that would mean some runtime
752                  * leaked out of the system.
753                  */
754                 BUG_ON(want);
755 balanced:
756                 /*
757                  * Disable all the borrow logic by pretending we have inf
758                  * runtime - in which case borrowing doesn't make sense.
759                  */
760                 rt_rq->rt_runtime = RUNTIME_INF;
761                 rt_rq->rt_throttled = 0;
762                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
763                 raw_spin_unlock(&rt_b->rt_runtime_lock);
764
765                 /* Make rt_rq available for pick_next_task() */
766                 sched_rt_rq_enqueue(rt_rq);
767         }
768 }
769
770 static void __enable_runtime(struct rq *rq)
771 {
772         rt_rq_iter_t iter;
773         struct rt_rq *rt_rq;
774
775         if (unlikely(!scheduler_running))
776                 return;
777
778         /*
779          * Reset each runqueue's bandwidth settings
780          */
781         for_each_rt_rq(rt_rq, iter, rq) {
782                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
783
784                 raw_spin_lock(&rt_b->rt_runtime_lock);
785                 raw_spin_lock(&rt_rq->rt_runtime_lock);
786                 rt_rq->rt_runtime = rt_b->rt_runtime;
787                 rt_rq->rt_time = 0;
788                 rt_rq->rt_throttled = 0;
789                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
790                 raw_spin_unlock(&rt_b->rt_runtime_lock);
791         }
792 }
793
794 static void balance_runtime(struct rt_rq *rt_rq)
795 {
796         if (!sched_feat(RT_RUNTIME_SHARE))
797                 return;
798
799         if (rt_rq->rt_time > rt_rq->rt_runtime) {
800                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
801                 do_balance_runtime(rt_rq);
802                 raw_spin_lock(&rt_rq->rt_runtime_lock);
803         }
804 }
805 #else /* !CONFIG_SMP */
806 static inline void balance_runtime(struct rt_rq *rt_rq) {}
807 #endif /* CONFIG_SMP */
808
809 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
810 {
811         int i, idle = 1, throttled = 0;
812         const struct cpumask *span;
813
814         span = sched_rt_period_mask();
815 #ifdef CONFIG_RT_GROUP_SCHED
816         /*
817          * FIXME: isolated CPUs should really leave the root task group,
818          * whether they are isolcpus or were isolated via cpusets, lest
819          * the timer run on a CPU which does not service all runqueues,
820          * potentially leaving other CPUs indefinitely throttled.  If
821          * isolation is really required, the user will turn the throttle
822          * off to kill the perturbations it causes anyway.  Meanwhile,
823          * this maintains functionality for boot and/or troubleshooting.
824          */
825         if (rt_b == &root_task_group.rt_bandwidth)
826                 span = cpu_online_mask;
827 #endif
828         for_each_cpu(i, span) {
829                 int enqueue = 0;
830                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
831                 struct rq *rq = rq_of_rt_rq(rt_rq);
832
833                 raw_spin_lock(&rq->lock);
834                 update_rq_clock(rq);
835
836                 if (rt_rq->rt_time) {
837                         u64 runtime;
838
839                         raw_spin_lock(&rt_rq->rt_runtime_lock);
840                         if (rt_rq->rt_throttled)
841                                 balance_runtime(rt_rq);
842                         runtime = rt_rq->rt_runtime;
843                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
844                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
845                                 rt_rq->rt_throttled = 0;
846                                 enqueue = 1;
847
848                                 /*
849                                  * When we're idle and a woken (rt) task is
850                                  * throttled check_preempt_curr() will set
851                                  * skip_update and the time between the wakeup
852                                  * and this unthrottle will get accounted as
853                                  * 'runtime'.
854                                  */
855                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
856                                         rq_clock_skip_update(rq, false);
857                         }
858                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
859                                 idle = 0;
860                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
861                 } else if (rt_rq->rt_nr_running) {
862                         idle = 0;
863                         if (!rt_rq_throttled(rt_rq))
864                                 enqueue = 1;
865                 }
866                 if (rt_rq->rt_throttled)
867                         throttled = 1;
868
869                 if (enqueue)
870                         sched_rt_rq_enqueue(rt_rq);
871                 raw_spin_unlock(&rq->lock);
872         }
873
874         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
875                 return 1;
876
877         return idle;
878 }
879
880 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
881 {
882 #ifdef CONFIG_RT_GROUP_SCHED
883         struct rt_rq *rt_rq = group_rt_rq(rt_se);
884
885         if (rt_rq)
886                 return rt_rq->highest_prio.curr;
887 #endif
888
889         return rt_task_of(rt_se)->prio;
890 }
891
892 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
893 {
894         u64 runtime = sched_rt_runtime(rt_rq);
895
896         if (rt_rq->rt_throttled)
897                 return rt_rq_throttled(rt_rq);
898
899         if (runtime >= sched_rt_period(rt_rq))
900                 return 0;
901
902         balance_runtime(rt_rq);
903         runtime = sched_rt_runtime(rt_rq);
904         if (runtime == RUNTIME_INF)
905                 return 0;
906
907         if (rt_rq->rt_time > runtime) {
908                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
909
910                 /*
911                  * Don't actually throttle groups that have no runtime assigned
912                  * but accrue some time due to boosting.
913                  */
914                 if (likely(rt_b->rt_runtime)) {
915                         rt_rq->rt_throttled = 1;
916                         printk_deferred_once("sched: RT throttling activated\n");
917                 } else {
918                         /*
919                          * In case we did anyway, make it go away,
920                          * replenishment is a joke, since it will replenish us
921                          * with exactly 0 ns.
922                          */
923                         rt_rq->rt_time = 0;
924                 }
925
926                 if (rt_rq_throttled(rt_rq)) {
927                         sched_rt_rq_dequeue(rt_rq);
928                         return 1;
929                 }
930         }
931
932         return 0;
933 }
934
935 /*
936  * Update the current task's runtime statistics. Skip current tasks that
937  * are not in our scheduling class.
938  */
939 static void update_curr_rt(struct rq *rq)
940 {
941         struct task_struct *curr = rq->curr;
942         struct sched_rt_entity *rt_se = &curr->rt;
943         u64 delta_exec;
944
945         if (curr->sched_class != &rt_sched_class)
946                 return;
947
948         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
949         if (unlikely((s64)delta_exec <= 0))
950                 return;
951
952         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
953         cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
954
955         schedstat_set(curr->se.statistics.exec_max,
956                       max(curr->se.statistics.exec_max, delta_exec));
957
958         curr->se.sum_exec_runtime += delta_exec;
959         account_group_exec_runtime(curr, delta_exec);
960
961         curr->se.exec_start = rq_clock_task(rq);
962         cpuacct_charge(curr, delta_exec);
963
964         sched_rt_avg_update(rq, delta_exec);
965
966         if (!rt_bandwidth_enabled())
967                 return;
968
969         for_each_sched_rt_entity(rt_se) {
970                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
971
972                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
973                         raw_spin_lock(&rt_rq->rt_runtime_lock);
974                         rt_rq->rt_time += delta_exec;
975                         if (sched_rt_runtime_exceeded(rt_rq))
976                                 resched_curr(rq);
977                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
978                 }
979         }
980 }
981
982 static void
983 dequeue_top_rt_rq(struct rt_rq *rt_rq)
984 {
985         struct rq *rq = rq_of_rt_rq(rt_rq);
986
987         BUG_ON(&rq->rt != rt_rq);
988
989         if (!rt_rq->rt_queued)
990                 return;
991
992         BUG_ON(!rq->nr_running);
993
994         sub_nr_running(rq, rt_rq->rt_nr_running);
995         rt_rq->rt_queued = 0;
996 }
997
998 static void
999 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1000 {
1001         struct rq *rq = rq_of_rt_rq(rt_rq);
1002
1003         BUG_ON(&rq->rt != rt_rq);
1004
1005         if (rt_rq->rt_queued)
1006                 return;
1007         if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1008                 return;
1009
1010         add_nr_running(rq, rt_rq->rt_nr_running);
1011         rt_rq->rt_queued = 1;
1012 }
1013
1014 #if defined CONFIG_SMP
1015
1016 static void
1017 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1018 {
1019         struct rq *rq = rq_of_rt_rq(rt_rq);
1020
1021 #ifdef CONFIG_RT_GROUP_SCHED
1022         /*
1023          * Change rq's cpupri only if rt_rq is the top queue.
1024          */
1025         if (&rq->rt != rt_rq)
1026                 return;
1027 #endif
1028         if (rq->online && prio < prev_prio)
1029                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1030 }
1031
1032 static void
1033 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1034 {
1035         struct rq *rq = rq_of_rt_rq(rt_rq);
1036
1037 #ifdef CONFIG_RT_GROUP_SCHED
1038         /*
1039          * Change rq's cpupri only if rt_rq is the top queue.
1040          */
1041         if (&rq->rt != rt_rq)
1042                 return;
1043 #endif
1044         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1045                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1046 }
1047
1048 #else /* CONFIG_SMP */
1049
1050 static inline
1051 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1052 static inline
1053 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1054
1055 #endif /* CONFIG_SMP */
1056
1057 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1058 static void
1059 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1060 {
1061         int prev_prio = rt_rq->highest_prio.curr;
1062
1063         if (prio < prev_prio)
1064                 rt_rq->highest_prio.curr = prio;
1065
1066         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1067 }
1068
1069 static void
1070 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1071 {
1072         int prev_prio = rt_rq->highest_prio.curr;
1073
1074         if (rt_rq->rt_nr_running) {
1075
1076                 WARN_ON(prio < prev_prio);
1077
1078                 /*
1079                  * This may have been our highest task, and therefore
1080                  * we may have some recomputation to do
1081                  */
1082                 if (prio == prev_prio) {
1083                         struct rt_prio_array *array = &rt_rq->active;
1084
1085                         rt_rq->highest_prio.curr =
1086                                 sched_find_first_bit(array->bitmap);
1087                 }
1088
1089         } else
1090                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1091
1092         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1093 }
1094
1095 #else
1096
1097 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1098 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1099
1100 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1101
1102 #ifdef CONFIG_RT_GROUP_SCHED
1103
1104 static void
1105 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1106 {
1107         if (rt_se_boosted(rt_se))
1108                 rt_rq->rt_nr_boosted++;
1109
1110         if (rt_rq->tg)
1111                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1112 }
1113
1114 static void
1115 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1116 {
1117         if (rt_se_boosted(rt_se))
1118                 rt_rq->rt_nr_boosted--;
1119
1120         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1121 }
1122
1123 #else /* CONFIG_RT_GROUP_SCHED */
1124
1125 static void
1126 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1127 {
1128         start_rt_bandwidth(&def_rt_bandwidth);
1129 }
1130
1131 static inline
1132 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1133
1134 #endif /* CONFIG_RT_GROUP_SCHED */
1135
1136 static inline
1137 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1138 {
1139         struct rt_rq *group_rq = group_rt_rq(rt_se);
1140
1141         if (group_rq)
1142                 return group_rq->rt_nr_running;
1143         else
1144                 return 1;
1145 }
1146
1147 static inline
1148 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1149 {
1150         struct rt_rq *group_rq = group_rt_rq(rt_se);
1151         struct task_struct *tsk;
1152
1153         if (group_rq)
1154                 return group_rq->rr_nr_running;
1155
1156         tsk = rt_task_of(rt_se);
1157
1158         return (tsk->policy == SCHED_RR) ? 1 : 0;
1159 }
1160
1161 static inline
1162 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1163 {
1164         int prio = rt_se_prio(rt_se);
1165
1166         WARN_ON(!rt_prio(prio));
1167         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1168         rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1169
1170         inc_rt_prio(rt_rq, prio);
1171         inc_rt_migration(rt_se, rt_rq);
1172         inc_rt_group(rt_se, rt_rq);
1173 }
1174
1175 static inline
1176 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1177 {
1178         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1179         WARN_ON(!rt_rq->rt_nr_running);
1180         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1181         rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1182
1183         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1184         dec_rt_migration(rt_se, rt_rq);
1185         dec_rt_group(rt_se, rt_rq);
1186 }
1187
1188 /*
1189  * Change rt_se->run_list location unless SAVE && !MOVE
1190  *
1191  * assumes ENQUEUE/DEQUEUE flags match
1192  */
1193 static inline bool move_entity(unsigned int flags)
1194 {
1195         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1196                 return false;
1197
1198         return true;
1199 }
1200
1201 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1202 {
1203         list_del_init(&rt_se->run_list);
1204
1205         if (list_empty(array->queue + rt_se_prio(rt_se)))
1206                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1207
1208         rt_se->on_list = 0;
1209 }
1210
1211 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1212 {
1213         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1214         struct rt_prio_array *array = &rt_rq->active;
1215         struct rt_rq *group_rq = group_rt_rq(rt_se);
1216         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1217
1218         /*
1219          * Don't enqueue the group if its throttled, or when empty.
1220          * The latter is a consequence of the former when a child group
1221          * get throttled and the current group doesn't have any other
1222          * active members.
1223          */
1224         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1225                 if (rt_se->on_list)
1226                         __delist_rt_entity(rt_se, array);
1227                 return;
1228         }
1229
1230         if (move_entity(flags)) {
1231                 WARN_ON_ONCE(rt_se->on_list);
1232                 if (flags & ENQUEUE_HEAD)
1233                         list_add(&rt_se->run_list, queue);
1234                 else
1235                         list_add_tail(&rt_se->run_list, queue);
1236
1237                 __set_bit(rt_se_prio(rt_se), array->bitmap);
1238                 rt_se->on_list = 1;
1239         }
1240         rt_se->on_rq = 1;
1241
1242         inc_rt_tasks(rt_se, rt_rq);
1243 }
1244
1245 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1246 {
1247         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1248         struct rt_prio_array *array = &rt_rq->active;
1249
1250         if (move_entity(flags)) {
1251                 WARN_ON_ONCE(!rt_se->on_list);
1252                 __delist_rt_entity(rt_se, array);
1253         }
1254         rt_se->on_rq = 0;
1255
1256         dec_rt_tasks(rt_se, rt_rq);
1257 }
1258
1259 /*
1260  * Because the prio of an upper entry depends on the lower
1261  * entries, we must remove entries top - down.
1262  */
1263 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1264 {
1265         struct sched_rt_entity *back = NULL;
1266
1267         for_each_sched_rt_entity(rt_se) {
1268                 rt_se->back = back;
1269                 back = rt_se;
1270         }
1271
1272         dequeue_top_rt_rq(rt_rq_of_se(back));
1273
1274         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1275                 if (on_rt_rq(rt_se))
1276                         __dequeue_rt_entity(rt_se, flags);
1277         }
1278 }
1279
1280 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1281 {
1282         struct rq *rq = rq_of_rt_se(rt_se);
1283
1284         dequeue_rt_stack(rt_se, flags);
1285         for_each_sched_rt_entity(rt_se)
1286                 __enqueue_rt_entity(rt_se, flags);
1287         enqueue_top_rt_rq(&rq->rt);
1288 }
1289
1290 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1291 {
1292         struct rq *rq = rq_of_rt_se(rt_se);
1293
1294         dequeue_rt_stack(rt_se, flags);
1295
1296         for_each_sched_rt_entity(rt_se) {
1297                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1298
1299                 if (rt_rq && rt_rq->rt_nr_running)
1300                         __enqueue_rt_entity(rt_se, flags);
1301         }
1302         enqueue_top_rt_rq(&rq->rt);
1303 }
1304
1305 /*
1306  * Adding/removing a task to/from a priority array:
1307  */
1308 static void
1309 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1310 {
1311         struct sched_rt_entity *rt_se = &p->rt;
1312
1313         if (flags & ENQUEUE_WAKEUP)
1314                 rt_se->timeout = 0;
1315
1316         enqueue_rt_entity(rt_se, flags);
1317
1318         if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
1319                 enqueue_pushable_task(rq, p);
1320 }
1321
1322 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1323 {
1324         struct sched_rt_entity *rt_se = &p->rt;
1325
1326         update_curr_rt(rq);
1327         dequeue_rt_entity(rt_se, flags);
1328
1329         dequeue_pushable_task(rq, p);
1330 }
1331
1332 /*
1333  * Put task to the head or the end of the run list without the overhead of
1334  * dequeue followed by enqueue.
1335  */
1336 static void
1337 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1338 {
1339         if (on_rt_rq(rt_se)) {
1340                 struct rt_prio_array *array = &rt_rq->active;
1341                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1342
1343                 if (head)
1344                         list_move(&rt_se->run_list, queue);
1345                 else
1346                         list_move_tail(&rt_se->run_list, queue);
1347         }
1348 }
1349
1350 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1351 {
1352         struct sched_rt_entity *rt_se = &p->rt;
1353         struct rt_rq *rt_rq;
1354
1355         for_each_sched_rt_entity(rt_se) {
1356                 rt_rq = rt_rq_of_se(rt_se);
1357                 requeue_rt_entity(rt_rq, rt_se, head);
1358         }
1359 }
1360
1361 static void yield_task_rt(struct rq *rq)
1362 {
1363         requeue_task_rt(rq, rq->curr, 0);
1364 }
1365
1366 #ifdef CONFIG_SMP
1367 static int find_lowest_rq(struct task_struct *task);
1368
1369 static int
1370 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1371 {
1372         struct task_struct *curr;
1373         struct rq *rq;
1374
1375         /* For anything but wake ups, just return the task_cpu */
1376         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1377                 goto out;
1378
1379         rq = cpu_rq(cpu);
1380
1381         rcu_read_lock();
1382         curr = READ_ONCE(rq->curr); /* unlocked access */
1383
1384         /*
1385          * If the current task on @p's runqueue is an RT task, then
1386          * try to see if we can wake this RT task up on another
1387          * runqueue. Otherwise simply start this RT task
1388          * on its current runqueue.
1389          *
1390          * We want to avoid overloading runqueues. If the woken
1391          * task is a higher priority, then it will stay on this CPU
1392          * and the lower prio task should be moved to another CPU.
1393          * Even though this will probably make the lower prio task
1394          * lose its cache, we do not want to bounce a higher task
1395          * around just because it gave up its CPU, perhaps for a
1396          * lock?
1397          *
1398          * For equal prio tasks, we just let the scheduler sort it out.
1399          *
1400          * Otherwise, just let it ride on the affined RQ and the
1401          * post-schedule router will push the preempted task away
1402          *
1403          * This test is optimistic, if we get it wrong the load-balancer
1404          * will have to sort it out.
1405          */
1406         if (curr && unlikely(rt_task(curr)) &&
1407             (tsk_nr_cpus_allowed(curr) < 2 ||
1408              curr->prio <= p->prio)) {
1409                 int target = find_lowest_rq(p);
1410
1411                 /*
1412                  * Don't bother moving it if the destination CPU is
1413                  * not running a lower priority task.
1414                  */
1415                 if (target != -1 &&
1416                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1417                         cpu = target;
1418         }
1419         rcu_read_unlock();
1420
1421 out:
1422         return cpu;
1423 }
1424
1425 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1426 {
1427         /*
1428          * Current can't be migrated, useless to reschedule,
1429          * let's hope p can move out.
1430          */
1431         if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
1432             !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1433                 return;
1434
1435         /*
1436          * p is migratable, so let's not schedule it and
1437          * see if it is pushed or pulled somewhere else.
1438          */
1439         if (tsk_nr_cpus_allowed(p) != 1
1440             && cpupri_find(&rq->rd->cpupri, p, NULL))
1441                 return;
1442
1443         /*
1444          * There appears to be other cpus that can accept
1445          * current and none to run 'p', so lets reschedule
1446          * to try and push current away:
1447          */
1448         requeue_task_rt(rq, p, 1);
1449         resched_curr(rq);
1450 }
1451
1452 #endif /* CONFIG_SMP */
1453
1454 /*
1455  * Preempt the current task with a newly woken task if needed:
1456  */
1457 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1458 {
1459         if (p->prio < rq->curr->prio) {
1460                 resched_curr(rq);
1461                 return;
1462         }
1463
1464 #ifdef CONFIG_SMP
1465         /*
1466          * If:
1467          *
1468          * - the newly woken task is of equal priority to the current task
1469          * - the newly woken task is non-migratable while current is migratable
1470          * - current will be preempted on the next reschedule
1471          *
1472          * we should check to see if current can readily move to a different
1473          * cpu.  If so, we will reschedule to allow the push logic to try
1474          * to move current somewhere else, making room for our non-migratable
1475          * task.
1476          */
1477         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1478                 check_preempt_equal_prio(rq, p);
1479 #endif
1480 }
1481
1482 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1483                                                    struct rt_rq *rt_rq)
1484 {
1485         struct rt_prio_array *array = &rt_rq->active;
1486         struct sched_rt_entity *next = NULL;
1487         struct list_head *queue;
1488         int idx;
1489
1490         idx = sched_find_first_bit(array->bitmap);
1491         BUG_ON(idx >= MAX_RT_PRIO);
1492
1493         queue = array->queue + idx;
1494         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1495
1496         return next;
1497 }
1498
1499 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1500 {
1501         struct sched_rt_entity *rt_se;
1502         struct task_struct *p;
1503         struct rt_rq *rt_rq  = &rq->rt;
1504
1505         do {
1506                 rt_se = pick_next_rt_entity(rq, rt_rq);
1507                 BUG_ON(!rt_se);
1508                 rt_rq = group_rt_rq(rt_se);
1509         } while (rt_rq);
1510
1511         p = rt_task_of(rt_se);
1512         p->se.exec_start = rq_clock_task(rq);
1513
1514         return p;
1515 }
1516
1517 static struct task_struct *
1518 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
1519 {
1520         struct task_struct *p;
1521         struct rt_rq *rt_rq = &rq->rt;
1522
1523         if (need_pull_rt_task(rq, prev)) {
1524                 /*
1525                  * This is OK, because current is on_cpu, which avoids it being
1526                  * picked for load-balance and preemption/IRQs are still
1527                  * disabled avoiding further scheduler activity on it and we're
1528                  * being very careful to re-start the picking loop.
1529                  */
1530                 lockdep_unpin_lock(&rq->lock, cookie);
1531                 pull_rt_task(rq);
1532                 lockdep_repin_lock(&rq->lock, cookie);
1533                 /*
1534                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1535                  * means a dl or stop task can slip in, in which case we need
1536                  * to re-start task selection.
1537                  */
1538                 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1539                              rq->dl.dl_nr_running))
1540                         return RETRY_TASK;
1541         }
1542
1543         /*
1544          * We may dequeue prev's rt_rq in put_prev_task().
1545          * So, we update time before rt_nr_running check.
1546          */
1547         if (prev->sched_class == &rt_sched_class)
1548                 update_curr_rt(rq);
1549
1550         if (!rt_rq->rt_queued)
1551                 return NULL;
1552
1553         put_prev_task(rq, prev);
1554
1555         p = _pick_next_task_rt(rq);
1556
1557         /* The running task is never eligible for pushing */
1558         dequeue_pushable_task(rq, p);
1559
1560         queue_push_tasks(rq);
1561
1562         return p;
1563 }
1564
1565 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1566 {
1567         update_curr_rt(rq);
1568
1569         /*
1570          * The previous task needs to be made eligible for pushing
1571          * if it is still active
1572          */
1573         if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
1574                 enqueue_pushable_task(rq, p);
1575 }
1576
1577 #ifdef CONFIG_SMP
1578
1579 /* Only try algorithms three times */
1580 #define RT_MAX_TRIES 3
1581
1582 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1583 {
1584         if (!task_running(rq, p) &&
1585             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1586                 return 1;
1587         return 0;
1588 }
1589
1590 /*
1591  * Return the highest pushable rq's task, which is suitable to be executed
1592  * on the cpu, NULL otherwise
1593  */
1594 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1595 {
1596         struct plist_head *head = &rq->rt.pushable_tasks;
1597         struct task_struct *p;
1598
1599         if (!has_pushable_tasks(rq))
1600                 return NULL;
1601
1602         plist_for_each_entry(p, head, pushable_tasks) {
1603                 if (pick_rt_task(rq, p, cpu))
1604                         return p;
1605         }
1606
1607         return NULL;
1608 }
1609
1610 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1611
1612 static int find_lowest_rq(struct task_struct *task)
1613 {
1614         struct sched_domain *sd;
1615         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1616         int this_cpu = smp_processor_id();
1617         int cpu      = task_cpu(task);
1618
1619         /* Make sure the mask is initialized first */
1620         if (unlikely(!lowest_mask))
1621                 return -1;
1622
1623         if (tsk_nr_cpus_allowed(task) == 1)
1624                 return -1; /* No other targets possible */
1625
1626         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1627                 return -1; /* No targets found */
1628
1629         /*
1630          * At this point we have built a mask of cpus representing the
1631          * lowest priority tasks in the system.  Now we want to elect
1632          * the best one based on our affinity and topology.
1633          *
1634          * We prioritize the last cpu that the task executed on since
1635          * it is most likely cache-hot in that location.
1636          */
1637         if (cpumask_test_cpu(cpu, lowest_mask))
1638                 return cpu;
1639
1640         /*
1641          * Otherwise, we consult the sched_domains span maps to figure
1642          * out which cpu is logically closest to our hot cache data.
1643          */
1644         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1645                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1646
1647         rcu_read_lock();
1648         for_each_domain(cpu, sd) {
1649                 if (sd->flags & SD_WAKE_AFFINE) {
1650                         int best_cpu;
1651
1652                         /*
1653                          * "this_cpu" is cheaper to preempt than a
1654                          * remote processor.
1655                          */
1656                         if (this_cpu != -1 &&
1657                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1658                                 rcu_read_unlock();
1659                                 return this_cpu;
1660                         }
1661
1662                         best_cpu = cpumask_first_and(lowest_mask,
1663                                                      sched_domain_span(sd));
1664                         if (best_cpu < nr_cpu_ids) {
1665                                 rcu_read_unlock();
1666                                 return best_cpu;
1667                         }
1668                 }
1669         }
1670         rcu_read_unlock();
1671
1672         /*
1673          * And finally, if there were no matches within the domains
1674          * just give the caller *something* to work with from the compatible
1675          * locations.
1676          */
1677         if (this_cpu != -1)
1678                 return this_cpu;
1679
1680         cpu = cpumask_any(lowest_mask);
1681         if (cpu < nr_cpu_ids)
1682                 return cpu;
1683         return -1;
1684 }
1685
1686 /* Will lock the rq it finds */
1687 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1688 {
1689         struct rq *lowest_rq = NULL;
1690         int tries;
1691         int cpu;
1692
1693         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1694                 cpu = find_lowest_rq(task);
1695
1696                 if ((cpu == -1) || (cpu == rq->cpu))
1697                         break;
1698
1699                 lowest_rq = cpu_rq(cpu);
1700
1701                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1702                         /*
1703                          * Target rq has tasks of equal or higher priority,
1704                          * retrying does not release any lock and is unlikely
1705                          * to yield a different result.
1706                          */
1707                         lowest_rq = NULL;
1708                         break;
1709                 }
1710
1711                 /* if the prio of this runqueue changed, try again */
1712                 if (double_lock_balance(rq, lowest_rq)) {
1713                         /*
1714                          * We had to unlock the run queue. In
1715                          * the mean time, task could have
1716                          * migrated already or had its affinity changed.
1717                          * Also make sure that it wasn't scheduled on its rq.
1718                          */
1719                         if (unlikely(task_rq(task) != rq ||
1720                                      !cpumask_test_cpu(lowest_rq->cpu,
1721                                                        tsk_cpus_allowed(task)) ||
1722                                      task_running(rq, task) ||
1723                                      !rt_task(task) ||
1724                                      !task_on_rq_queued(task))) {
1725
1726                                 double_unlock_balance(rq, lowest_rq);
1727                                 lowest_rq = NULL;
1728                                 break;
1729                         }
1730                 }
1731
1732                 /* If this rq is still suitable use it. */
1733                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1734                         break;
1735
1736                 /* try again */
1737                 double_unlock_balance(rq, lowest_rq);
1738                 lowest_rq = NULL;
1739         }
1740
1741         return lowest_rq;
1742 }
1743
1744 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1745 {
1746         struct task_struct *p;
1747
1748         if (!has_pushable_tasks(rq))
1749                 return NULL;
1750
1751         p = plist_first_entry(&rq->rt.pushable_tasks,
1752                               struct task_struct, pushable_tasks);
1753
1754         BUG_ON(rq->cpu != task_cpu(p));
1755         BUG_ON(task_current(rq, p));
1756         BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
1757
1758         BUG_ON(!task_on_rq_queued(p));
1759         BUG_ON(!rt_task(p));
1760
1761         return p;
1762 }
1763
1764 /*
1765  * If the current CPU has more than one RT task, see if the non
1766  * running task can migrate over to a CPU that is running a task
1767  * of lesser priority.
1768  */
1769 static int push_rt_task(struct rq *rq)
1770 {
1771         struct task_struct *next_task;
1772         struct rq *lowest_rq;
1773         int ret = 0;
1774
1775         if (!rq->rt.overloaded)
1776                 return 0;
1777
1778         next_task = pick_next_pushable_task(rq);
1779         if (!next_task)
1780                 return 0;
1781
1782 retry:
1783         if (unlikely(next_task == rq->curr)) {
1784                 WARN_ON(1);
1785                 return 0;
1786         }
1787
1788         /*
1789          * It's possible that the next_task slipped in of
1790          * higher priority than current. If that's the case
1791          * just reschedule current.
1792          */
1793         if (unlikely(next_task->prio < rq->curr->prio)) {
1794                 resched_curr(rq);
1795                 return 0;
1796         }
1797
1798         /* We might release rq lock */
1799         get_task_struct(next_task);
1800
1801         /* find_lock_lowest_rq locks the rq if found */
1802         lowest_rq = find_lock_lowest_rq(next_task, rq);
1803         if (!lowest_rq) {
1804                 struct task_struct *task;
1805                 /*
1806                  * find_lock_lowest_rq releases rq->lock
1807                  * so it is possible that next_task has migrated.
1808                  *
1809                  * We need to make sure that the task is still on the same
1810                  * run-queue and is also still the next task eligible for
1811                  * pushing.
1812                  */
1813                 task = pick_next_pushable_task(rq);
1814                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1815                         /*
1816                          * The task hasn't migrated, and is still the next
1817                          * eligible task, but we failed to find a run-queue
1818                          * to push it to.  Do not retry in this case, since
1819                          * other cpus will pull from us when ready.
1820                          */
1821                         goto out;
1822                 }
1823
1824                 if (!task)
1825                         /* No more tasks, just exit */
1826                         goto out;
1827
1828                 /*
1829                  * Something has shifted, try again.
1830                  */
1831                 put_task_struct(next_task);
1832                 next_task = task;
1833                 goto retry;
1834         }
1835
1836         deactivate_task(rq, next_task, 0);
1837         set_task_cpu(next_task, lowest_rq->cpu);
1838         activate_task(lowest_rq, next_task, 0);
1839         ret = 1;
1840
1841         resched_curr(lowest_rq);
1842
1843         double_unlock_balance(rq, lowest_rq);
1844
1845 out:
1846         put_task_struct(next_task);
1847
1848         return ret;
1849 }
1850
1851 static void push_rt_tasks(struct rq *rq)
1852 {
1853         /* push_rt_task will return true if it moved an RT */
1854         while (push_rt_task(rq))
1855                 ;
1856 }
1857
1858 #ifdef HAVE_RT_PUSH_IPI
1859
1860 /*
1861  * When a high priority task schedules out from a CPU and a lower priority
1862  * task is scheduled in, a check is made to see if there's any RT tasks
1863  * on other CPUs that are waiting to run because a higher priority RT task
1864  * is currently running on its CPU. In this case, the CPU with multiple RT
1865  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1866  * up that may be able to run one of its non-running queued RT tasks.
1867  *
1868  * All CPUs with overloaded RT tasks need to be notified as there is currently
1869  * no way to know which of these CPUs have the highest priority task waiting
1870  * to run. Instead of trying to take a spinlock on each of these CPUs,
1871  * which has shown to cause large latency when done on machines with many
1872  * CPUs, sending an IPI to the CPUs to have them push off the overloaded
1873  * RT tasks waiting to run.
1874  *
1875  * Just sending an IPI to each of the CPUs is also an issue, as on large
1876  * count CPU machines, this can cause an IPI storm on a CPU, especially
1877  * if its the only CPU with multiple RT tasks queued, and a large number
1878  * of CPUs scheduling a lower priority task at the same time.
1879  *
1880  * Each root domain has its own irq work function that can iterate over
1881  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1882  * tassk must be checked if there's one or many CPUs that are lowering
1883  * their priority, there's a single irq work iterator that will try to
1884  * push off RT tasks that are waiting to run.
1885  *
1886  * When a CPU schedules a lower priority task, it will kick off the
1887  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1888  * As it only takes the first CPU that schedules a lower priority task
1889  * to start the process, the rto_start variable is incremented and if
1890  * the atomic result is one, then that CPU will try to take the rto_lock.
1891  * This prevents high contention on the lock as the process handles all
1892  * CPUs scheduling lower priority tasks.
1893  *
1894  * All CPUs that are scheduling a lower priority task will increment the
1895  * rt_loop_next variable. This will make sure that the irq work iterator
1896  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1897  * priority task, even if the iterator is in the middle of a scan. Incrementing
1898  * the rt_loop_next will cause the iterator to perform another scan.
1899  *
1900  */
1901 static int rto_next_cpu(struct root_domain *rd)
1902 {
1903         int next;
1904         int cpu;
1905
1906         /*
1907          * When starting the IPI RT pushing, the rto_cpu is set to -1,
1908          * rt_next_cpu() will simply return the first CPU found in
1909          * the rto_mask.
1910          *
1911          * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
1912          * will return the next CPU found in the rto_mask.
1913          *
1914          * If there are no more CPUs left in the rto_mask, then a check is made
1915          * against rto_loop and rto_loop_next. rto_loop is only updated with
1916          * the rto_lock held, but any CPU may increment the rto_loop_next
1917          * without any locking.
1918          */
1919         for (;;) {
1920
1921                 /* When rto_cpu is -1 this acts like cpumask_first() */
1922                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1923
1924                 rd->rto_cpu = cpu;
1925
1926                 if (cpu < nr_cpu_ids)
1927                         return cpu;
1928
1929                 rd->rto_cpu = -1;
1930
1931                 /*
1932                  * ACQUIRE ensures we see the @rto_mask changes
1933                  * made prior to the @next value observed.
1934                  *
1935                  * Matches WMB in rt_set_overload().
1936                  */
1937                 next = atomic_read_acquire(&rd->rto_loop_next);
1938
1939                 if (rd->rto_loop == next)
1940                         break;
1941
1942                 rd->rto_loop = next;
1943         }
1944
1945         return -1;
1946 }
1947
1948 static inline bool rto_start_trylock(atomic_t *v)
1949 {
1950         return !atomic_cmpxchg_acquire(v, 0, 1);
1951 }
1952
1953 static inline void rto_start_unlock(atomic_t *v)
1954 {
1955         atomic_set_release(v, 0);
1956 }
1957
1958 static void tell_cpu_to_push(struct rq *rq)
1959 {
1960         int cpu = -1;
1961
1962         /* Keep the loop going if the IPI is currently active */
1963         atomic_inc(&rq->rd->rto_loop_next);
1964
1965         /* Only one CPU can initiate a loop at a time */
1966         if (!rto_start_trylock(&rq->rd->rto_loop_start))
1967                 return;
1968
1969         raw_spin_lock(&rq->rd->rto_lock);
1970
1971         /*
1972          * The rto_cpu is updated under the lock, if it has a valid cpu
1973          * then the IPI is still running and will continue due to the
1974          * update to loop_next, and nothing needs to be done here.
1975          * Otherwise it is finishing up and an ipi needs to be sent.
1976          */
1977         if (rq->rd->rto_cpu < 0)
1978                 cpu = rto_next_cpu(rq->rd);
1979
1980         raw_spin_unlock(&rq->rd->rto_lock);
1981
1982         rto_start_unlock(&rq->rd->rto_loop_start);
1983
1984         if (cpu >= 0) {
1985                 /* Make sure the rd does not get freed while pushing */
1986                 sched_get_rd(rq->rd);
1987                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
1988         }
1989 }
1990
1991 /* Called from hardirq context */
1992 void rto_push_irq_work_func(struct irq_work *work)
1993 {
1994         struct root_domain *rd =
1995                 container_of(work, struct root_domain, rto_push_work);
1996         struct rq *rq;
1997         int cpu;
1998
1999         rq = this_rq();
2000
2001         /*
2002          * We do not need to grab the lock to check for has_pushable_tasks.
2003          * When it gets updated, a check is made if a push is possible.
2004          */
2005         if (has_pushable_tasks(rq)) {
2006                 raw_spin_lock(&rq->lock);
2007                 push_rt_tasks(rq);
2008                 raw_spin_unlock(&rq->lock);
2009         }
2010
2011         raw_spin_lock(&rd->rto_lock);
2012
2013         /* Pass the IPI to the next rt overloaded queue */
2014         cpu = rto_next_cpu(rd);
2015
2016         raw_spin_unlock(&rd->rto_lock);
2017
2018         if (cpu < 0) {
2019                 sched_put_rd(rd);
2020                 return;
2021         }
2022
2023         /* Try the next RT overloaded CPU */
2024         irq_work_queue_on(&rd->rto_push_work, cpu);
2025 }
2026 #endif /* HAVE_RT_PUSH_IPI */
2027
2028 static void pull_rt_task(struct rq *this_rq)
2029 {
2030         int this_cpu = this_rq->cpu, cpu;
2031         bool resched = false;
2032         struct task_struct *p;
2033         struct rq *src_rq;
2034         int rt_overload_count = rt_overloaded(this_rq);
2035
2036         if (likely(!rt_overload_count))
2037                 return;
2038
2039         /*
2040          * Match the barrier from rt_set_overloaded; this guarantees that if we
2041          * see overloaded we must also see the rto_mask bit.
2042          */
2043         smp_rmb();
2044
2045         /* If we are the only overloaded CPU do nothing */
2046         if (rt_overload_count == 1 &&
2047             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2048                 return;
2049
2050 #ifdef HAVE_RT_PUSH_IPI
2051         if (sched_feat(RT_PUSH_IPI)) {
2052                 tell_cpu_to_push(this_rq);
2053                 return;
2054         }
2055 #endif
2056
2057         for_each_cpu(cpu, this_rq->rd->rto_mask) {
2058                 if (this_cpu == cpu)
2059                         continue;
2060
2061                 src_rq = cpu_rq(cpu);
2062
2063                 /*
2064                  * Don't bother taking the src_rq->lock if the next highest
2065                  * task is known to be lower-priority than our current task.
2066                  * This may look racy, but if this value is about to go
2067                  * logically higher, the src_rq will push this task away.
2068                  * And if its going logically lower, we do not care
2069                  */
2070                 if (src_rq->rt.highest_prio.next >=
2071                     this_rq->rt.highest_prio.curr)
2072                         continue;
2073
2074                 /*
2075                  * We can potentially drop this_rq's lock in
2076                  * double_lock_balance, and another CPU could
2077                  * alter this_rq
2078                  */
2079                 double_lock_balance(this_rq, src_rq);
2080
2081                 /*
2082                  * We can pull only a task, which is pushable
2083                  * on its rq, and no others.
2084                  */
2085                 p = pick_highest_pushable_task(src_rq, this_cpu);
2086
2087                 /*
2088                  * Do we have an RT task that preempts
2089                  * the to-be-scheduled task?
2090                  */
2091                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2092                         WARN_ON(p == src_rq->curr);
2093                         WARN_ON(!task_on_rq_queued(p));
2094
2095                         /*
2096                          * There's a chance that p is higher in priority
2097                          * than what's currently running on its cpu.
2098                          * This is just that p is wakeing up and hasn't
2099                          * had a chance to schedule. We only pull
2100                          * p if it is lower in priority than the
2101                          * current task on the run queue
2102                          */
2103                         if (p->prio < src_rq->curr->prio)
2104                                 goto skip;
2105
2106                         resched = true;
2107
2108                         deactivate_task(src_rq, p, 0);
2109                         set_task_cpu(p, this_cpu);
2110                         activate_task(this_rq, p, 0);
2111                         /*
2112                          * We continue with the search, just in
2113                          * case there's an even higher prio task
2114                          * in another runqueue. (low likelihood
2115                          * but possible)
2116                          */
2117                 }
2118 skip:
2119                 double_unlock_balance(this_rq, src_rq);
2120         }
2121
2122         if (resched)
2123                 resched_curr(this_rq);
2124 }
2125
2126 /*
2127  * If we are not running and we are not going to reschedule soon, we should
2128  * try to push tasks away now
2129  */
2130 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2131 {
2132         if (!task_running(rq, p) &&
2133             !test_tsk_need_resched(rq->curr) &&
2134             tsk_nr_cpus_allowed(p) > 1 &&
2135             (dl_task(rq->curr) || rt_task(rq->curr)) &&
2136             (tsk_nr_cpus_allowed(rq->curr) < 2 ||
2137              rq->curr->prio <= p->prio))
2138                 push_rt_tasks(rq);
2139 }
2140
2141 /* Assumes rq->lock is held */
2142 static void rq_online_rt(struct rq *rq)
2143 {
2144         if (rq->rt.overloaded)
2145                 rt_set_overload(rq);
2146
2147         __enable_runtime(rq);
2148
2149         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2150 }
2151
2152 /* Assumes rq->lock is held */
2153 static void rq_offline_rt(struct rq *rq)
2154 {
2155         if (rq->rt.overloaded)
2156                 rt_clear_overload(rq);
2157
2158         __disable_runtime(rq);
2159
2160         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2161 }
2162
2163 /*
2164  * When switch from the rt queue, we bring ourselves to a position
2165  * that we might want to pull RT tasks from other runqueues.
2166  */
2167 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2168 {
2169         /*
2170          * If there are other RT tasks then we will reschedule
2171          * and the scheduling of the other RT tasks will handle
2172          * the balancing. But if we are the last RT task
2173          * we may need to handle the pulling of RT tasks
2174          * now.
2175          */
2176         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2177                 return;
2178
2179         queue_pull_task(rq);
2180 }
2181
2182 void __init init_sched_rt_class(void)
2183 {
2184         unsigned int i;
2185
2186         for_each_possible_cpu(i) {
2187                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2188                                         GFP_KERNEL, cpu_to_node(i));
2189         }
2190 }
2191 #endif /* CONFIG_SMP */
2192
2193 /*
2194  * When switching a task to RT, we may overload the runqueue
2195  * with RT tasks. In this case we try to push them off to
2196  * other runqueues.
2197  */
2198 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2199 {
2200         /*
2201          * If we are already running, then there's nothing
2202          * that needs to be done. But if we are not running
2203          * we may need to preempt the current running task.
2204          * If that current running task is also an RT task
2205          * then see if we can move to another run queue.
2206          */
2207         if (task_on_rq_queued(p) && rq->curr != p) {
2208 #ifdef CONFIG_SMP
2209                 if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
2210                         queue_push_tasks(rq);
2211 #endif /* CONFIG_SMP */
2212                 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2213                         resched_curr(rq);
2214         }
2215 }
2216
2217 /*
2218  * Priority of the task has changed. This may cause
2219  * us to initiate a push or pull.
2220  */
2221 static void
2222 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2223 {
2224         if (!task_on_rq_queued(p))
2225                 return;
2226
2227         if (rq->curr == p) {
2228 #ifdef CONFIG_SMP
2229                 /*
2230                  * If our priority decreases while running, we
2231                  * may need to pull tasks to this runqueue.
2232                  */
2233                 if (oldprio < p->prio)
2234                         queue_pull_task(rq);
2235
2236                 /*
2237                  * If there's a higher priority task waiting to run
2238                  * then reschedule.
2239                  */
2240                 if (p->prio > rq->rt.highest_prio.curr)
2241                         resched_curr(rq);
2242 #else
2243                 /* For UP simply resched on drop of prio */
2244                 if (oldprio < p->prio)
2245                         resched_curr(rq);
2246 #endif /* CONFIG_SMP */
2247         } else {
2248                 /*
2249                  * This task is not running, but if it is
2250                  * greater than the current running task
2251                  * then reschedule.
2252                  */
2253                 if (p->prio < rq->curr->prio)
2254                         resched_curr(rq);
2255         }
2256 }
2257
2258 static void watchdog(struct rq *rq, struct task_struct *p)
2259 {
2260         unsigned long soft, hard;
2261
2262         /* max may change after cur was read, this will be fixed next tick */
2263         soft = task_rlimit(p, RLIMIT_RTTIME);
2264         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2265
2266         if (soft != RLIM_INFINITY) {
2267                 unsigned long next;
2268
2269                 if (p->rt.watchdog_stamp != jiffies) {
2270                         p->rt.timeout++;
2271                         p->rt.watchdog_stamp = jiffies;
2272                 }
2273
2274                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2275                 if (p->rt.timeout > next)
2276                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2277         }
2278 }
2279
2280 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2281 {
2282         struct sched_rt_entity *rt_se = &p->rt;
2283
2284         update_curr_rt(rq);
2285
2286         watchdog(rq, p);
2287
2288         /*
2289          * RR tasks need a special form of timeslice management.
2290          * FIFO tasks have no timeslices.
2291          */
2292         if (p->policy != SCHED_RR)
2293                 return;
2294
2295         if (--p->rt.time_slice)
2296                 return;
2297
2298         p->rt.time_slice = sched_rr_timeslice;
2299
2300         /*
2301          * Requeue to the end of queue if we (and all of our ancestors) are not
2302          * the only element on the queue
2303          */
2304         for_each_sched_rt_entity(rt_se) {
2305                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2306                         requeue_task_rt(rq, p, 0);
2307                         resched_curr(rq);
2308                         return;
2309                 }
2310         }
2311 }
2312
2313 static void set_curr_task_rt(struct rq *rq)
2314 {
2315         struct task_struct *p = rq->curr;
2316
2317         p->se.exec_start = rq_clock_task(rq);
2318
2319         /* The running task is never eligible for pushing */
2320         dequeue_pushable_task(rq, p);
2321 }
2322
2323 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2324 {
2325         /*
2326          * Time slice is 0 for SCHED_FIFO tasks
2327          */
2328         if (task->policy == SCHED_RR)
2329                 return sched_rr_timeslice;
2330         else
2331                 return 0;
2332 }
2333
2334 const struct sched_class rt_sched_class = {
2335         .next                   = &fair_sched_class,
2336         .enqueue_task           = enqueue_task_rt,
2337         .dequeue_task           = dequeue_task_rt,
2338         .yield_task             = yield_task_rt,
2339
2340         .check_preempt_curr     = check_preempt_curr_rt,
2341
2342         .pick_next_task         = pick_next_task_rt,
2343         .put_prev_task          = put_prev_task_rt,
2344
2345 #ifdef CONFIG_SMP
2346         .select_task_rq         = select_task_rq_rt,
2347
2348         .set_cpus_allowed       = set_cpus_allowed_common,
2349         .rq_online              = rq_online_rt,
2350         .rq_offline             = rq_offline_rt,
2351         .task_woken             = task_woken_rt,
2352         .switched_from          = switched_from_rt,
2353 #endif
2354
2355         .set_curr_task          = set_curr_task_rt,
2356         .task_tick              = task_tick_rt,
2357
2358         .get_rr_interval        = get_rr_interval_rt,
2359
2360         .prio_changed           = prio_changed_rt,
2361         .switched_to            = switched_to_rt,
2362
2363         .update_curr            = update_curr_rt,
2364 };
2365
2366 #ifdef CONFIG_SCHED_DEBUG
2367 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2368
2369 void print_rt_stats(struct seq_file *m, int cpu)
2370 {
2371         rt_rq_iter_t iter;
2372         struct rt_rq *rt_rq;
2373
2374         rcu_read_lock();
2375         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2376                 print_rt_rq(m, cpu, rt_rq);
2377         rcu_read_unlock();
2378 }
2379 #endif /* CONFIG_SCHED_DEBUG */