GNU Linux-libre 4.4.294-gnu1
[releases.git] / kernel / sched / rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9 #include <linux/irq_work.h>
10
11 int sched_rr_timeslice = RR_TIMESLICE;
12 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
13
14 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
15
16 struct rt_bandwidth def_rt_bandwidth;
17
18 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
19 {
20         struct rt_bandwidth *rt_b =
21                 container_of(timer, struct rt_bandwidth, rt_period_timer);
22         int idle = 0;
23         int overrun;
24
25         raw_spin_lock(&rt_b->rt_runtime_lock);
26         for (;;) {
27                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
28                 if (!overrun)
29                         break;
30
31                 raw_spin_unlock(&rt_b->rt_runtime_lock);
32                 idle = do_sched_rt_period_timer(rt_b, overrun);
33                 raw_spin_lock(&rt_b->rt_runtime_lock);
34         }
35         if (idle)
36                 rt_b->rt_period_active = 0;
37         raw_spin_unlock(&rt_b->rt_runtime_lock);
38
39         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
40 }
41
42 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
43 {
44         rt_b->rt_period = ns_to_ktime(period);
45         rt_b->rt_runtime = runtime;
46
47         raw_spin_lock_init(&rt_b->rt_runtime_lock);
48
49         hrtimer_init(&rt_b->rt_period_timer,
50                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
51         rt_b->rt_period_timer.function = sched_rt_period_timer;
52 }
53
54 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
55 {
56         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
57                 return;
58
59         raw_spin_lock(&rt_b->rt_runtime_lock);
60         if (!rt_b->rt_period_active) {
61                 rt_b->rt_period_active = 1;
62                 hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
63                 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
64         }
65         raw_spin_unlock(&rt_b->rt_runtime_lock);
66 }
67
68 void init_rt_rq(struct rt_rq *rt_rq)
69 {
70         struct rt_prio_array *array;
71         int i;
72
73         array = &rt_rq->active;
74         for (i = 0; i < MAX_RT_PRIO; i++) {
75                 INIT_LIST_HEAD(array->queue + i);
76                 __clear_bit(i, array->bitmap);
77         }
78         /* delimiter for bitsearch: */
79         __set_bit(MAX_RT_PRIO, array->bitmap);
80
81 #if defined CONFIG_SMP
82         rt_rq->highest_prio.curr = MAX_RT_PRIO;
83         rt_rq->highest_prio.next = MAX_RT_PRIO;
84         rt_rq->rt_nr_migratory = 0;
85         rt_rq->overloaded = 0;
86         plist_head_init(&rt_rq->pushable_tasks);
87 #endif /* CONFIG_SMP */
88         /* We start is dequeued state, because no RT tasks are queued */
89         rt_rq->rt_queued = 0;
90
91         rt_rq->rt_time = 0;
92         rt_rq->rt_throttled = 0;
93         rt_rq->rt_runtime = 0;
94         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
95 }
96
97 #ifdef CONFIG_RT_GROUP_SCHED
98 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
99 {
100         hrtimer_cancel(&rt_b->rt_period_timer);
101 }
102
103 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
104
105 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
106 {
107 #ifdef CONFIG_SCHED_DEBUG
108         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
109 #endif
110         return container_of(rt_se, struct task_struct, rt);
111 }
112
113 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
114 {
115         return rt_rq->rq;
116 }
117
118 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
119 {
120         return rt_se->rt_rq;
121 }
122
123 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
124 {
125         struct rt_rq *rt_rq = rt_se->rt_rq;
126
127         return rt_rq->rq;
128 }
129
130 void free_rt_sched_group(struct task_group *tg)
131 {
132         int i;
133
134         if (tg->rt_se)
135                 destroy_rt_bandwidth(&tg->rt_bandwidth);
136
137         for_each_possible_cpu(i) {
138                 if (tg->rt_rq)
139                         kfree(tg->rt_rq[i]);
140                 if (tg->rt_se)
141                         kfree(tg->rt_se[i]);
142         }
143
144         kfree(tg->rt_rq);
145         kfree(tg->rt_se);
146 }
147
148 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
149                 struct sched_rt_entity *rt_se, int cpu,
150                 struct sched_rt_entity *parent)
151 {
152         struct rq *rq = cpu_rq(cpu);
153
154         rt_rq->highest_prio.curr = MAX_RT_PRIO;
155         rt_rq->rt_nr_boosted = 0;
156         rt_rq->rq = rq;
157         rt_rq->tg = tg;
158
159         tg->rt_rq[cpu] = rt_rq;
160         tg->rt_se[cpu] = rt_se;
161
162         if (!rt_se)
163                 return;
164
165         if (!parent)
166                 rt_se->rt_rq = &rq->rt;
167         else
168                 rt_se->rt_rq = parent->my_q;
169
170         rt_se->my_q = rt_rq;
171         rt_se->parent = parent;
172         INIT_LIST_HEAD(&rt_se->run_list);
173 }
174
175 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
176 {
177         struct rt_rq *rt_rq;
178         struct sched_rt_entity *rt_se;
179         int i;
180
181         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
182         if (!tg->rt_rq)
183                 goto err;
184         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
185         if (!tg->rt_se)
186                 goto err;
187
188         init_rt_bandwidth(&tg->rt_bandwidth,
189                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
190
191         for_each_possible_cpu(i) {
192                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
193                                      GFP_KERNEL, cpu_to_node(i));
194                 if (!rt_rq)
195                         goto err;
196
197                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
198                                      GFP_KERNEL, cpu_to_node(i));
199                 if (!rt_se)
200                         goto err_free_rq;
201
202                 init_rt_rq(rt_rq);
203                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
204                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
205         }
206
207         return 1;
208
209 err_free_rq:
210         kfree(rt_rq);
211 err:
212         return 0;
213 }
214
215 #else /* CONFIG_RT_GROUP_SCHED */
216
217 #define rt_entity_is_task(rt_se) (1)
218
219 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
220 {
221         return container_of(rt_se, struct task_struct, rt);
222 }
223
224 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
225 {
226         return container_of(rt_rq, struct rq, rt);
227 }
228
229 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
230 {
231         struct task_struct *p = rt_task_of(rt_se);
232
233         return task_rq(p);
234 }
235
236 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
237 {
238         struct rq *rq = rq_of_rt_se(rt_se);
239
240         return &rq->rt;
241 }
242
243 void free_rt_sched_group(struct task_group *tg) { }
244
245 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
246 {
247         return 1;
248 }
249 #endif /* CONFIG_RT_GROUP_SCHED */
250
251 #ifdef CONFIG_SMP
252
253 static void pull_rt_task(struct rq *this_rq);
254
255 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
256 {
257         /* Try to pull RT tasks here if we lower this rq's prio */
258         return rq->rt.highest_prio.curr > prev->prio;
259 }
260
261 static inline int rt_overloaded(struct rq *rq)
262 {
263         return atomic_read(&rq->rd->rto_count);
264 }
265
266 static inline void rt_set_overload(struct rq *rq)
267 {
268         if (!rq->online)
269                 return;
270
271         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
272         /*
273          * Make sure the mask is visible before we set
274          * the overload count. That is checked to determine
275          * if we should look at the mask. It would be a shame
276          * if we looked at the mask, but the mask was not
277          * updated yet.
278          *
279          * Matched by the barrier in pull_rt_task().
280          */
281         smp_wmb();
282         atomic_inc(&rq->rd->rto_count);
283 }
284
285 static inline void rt_clear_overload(struct rq *rq)
286 {
287         if (!rq->online)
288                 return;
289
290         /* the order here really doesn't matter */
291         atomic_dec(&rq->rd->rto_count);
292         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
293 }
294
295 static void update_rt_migration(struct rt_rq *rt_rq)
296 {
297         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
298                 if (!rt_rq->overloaded) {
299                         rt_set_overload(rq_of_rt_rq(rt_rq));
300                         rt_rq->overloaded = 1;
301                 }
302         } else if (rt_rq->overloaded) {
303                 rt_clear_overload(rq_of_rt_rq(rt_rq));
304                 rt_rq->overloaded = 0;
305         }
306 }
307
308 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
309 {
310         struct task_struct *p;
311
312         if (!rt_entity_is_task(rt_se))
313                 return;
314
315         p = rt_task_of(rt_se);
316         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
317
318         rt_rq->rt_nr_total++;
319         if (p->nr_cpus_allowed > 1)
320                 rt_rq->rt_nr_migratory++;
321
322         update_rt_migration(rt_rq);
323 }
324
325 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
326 {
327         struct task_struct *p;
328
329         if (!rt_entity_is_task(rt_se))
330                 return;
331
332         p = rt_task_of(rt_se);
333         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
334
335         rt_rq->rt_nr_total--;
336         if (p->nr_cpus_allowed > 1)
337                 rt_rq->rt_nr_migratory--;
338
339         update_rt_migration(rt_rq);
340 }
341
342 static inline int has_pushable_tasks(struct rq *rq)
343 {
344         return !plist_head_empty(&rq->rt.pushable_tasks);
345 }
346
347 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
348 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
349
350 static void push_rt_tasks(struct rq *);
351 static void pull_rt_task(struct rq *);
352
353 static inline void queue_push_tasks(struct rq *rq)
354 {
355         if (!has_pushable_tasks(rq))
356                 return;
357
358         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
359 }
360
361 static inline void queue_pull_task(struct rq *rq)
362 {
363         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
364 }
365
366 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
367 {
368         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
369         plist_node_init(&p->pushable_tasks, p->prio);
370         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
371
372         /* Update the highest prio pushable task */
373         if (p->prio < rq->rt.highest_prio.next)
374                 rq->rt.highest_prio.next = p->prio;
375 }
376
377 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
378 {
379         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
380
381         /* Update the new highest prio pushable task */
382         if (has_pushable_tasks(rq)) {
383                 p = plist_first_entry(&rq->rt.pushable_tasks,
384                                       struct task_struct, pushable_tasks);
385                 rq->rt.highest_prio.next = p->prio;
386         } else
387                 rq->rt.highest_prio.next = MAX_RT_PRIO;
388 }
389
390 #else
391
392 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
393 {
394 }
395
396 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
397 {
398 }
399
400 static inline
401 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
402 {
403 }
404
405 static inline
406 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
407 {
408 }
409
410 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
411 {
412         return false;
413 }
414
415 static inline void pull_rt_task(struct rq *this_rq)
416 {
417 }
418
419 static inline void queue_push_tasks(struct rq *rq)
420 {
421 }
422 #endif /* CONFIG_SMP */
423
424 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
425 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
426
427 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
428 {
429         return !list_empty(&rt_se->run_list);
430 }
431
432 #ifdef CONFIG_RT_GROUP_SCHED
433
434 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
435 {
436         if (!rt_rq->tg)
437                 return RUNTIME_INF;
438
439         return rt_rq->rt_runtime;
440 }
441
442 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
443 {
444         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
445 }
446
447 typedef struct task_group *rt_rq_iter_t;
448
449 static inline struct task_group *next_task_group(struct task_group *tg)
450 {
451         do {
452                 tg = list_entry_rcu(tg->list.next,
453                         typeof(struct task_group), list);
454         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
455
456         if (&tg->list == &task_groups)
457                 tg = NULL;
458
459         return tg;
460 }
461
462 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
463         for (iter = container_of(&task_groups, typeof(*iter), list);    \
464                 (iter = next_task_group(iter)) &&                       \
465                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
466
467 #define for_each_sched_rt_entity(rt_se) \
468         for (; rt_se; rt_se = rt_se->parent)
469
470 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
471 {
472         return rt_se->my_q;
473 }
474
475 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
476 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
477
478 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
479 {
480         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
481         struct rq *rq = rq_of_rt_rq(rt_rq);
482         struct sched_rt_entity *rt_se;
483
484         int cpu = cpu_of(rq);
485
486         rt_se = rt_rq->tg->rt_se[cpu];
487
488         if (rt_rq->rt_nr_running) {
489                 if (!rt_se)
490                         enqueue_top_rt_rq(rt_rq);
491                 else if (!on_rt_rq(rt_se))
492                         enqueue_rt_entity(rt_se, false);
493
494                 if (rt_rq->highest_prio.curr < curr->prio)
495                         resched_curr(rq);
496         }
497 }
498
499 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
500 {
501         struct sched_rt_entity *rt_se;
502         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
503
504         rt_se = rt_rq->tg->rt_se[cpu];
505
506         if (!rt_se)
507                 dequeue_top_rt_rq(rt_rq);
508         else if (on_rt_rq(rt_se))
509                 dequeue_rt_entity(rt_se);
510 }
511
512 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
513 {
514         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
515 }
516
517 static int rt_se_boosted(struct sched_rt_entity *rt_se)
518 {
519         struct rt_rq *rt_rq = group_rt_rq(rt_se);
520         struct task_struct *p;
521
522         if (rt_rq)
523                 return !!rt_rq->rt_nr_boosted;
524
525         p = rt_task_of(rt_se);
526         return p->prio != p->normal_prio;
527 }
528
529 #ifdef CONFIG_SMP
530 static inline const struct cpumask *sched_rt_period_mask(void)
531 {
532         return this_rq()->rd->span;
533 }
534 #else
535 static inline const struct cpumask *sched_rt_period_mask(void)
536 {
537         return cpu_online_mask;
538 }
539 #endif
540
541 static inline
542 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
543 {
544         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
545 }
546
547 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
548 {
549         return &rt_rq->tg->rt_bandwidth;
550 }
551
552 #else /* !CONFIG_RT_GROUP_SCHED */
553
554 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
555 {
556         return rt_rq->rt_runtime;
557 }
558
559 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
560 {
561         return ktime_to_ns(def_rt_bandwidth.rt_period);
562 }
563
564 typedef struct rt_rq *rt_rq_iter_t;
565
566 #define for_each_rt_rq(rt_rq, iter, rq) \
567         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
568
569 #define for_each_sched_rt_entity(rt_se) \
570         for (; rt_se; rt_se = NULL)
571
572 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
573 {
574         return NULL;
575 }
576
577 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
578 {
579         struct rq *rq = rq_of_rt_rq(rt_rq);
580
581         if (!rt_rq->rt_nr_running)
582                 return;
583
584         enqueue_top_rt_rq(rt_rq);
585         resched_curr(rq);
586 }
587
588 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
589 {
590         dequeue_top_rt_rq(rt_rq);
591 }
592
593 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
594 {
595         return rt_rq->rt_throttled;
596 }
597
598 static inline const struct cpumask *sched_rt_period_mask(void)
599 {
600         return cpu_online_mask;
601 }
602
603 static inline
604 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
605 {
606         return &cpu_rq(cpu)->rt;
607 }
608
609 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
610 {
611         return &def_rt_bandwidth;
612 }
613
614 #endif /* CONFIG_RT_GROUP_SCHED */
615
616 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
617 {
618         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
619
620         return (hrtimer_active(&rt_b->rt_period_timer) ||
621                 rt_rq->rt_time < rt_b->rt_runtime);
622 }
623
624 #ifdef CONFIG_SMP
625 /*
626  * We ran out of runtime, see if we can borrow some from our neighbours.
627  */
628 static void do_balance_runtime(struct rt_rq *rt_rq)
629 {
630         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
631         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
632         int i, weight;
633         u64 rt_period;
634
635         weight = cpumask_weight(rd->span);
636
637         raw_spin_lock(&rt_b->rt_runtime_lock);
638         rt_period = ktime_to_ns(rt_b->rt_period);
639         for_each_cpu(i, rd->span) {
640                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
641                 s64 diff;
642
643                 if (iter == rt_rq)
644                         continue;
645
646                 raw_spin_lock(&iter->rt_runtime_lock);
647                 /*
648                  * Either all rqs have inf runtime and there's nothing to steal
649                  * or __disable_runtime() below sets a specific rq to inf to
650                  * indicate its been disabled and disalow stealing.
651                  */
652                 if (iter->rt_runtime == RUNTIME_INF)
653                         goto next;
654
655                 /*
656                  * From runqueues with spare time, take 1/n part of their
657                  * spare time, but no more than our period.
658                  */
659                 diff = iter->rt_runtime - iter->rt_time;
660                 if (diff > 0) {
661                         diff = div_u64((u64)diff, weight);
662                         if (rt_rq->rt_runtime + diff > rt_period)
663                                 diff = rt_period - rt_rq->rt_runtime;
664                         iter->rt_runtime -= diff;
665                         rt_rq->rt_runtime += diff;
666                         if (rt_rq->rt_runtime == rt_period) {
667                                 raw_spin_unlock(&iter->rt_runtime_lock);
668                                 break;
669                         }
670                 }
671 next:
672                 raw_spin_unlock(&iter->rt_runtime_lock);
673         }
674         raw_spin_unlock(&rt_b->rt_runtime_lock);
675 }
676
677 /*
678  * Ensure this RQ takes back all the runtime it lend to its neighbours.
679  */
680 static void __disable_runtime(struct rq *rq)
681 {
682         struct root_domain *rd = rq->rd;
683         rt_rq_iter_t iter;
684         struct rt_rq *rt_rq;
685
686         if (unlikely(!scheduler_running))
687                 return;
688
689         for_each_rt_rq(rt_rq, iter, rq) {
690                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
691                 s64 want;
692                 int i;
693
694                 raw_spin_lock(&rt_b->rt_runtime_lock);
695                 raw_spin_lock(&rt_rq->rt_runtime_lock);
696                 /*
697                  * Either we're all inf and nobody needs to borrow, or we're
698                  * already disabled and thus have nothing to do, or we have
699                  * exactly the right amount of runtime to take out.
700                  */
701                 if (rt_rq->rt_runtime == RUNTIME_INF ||
702                                 rt_rq->rt_runtime == rt_b->rt_runtime)
703                         goto balanced;
704                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
705
706                 /*
707                  * Calculate the difference between what we started out with
708                  * and what we current have, that's the amount of runtime
709                  * we lend and now have to reclaim.
710                  */
711                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
712
713                 /*
714                  * Greedy reclaim, take back as much as we can.
715                  */
716                 for_each_cpu(i, rd->span) {
717                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
718                         s64 diff;
719
720                         /*
721                          * Can't reclaim from ourselves or disabled runqueues.
722                          */
723                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
724                                 continue;
725
726                         raw_spin_lock(&iter->rt_runtime_lock);
727                         if (want > 0) {
728                                 diff = min_t(s64, iter->rt_runtime, want);
729                                 iter->rt_runtime -= diff;
730                                 want -= diff;
731                         } else {
732                                 iter->rt_runtime -= want;
733                                 want -= want;
734                         }
735                         raw_spin_unlock(&iter->rt_runtime_lock);
736
737                         if (!want)
738                                 break;
739                 }
740
741                 raw_spin_lock(&rt_rq->rt_runtime_lock);
742                 /*
743                  * We cannot be left wanting - that would mean some runtime
744                  * leaked out of the system.
745                  */
746                 BUG_ON(want);
747 balanced:
748                 /*
749                  * Disable all the borrow logic by pretending we have inf
750                  * runtime - in which case borrowing doesn't make sense.
751                  */
752                 rt_rq->rt_runtime = RUNTIME_INF;
753                 rt_rq->rt_throttled = 0;
754                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
755                 raw_spin_unlock(&rt_b->rt_runtime_lock);
756
757                 /* Make rt_rq available for pick_next_task() */
758                 sched_rt_rq_enqueue(rt_rq);
759         }
760 }
761
762 static void __enable_runtime(struct rq *rq)
763 {
764         rt_rq_iter_t iter;
765         struct rt_rq *rt_rq;
766
767         if (unlikely(!scheduler_running))
768                 return;
769
770         /*
771          * Reset each runqueue's bandwidth settings
772          */
773         for_each_rt_rq(rt_rq, iter, rq) {
774                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
775
776                 raw_spin_lock(&rt_b->rt_runtime_lock);
777                 raw_spin_lock(&rt_rq->rt_runtime_lock);
778                 rt_rq->rt_runtime = rt_b->rt_runtime;
779                 rt_rq->rt_time = 0;
780                 rt_rq->rt_throttled = 0;
781                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
782                 raw_spin_unlock(&rt_b->rt_runtime_lock);
783         }
784 }
785
786 static void balance_runtime(struct rt_rq *rt_rq)
787 {
788         if (!sched_feat(RT_RUNTIME_SHARE))
789                 return;
790
791         if (rt_rq->rt_time > rt_rq->rt_runtime) {
792                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
793                 do_balance_runtime(rt_rq);
794                 raw_spin_lock(&rt_rq->rt_runtime_lock);
795         }
796 }
797 #else /* !CONFIG_SMP */
798 static inline void balance_runtime(struct rt_rq *rt_rq) {}
799 #endif /* CONFIG_SMP */
800
801 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
802 {
803         int i, idle = 1, throttled = 0;
804         const struct cpumask *span;
805
806         span = sched_rt_period_mask();
807 #ifdef CONFIG_RT_GROUP_SCHED
808         /*
809          * FIXME: isolated CPUs should really leave the root task group,
810          * whether they are isolcpus or were isolated via cpusets, lest
811          * the timer run on a CPU which does not service all runqueues,
812          * potentially leaving other CPUs indefinitely throttled.  If
813          * isolation is really required, the user will turn the throttle
814          * off to kill the perturbations it causes anyway.  Meanwhile,
815          * this maintains functionality for boot and/or troubleshooting.
816          */
817         if (rt_b == &root_task_group.rt_bandwidth)
818                 span = cpu_online_mask;
819 #endif
820         for_each_cpu(i, span) {
821                 int enqueue = 0;
822                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
823                 struct rq *rq = rq_of_rt_rq(rt_rq);
824
825                 raw_spin_lock(&rq->lock);
826                 update_rq_clock(rq);
827
828                 if (rt_rq->rt_time) {
829                         u64 runtime;
830
831                         raw_spin_lock(&rt_rq->rt_runtime_lock);
832                         if (rt_rq->rt_throttled)
833                                 balance_runtime(rt_rq);
834                         runtime = rt_rq->rt_runtime;
835                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
836                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
837                                 rt_rq->rt_throttled = 0;
838                                 enqueue = 1;
839
840                                 /*
841                                  * When we're idle and a woken (rt) task is
842                                  * throttled check_preempt_curr() will set
843                                  * skip_update and the time between the wakeup
844                                  * and this unthrottle will get accounted as
845                                  * 'runtime'.
846                                  */
847                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
848                                         rq_clock_skip_update(rq, false);
849                         }
850                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
851                                 idle = 0;
852                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
853                 } else if (rt_rq->rt_nr_running) {
854                         idle = 0;
855                         if (!rt_rq_throttled(rt_rq))
856                                 enqueue = 1;
857                 }
858                 if (rt_rq->rt_throttled)
859                         throttled = 1;
860
861                 if (enqueue)
862                         sched_rt_rq_enqueue(rt_rq);
863                 raw_spin_unlock(&rq->lock);
864         }
865
866         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
867                 return 1;
868
869         return idle;
870 }
871
872 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
873 {
874 #ifdef CONFIG_RT_GROUP_SCHED
875         struct rt_rq *rt_rq = group_rt_rq(rt_se);
876
877         if (rt_rq)
878                 return rt_rq->highest_prio.curr;
879 #endif
880
881         return rt_task_of(rt_se)->prio;
882 }
883
884 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
885 {
886         u64 runtime = sched_rt_runtime(rt_rq);
887
888         if (rt_rq->rt_throttled)
889                 return rt_rq_throttled(rt_rq);
890
891         if (runtime >= sched_rt_period(rt_rq))
892                 return 0;
893
894         balance_runtime(rt_rq);
895         runtime = sched_rt_runtime(rt_rq);
896         if (runtime == RUNTIME_INF)
897                 return 0;
898
899         if (rt_rq->rt_time > runtime) {
900                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
901
902                 /*
903                  * Don't actually throttle groups that have no runtime assigned
904                  * but accrue some time due to boosting.
905                  */
906                 if (likely(rt_b->rt_runtime)) {
907                         rt_rq->rt_throttled = 1;
908                         printk_deferred_once("sched: RT throttling activated\n");
909                 } else {
910                         /*
911                          * In case we did anyway, make it go away,
912                          * replenishment is a joke, since it will replenish us
913                          * with exactly 0 ns.
914                          */
915                         rt_rq->rt_time = 0;
916                 }
917
918                 if (rt_rq_throttled(rt_rq)) {
919                         sched_rt_rq_dequeue(rt_rq);
920                         return 1;
921                 }
922         }
923
924         return 0;
925 }
926
927 /*
928  * Update the current task's runtime statistics. Skip current tasks that
929  * are not in our scheduling class.
930  */
931 static void update_curr_rt(struct rq *rq)
932 {
933         struct task_struct *curr = rq->curr;
934         struct sched_rt_entity *rt_se = &curr->rt;
935         u64 delta_exec;
936
937         if (curr->sched_class != &rt_sched_class)
938                 return;
939
940         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
941         if (unlikely((s64)delta_exec <= 0))
942                 return;
943
944         schedstat_set(curr->se.statistics.exec_max,
945                       max(curr->se.statistics.exec_max, delta_exec));
946
947         curr->se.sum_exec_runtime += delta_exec;
948         account_group_exec_runtime(curr, delta_exec);
949
950         curr->se.exec_start = rq_clock_task(rq);
951         cpuacct_charge(curr, delta_exec);
952
953         sched_rt_avg_update(rq, delta_exec);
954
955         if (!rt_bandwidth_enabled())
956                 return;
957
958         for_each_sched_rt_entity(rt_se) {
959                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
960
961                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
962                         raw_spin_lock(&rt_rq->rt_runtime_lock);
963                         rt_rq->rt_time += delta_exec;
964                         if (sched_rt_runtime_exceeded(rt_rq))
965                                 resched_curr(rq);
966                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
967                 }
968         }
969 }
970
971 static void
972 dequeue_top_rt_rq(struct rt_rq *rt_rq)
973 {
974         struct rq *rq = rq_of_rt_rq(rt_rq);
975
976         BUG_ON(&rq->rt != rt_rq);
977
978         if (!rt_rq->rt_queued)
979                 return;
980
981         BUG_ON(!rq->nr_running);
982
983         sub_nr_running(rq, rt_rq->rt_nr_running);
984         rt_rq->rt_queued = 0;
985 }
986
987 static void
988 enqueue_top_rt_rq(struct rt_rq *rt_rq)
989 {
990         struct rq *rq = rq_of_rt_rq(rt_rq);
991
992         BUG_ON(&rq->rt != rt_rq);
993
994         if (rt_rq->rt_queued)
995                 return;
996         if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
997                 return;
998
999         add_nr_running(rq, rt_rq->rt_nr_running);
1000         rt_rq->rt_queued = 1;
1001 }
1002
1003 #if defined CONFIG_SMP
1004
1005 static void
1006 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1007 {
1008         struct rq *rq = rq_of_rt_rq(rt_rq);
1009
1010 #ifdef CONFIG_RT_GROUP_SCHED
1011         /*
1012          * Change rq's cpupri only if rt_rq is the top queue.
1013          */
1014         if (&rq->rt != rt_rq)
1015                 return;
1016 #endif
1017         if (rq->online && prio < prev_prio)
1018                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1019 }
1020
1021 static void
1022 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1023 {
1024         struct rq *rq = rq_of_rt_rq(rt_rq);
1025
1026 #ifdef CONFIG_RT_GROUP_SCHED
1027         /*
1028          * Change rq's cpupri only if rt_rq is the top queue.
1029          */
1030         if (&rq->rt != rt_rq)
1031                 return;
1032 #endif
1033         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1034                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1035 }
1036
1037 #else /* CONFIG_SMP */
1038
1039 static inline
1040 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1041 static inline
1042 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1043
1044 #endif /* CONFIG_SMP */
1045
1046 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1047 static void
1048 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1049 {
1050         int prev_prio = rt_rq->highest_prio.curr;
1051
1052         if (prio < prev_prio)
1053                 rt_rq->highest_prio.curr = prio;
1054
1055         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1056 }
1057
1058 static void
1059 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1060 {
1061         int prev_prio = rt_rq->highest_prio.curr;
1062
1063         if (rt_rq->rt_nr_running) {
1064
1065                 WARN_ON(prio < prev_prio);
1066
1067                 /*
1068                  * This may have been our highest task, and therefore
1069                  * we may have some recomputation to do
1070                  */
1071                 if (prio == prev_prio) {
1072                         struct rt_prio_array *array = &rt_rq->active;
1073
1074                         rt_rq->highest_prio.curr =
1075                                 sched_find_first_bit(array->bitmap);
1076                 }
1077
1078         } else
1079                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1080
1081         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1082 }
1083
1084 #else
1085
1086 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1087 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1088
1089 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1090
1091 #ifdef CONFIG_RT_GROUP_SCHED
1092
1093 static void
1094 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1095 {
1096         if (rt_se_boosted(rt_se))
1097                 rt_rq->rt_nr_boosted++;
1098
1099         if (rt_rq->tg)
1100                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1101 }
1102
1103 static void
1104 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1105 {
1106         if (rt_se_boosted(rt_se))
1107                 rt_rq->rt_nr_boosted--;
1108
1109         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1110 }
1111
1112 #else /* CONFIG_RT_GROUP_SCHED */
1113
1114 static void
1115 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1116 {
1117         start_rt_bandwidth(&def_rt_bandwidth);
1118 }
1119
1120 static inline
1121 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1122
1123 #endif /* CONFIG_RT_GROUP_SCHED */
1124
1125 static inline
1126 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1127 {
1128         struct rt_rq *group_rq = group_rt_rq(rt_se);
1129
1130         if (group_rq)
1131                 return group_rq->rt_nr_running;
1132         else
1133                 return 1;
1134 }
1135
1136 static inline
1137 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1138 {
1139         int prio = rt_se_prio(rt_se);
1140
1141         WARN_ON(!rt_prio(prio));
1142         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1143
1144         inc_rt_prio(rt_rq, prio);
1145         inc_rt_migration(rt_se, rt_rq);
1146         inc_rt_group(rt_se, rt_rq);
1147 }
1148
1149 static inline
1150 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1151 {
1152         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1153         WARN_ON(!rt_rq->rt_nr_running);
1154         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1155
1156         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1157         dec_rt_migration(rt_se, rt_rq);
1158         dec_rt_group(rt_se, rt_rq);
1159 }
1160
1161 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1162 {
1163         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1164         struct rt_prio_array *array = &rt_rq->active;
1165         struct rt_rq *group_rq = group_rt_rq(rt_se);
1166         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1167
1168         /*
1169          * Don't enqueue the group if its throttled, or when empty.
1170          * The latter is a consequence of the former when a child group
1171          * get throttled and the current group doesn't have any other
1172          * active members.
1173          */
1174         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1175                 return;
1176
1177         if (head)
1178                 list_add(&rt_se->run_list, queue);
1179         else
1180                 list_add_tail(&rt_se->run_list, queue);
1181         __set_bit(rt_se_prio(rt_se), array->bitmap);
1182
1183         inc_rt_tasks(rt_se, rt_rq);
1184 }
1185
1186 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1187 {
1188         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1189         struct rt_prio_array *array = &rt_rq->active;
1190
1191         list_del_init(&rt_se->run_list);
1192         if (list_empty(array->queue + rt_se_prio(rt_se)))
1193                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1194
1195         dec_rt_tasks(rt_se, rt_rq);
1196 }
1197
1198 /*
1199  * Because the prio of an upper entry depends on the lower
1200  * entries, we must remove entries top - down.
1201  */
1202 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1203 {
1204         struct sched_rt_entity *back = NULL;
1205
1206         for_each_sched_rt_entity(rt_se) {
1207                 rt_se->back = back;
1208                 back = rt_se;
1209         }
1210
1211         dequeue_top_rt_rq(rt_rq_of_se(back));
1212
1213         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1214                 if (on_rt_rq(rt_se))
1215                         __dequeue_rt_entity(rt_se);
1216         }
1217 }
1218
1219 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1220 {
1221         struct rq *rq = rq_of_rt_se(rt_se);
1222
1223         dequeue_rt_stack(rt_se);
1224         for_each_sched_rt_entity(rt_se)
1225                 __enqueue_rt_entity(rt_se, head);
1226         enqueue_top_rt_rq(&rq->rt);
1227 }
1228
1229 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1230 {
1231         struct rq *rq = rq_of_rt_se(rt_se);
1232
1233         dequeue_rt_stack(rt_se);
1234
1235         for_each_sched_rt_entity(rt_se) {
1236                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1237
1238                 if (rt_rq && rt_rq->rt_nr_running)
1239                         __enqueue_rt_entity(rt_se, false);
1240         }
1241         enqueue_top_rt_rq(&rq->rt);
1242 }
1243
1244 /*
1245  * Adding/removing a task to/from a priority array:
1246  */
1247 static void
1248 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1249 {
1250         struct sched_rt_entity *rt_se = &p->rt;
1251
1252         if (flags & ENQUEUE_WAKEUP)
1253                 rt_se->timeout = 0;
1254
1255         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1256
1257         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1258                 enqueue_pushable_task(rq, p);
1259 }
1260
1261 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1262 {
1263         struct sched_rt_entity *rt_se = &p->rt;
1264
1265         update_curr_rt(rq);
1266         dequeue_rt_entity(rt_se);
1267
1268         dequeue_pushable_task(rq, p);
1269 }
1270
1271 /*
1272  * Put task to the head or the end of the run list without the overhead of
1273  * dequeue followed by enqueue.
1274  */
1275 static void
1276 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1277 {
1278         if (on_rt_rq(rt_se)) {
1279                 struct rt_prio_array *array = &rt_rq->active;
1280                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1281
1282                 if (head)
1283                         list_move(&rt_se->run_list, queue);
1284                 else
1285                         list_move_tail(&rt_se->run_list, queue);
1286         }
1287 }
1288
1289 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1290 {
1291         struct sched_rt_entity *rt_se = &p->rt;
1292         struct rt_rq *rt_rq;
1293
1294         for_each_sched_rt_entity(rt_se) {
1295                 rt_rq = rt_rq_of_se(rt_se);
1296                 requeue_rt_entity(rt_rq, rt_se, head);
1297         }
1298 }
1299
1300 static void yield_task_rt(struct rq *rq)
1301 {
1302         requeue_task_rt(rq, rq->curr, 0);
1303 }
1304
1305 #ifdef CONFIG_SMP
1306 static int find_lowest_rq(struct task_struct *task);
1307
1308 static int
1309 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1310 {
1311         struct task_struct *curr;
1312         struct rq *rq;
1313
1314         /* For anything but wake ups, just return the task_cpu */
1315         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1316                 goto out;
1317
1318         rq = cpu_rq(cpu);
1319
1320         rcu_read_lock();
1321         curr = READ_ONCE(rq->curr); /* unlocked access */
1322
1323         /*
1324          * If the current task on @p's runqueue is an RT task, then
1325          * try to see if we can wake this RT task up on another
1326          * runqueue. Otherwise simply start this RT task
1327          * on its current runqueue.
1328          *
1329          * We want to avoid overloading runqueues. If the woken
1330          * task is a higher priority, then it will stay on this CPU
1331          * and the lower prio task should be moved to another CPU.
1332          * Even though this will probably make the lower prio task
1333          * lose its cache, we do not want to bounce a higher task
1334          * around just because it gave up its CPU, perhaps for a
1335          * lock?
1336          *
1337          * For equal prio tasks, we just let the scheduler sort it out.
1338          *
1339          * Otherwise, just let it ride on the affined RQ and the
1340          * post-schedule router will push the preempted task away
1341          *
1342          * This test is optimistic, if we get it wrong the load-balancer
1343          * will have to sort it out.
1344          */
1345         if (curr && unlikely(rt_task(curr)) &&
1346             (curr->nr_cpus_allowed < 2 ||
1347              curr->prio <= p->prio)) {
1348                 int target = find_lowest_rq(p);
1349
1350                 /*
1351                  * Don't bother moving it if the destination CPU is
1352                  * not running a lower priority task.
1353                  */
1354                 if (target != -1 &&
1355                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1356                         cpu = target;
1357         }
1358         rcu_read_unlock();
1359
1360 out:
1361         return cpu;
1362 }
1363
1364 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1365 {
1366         /*
1367          * Current can't be migrated, useless to reschedule,
1368          * let's hope p can move out.
1369          */
1370         if (rq->curr->nr_cpus_allowed == 1 ||
1371             !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1372                 return;
1373
1374         /*
1375          * p is migratable, so let's not schedule it and
1376          * see if it is pushed or pulled somewhere else.
1377          */
1378         if (p->nr_cpus_allowed != 1
1379             && cpupri_find(&rq->rd->cpupri, p, NULL))
1380                 return;
1381
1382         /*
1383          * There appears to be other cpus that can accept
1384          * current and none to run 'p', so lets reschedule
1385          * to try and push current away:
1386          */
1387         requeue_task_rt(rq, p, 1);
1388         resched_curr(rq);
1389 }
1390
1391 #endif /* CONFIG_SMP */
1392
1393 /*
1394  * Preempt the current task with a newly woken task if needed:
1395  */
1396 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1397 {
1398         if (p->prio < rq->curr->prio) {
1399                 resched_curr(rq);
1400                 return;
1401         }
1402
1403 #ifdef CONFIG_SMP
1404         /*
1405          * If:
1406          *
1407          * - the newly woken task is of equal priority to the current task
1408          * - the newly woken task is non-migratable while current is migratable
1409          * - current will be preempted on the next reschedule
1410          *
1411          * we should check to see if current can readily move to a different
1412          * cpu.  If so, we will reschedule to allow the push logic to try
1413          * to move current somewhere else, making room for our non-migratable
1414          * task.
1415          */
1416         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1417                 check_preempt_equal_prio(rq, p);
1418 #endif
1419 }
1420
1421 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1422                                                    struct rt_rq *rt_rq)
1423 {
1424         struct rt_prio_array *array = &rt_rq->active;
1425         struct sched_rt_entity *next = NULL;
1426         struct list_head *queue;
1427         int idx;
1428
1429         idx = sched_find_first_bit(array->bitmap);
1430         BUG_ON(idx >= MAX_RT_PRIO);
1431
1432         queue = array->queue + idx;
1433         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1434
1435         return next;
1436 }
1437
1438 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1439 {
1440         struct sched_rt_entity *rt_se;
1441         struct task_struct *p;
1442         struct rt_rq *rt_rq  = &rq->rt;
1443
1444         do {
1445                 rt_se = pick_next_rt_entity(rq, rt_rq);
1446                 BUG_ON(!rt_se);
1447                 rt_rq = group_rt_rq(rt_se);
1448         } while (rt_rq);
1449
1450         p = rt_task_of(rt_se);
1451         p->se.exec_start = rq_clock_task(rq);
1452
1453         return p;
1454 }
1455
1456 static struct task_struct *
1457 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1458 {
1459         struct task_struct *p;
1460         struct rt_rq *rt_rq = &rq->rt;
1461
1462         if (need_pull_rt_task(rq, prev)) {
1463                 /*
1464                  * This is OK, because current is on_cpu, which avoids it being
1465                  * picked for load-balance and preemption/IRQs are still
1466                  * disabled avoiding further scheduler activity on it and we're
1467                  * being very careful to re-start the picking loop.
1468                  */
1469                 lockdep_unpin_lock(&rq->lock);
1470                 pull_rt_task(rq);
1471                 lockdep_pin_lock(&rq->lock);
1472                 /*
1473                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1474                  * means a dl or stop task can slip in, in which case we need
1475                  * to re-start task selection.
1476                  */
1477                 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1478                              rq->dl.dl_nr_running))
1479                         return RETRY_TASK;
1480         }
1481
1482         /*
1483          * We may dequeue prev's rt_rq in put_prev_task().
1484          * So, we update time before rt_nr_running check.
1485          */
1486         if (prev->sched_class == &rt_sched_class)
1487                 update_curr_rt(rq);
1488
1489         if (!rt_rq->rt_queued)
1490                 return NULL;
1491
1492         put_prev_task(rq, prev);
1493
1494         p = _pick_next_task_rt(rq);
1495
1496         /* The running task is never eligible for pushing */
1497         dequeue_pushable_task(rq, p);
1498
1499         queue_push_tasks(rq);
1500
1501         return p;
1502 }
1503
1504 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1505 {
1506         update_curr_rt(rq);
1507
1508         /*
1509          * The previous task needs to be made eligible for pushing
1510          * if it is still active
1511          */
1512         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1513                 enqueue_pushable_task(rq, p);
1514 }
1515
1516 #ifdef CONFIG_SMP
1517
1518 /* Only try algorithms three times */
1519 #define RT_MAX_TRIES 3
1520
1521 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1522 {
1523         if (!task_running(rq, p) &&
1524             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1525                 return 1;
1526         return 0;
1527 }
1528
1529 /*
1530  * Return the highest pushable rq's task, which is suitable to be executed
1531  * on the cpu, NULL otherwise
1532  */
1533 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1534 {
1535         struct plist_head *head = &rq->rt.pushable_tasks;
1536         struct task_struct *p;
1537
1538         if (!has_pushable_tasks(rq))
1539                 return NULL;
1540
1541         plist_for_each_entry(p, head, pushable_tasks) {
1542                 if (pick_rt_task(rq, p, cpu))
1543                         return p;
1544         }
1545
1546         return NULL;
1547 }
1548
1549 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1550
1551 static int find_lowest_rq(struct task_struct *task)
1552 {
1553         struct sched_domain *sd;
1554         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1555         int this_cpu = smp_processor_id();
1556         int cpu      = task_cpu(task);
1557
1558         /* Make sure the mask is initialized first */
1559         if (unlikely(!lowest_mask))
1560                 return -1;
1561
1562         if (task->nr_cpus_allowed == 1)
1563                 return -1; /* No other targets possible */
1564
1565         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1566                 return -1; /* No targets found */
1567
1568         /*
1569          * At this point we have built a mask of cpus representing the
1570          * lowest priority tasks in the system.  Now we want to elect
1571          * the best one based on our affinity and topology.
1572          *
1573          * We prioritize the last cpu that the task executed on since
1574          * it is most likely cache-hot in that location.
1575          */
1576         if (cpumask_test_cpu(cpu, lowest_mask))
1577                 return cpu;
1578
1579         /*
1580          * Otherwise, we consult the sched_domains span maps to figure
1581          * out which cpu is logically closest to our hot cache data.
1582          */
1583         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1584                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1585
1586         rcu_read_lock();
1587         for_each_domain(cpu, sd) {
1588                 if (sd->flags & SD_WAKE_AFFINE) {
1589                         int best_cpu;
1590
1591                         /*
1592                          * "this_cpu" is cheaper to preempt than a
1593                          * remote processor.
1594                          */
1595                         if (this_cpu != -1 &&
1596                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1597                                 rcu_read_unlock();
1598                                 return this_cpu;
1599                         }
1600
1601                         best_cpu = cpumask_first_and(lowest_mask,
1602                                                      sched_domain_span(sd));
1603                         if (best_cpu < nr_cpu_ids) {
1604                                 rcu_read_unlock();
1605                                 return best_cpu;
1606                         }
1607                 }
1608         }
1609         rcu_read_unlock();
1610
1611         /*
1612          * And finally, if there were no matches within the domains
1613          * just give the caller *something* to work with from the compatible
1614          * locations.
1615          */
1616         if (this_cpu != -1)
1617                 return this_cpu;
1618
1619         cpu = cpumask_any(lowest_mask);
1620         if (cpu < nr_cpu_ids)
1621                 return cpu;
1622         return -1;
1623 }
1624
1625 /* Will lock the rq it finds */
1626 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1627 {
1628         struct rq *lowest_rq = NULL;
1629         int tries;
1630         int cpu;
1631
1632         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1633                 cpu = find_lowest_rq(task);
1634
1635                 if ((cpu == -1) || (cpu == rq->cpu))
1636                         break;
1637
1638                 lowest_rq = cpu_rq(cpu);
1639
1640                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1641                         /*
1642                          * Target rq has tasks of equal or higher priority,
1643                          * retrying does not release any lock and is unlikely
1644                          * to yield a different result.
1645                          */
1646                         lowest_rq = NULL;
1647                         break;
1648                 }
1649
1650                 /* if the prio of this runqueue changed, try again */
1651                 if (double_lock_balance(rq, lowest_rq)) {
1652                         /*
1653                          * We had to unlock the run queue. In
1654                          * the mean time, task could have
1655                          * migrated already or had its affinity changed.
1656                          * Also make sure that it wasn't scheduled on its rq.
1657                          */
1658                         if (unlikely(task_rq(task) != rq ||
1659                                      !cpumask_test_cpu(lowest_rq->cpu,
1660                                                        tsk_cpus_allowed(task)) ||
1661                                      task_running(rq, task) ||
1662                                      !task_on_rq_queued(task))) {
1663
1664                                 double_unlock_balance(rq, lowest_rq);
1665                                 lowest_rq = NULL;
1666                                 break;
1667                         }
1668                 }
1669
1670                 /* If this rq is still suitable use it. */
1671                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1672                         break;
1673
1674                 /* try again */
1675                 double_unlock_balance(rq, lowest_rq);
1676                 lowest_rq = NULL;
1677         }
1678
1679         return lowest_rq;
1680 }
1681
1682 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1683 {
1684         struct task_struct *p;
1685
1686         if (!has_pushable_tasks(rq))
1687                 return NULL;
1688
1689         p = plist_first_entry(&rq->rt.pushable_tasks,
1690                               struct task_struct, pushable_tasks);
1691
1692         BUG_ON(rq->cpu != task_cpu(p));
1693         BUG_ON(task_current(rq, p));
1694         BUG_ON(p->nr_cpus_allowed <= 1);
1695
1696         BUG_ON(!task_on_rq_queued(p));
1697         BUG_ON(!rt_task(p));
1698
1699         return p;
1700 }
1701
1702 /*
1703  * If the current CPU has more than one RT task, see if the non
1704  * running task can migrate over to a CPU that is running a task
1705  * of lesser priority.
1706  */
1707 static int push_rt_task(struct rq *rq)
1708 {
1709         struct task_struct *next_task;
1710         struct rq *lowest_rq;
1711         int ret = 0;
1712
1713         if (!rq->rt.overloaded)
1714                 return 0;
1715
1716         next_task = pick_next_pushable_task(rq);
1717         if (!next_task)
1718                 return 0;
1719
1720 retry:
1721         if (unlikely(next_task == rq->curr)) {
1722                 WARN_ON(1);
1723                 return 0;
1724         }
1725
1726         /*
1727          * It's possible that the next_task slipped in of
1728          * higher priority than current. If that's the case
1729          * just reschedule current.
1730          */
1731         if (unlikely(next_task->prio < rq->curr->prio)) {
1732                 resched_curr(rq);
1733                 return 0;
1734         }
1735
1736         /* We might release rq lock */
1737         get_task_struct(next_task);
1738
1739         /* find_lock_lowest_rq locks the rq if found */
1740         lowest_rq = find_lock_lowest_rq(next_task, rq);
1741         if (!lowest_rq) {
1742                 struct task_struct *task;
1743                 /*
1744                  * find_lock_lowest_rq releases rq->lock
1745                  * so it is possible that next_task has migrated.
1746                  *
1747                  * We need to make sure that the task is still on the same
1748                  * run-queue and is also still the next task eligible for
1749                  * pushing.
1750                  */
1751                 task = pick_next_pushable_task(rq);
1752                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1753                         /*
1754                          * The task hasn't migrated, and is still the next
1755                          * eligible task, but we failed to find a run-queue
1756                          * to push it to.  Do not retry in this case, since
1757                          * other cpus will pull from us when ready.
1758                          */
1759                         goto out;
1760                 }
1761
1762                 if (!task)
1763                         /* No more tasks, just exit */
1764                         goto out;
1765
1766                 /*
1767                  * Something has shifted, try again.
1768                  */
1769                 put_task_struct(next_task);
1770                 next_task = task;
1771                 goto retry;
1772         }
1773
1774         deactivate_task(rq, next_task, 0);
1775         set_task_cpu(next_task, lowest_rq->cpu);
1776         activate_task(lowest_rq, next_task, 0);
1777         ret = 1;
1778
1779         resched_curr(lowest_rq);
1780
1781         double_unlock_balance(rq, lowest_rq);
1782
1783 out:
1784         put_task_struct(next_task);
1785
1786         return ret;
1787 }
1788
1789 static void push_rt_tasks(struct rq *rq)
1790 {
1791         /* push_rt_task will return true if it moved an RT */
1792         while (push_rt_task(rq))
1793                 ;
1794 }
1795
1796 #ifdef HAVE_RT_PUSH_IPI
1797
1798 /*
1799  * When a high priority task schedules out from a CPU and a lower priority
1800  * task is scheduled in, a check is made to see if there's any RT tasks
1801  * on other CPUs that are waiting to run because a higher priority RT task
1802  * is currently running on its CPU. In this case, the CPU with multiple RT
1803  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1804  * up that may be able to run one of its non-running queued RT tasks.
1805  *
1806  * All CPUs with overloaded RT tasks need to be notified as there is currently
1807  * no way to know which of these CPUs have the highest priority task waiting
1808  * to run. Instead of trying to take a spinlock on each of these CPUs,
1809  * which has shown to cause large latency when done on machines with many
1810  * CPUs, sending an IPI to the CPUs to have them push off the overloaded
1811  * RT tasks waiting to run.
1812  *
1813  * Just sending an IPI to each of the CPUs is also an issue, as on large
1814  * count CPU machines, this can cause an IPI storm on a CPU, especially
1815  * if its the only CPU with multiple RT tasks queued, and a large number
1816  * of CPUs scheduling a lower priority task at the same time.
1817  *
1818  * Each root domain has its own irq work function that can iterate over
1819  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1820  * tassk must be checked if there's one or many CPUs that are lowering
1821  * their priority, there's a single irq work iterator that will try to
1822  * push off RT tasks that are waiting to run.
1823  *
1824  * When a CPU schedules a lower priority task, it will kick off the
1825  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1826  * As it only takes the first CPU that schedules a lower priority task
1827  * to start the process, the rto_start variable is incremented and if
1828  * the atomic result is one, then that CPU will try to take the rto_lock.
1829  * This prevents high contention on the lock as the process handles all
1830  * CPUs scheduling lower priority tasks.
1831  *
1832  * All CPUs that are scheduling a lower priority task will increment the
1833  * rt_loop_next variable. This will make sure that the irq work iterator
1834  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1835  * priority task, even if the iterator is in the middle of a scan. Incrementing
1836  * the rt_loop_next will cause the iterator to perform another scan.
1837  *
1838  */
1839 static int rto_next_cpu(struct root_domain *rd)
1840 {
1841         int next;
1842         int cpu;
1843
1844         /*
1845          * When starting the IPI RT pushing, the rto_cpu is set to -1,
1846          * rt_next_cpu() will simply return the first CPU found in
1847          * the rto_mask.
1848          *
1849          * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
1850          * will return the next CPU found in the rto_mask.
1851          *
1852          * If there are no more CPUs left in the rto_mask, then a check is made
1853          * against rto_loop and rto_loop_next. rto_loop is only updated with
1854          * the rto_lock held, but any CPU may increment the rto_loop_next
1855          * without any locking.
1856          */
1857         for (;;) {
1858
1859                 /* When rto_cpu is -1 this acts like cpumask_first() */
1860                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1861
1862                 rd->rto_cpu = cpu;
1863
1864                 if (cpu < nr_cpu_ids)
1865                         return cpu;
1866
1867                 rd->rto_cpu = -1;
1868
1869                 /*
1870                  * ACQUIRE ensures we see the @rto_mask changes
1871                  * made prior to the @next value observed.
1872                  *
1873                  * Matches WMB in rt_set_overload().
1874                  */
1875                 next = atomic_read_acquire(&rd->rto_loop_next);
1876
1877                 if (rd->rto_loop == next)
1878                         break;
1879
1880                 rd->rto_loop = next;
1881         }
1882
1883         return -1;
1884 }
1885
1886 static inline bool rto_start_trylock(atomic_t *v)
1887 {
1888         return !atomic_cmpxchg_acquire(v, 0, 1);
1889 }
1890
1891 static inline void rto_start_unlock(atomic_t *v)
1892 {
1893         atomic_set_release(v, 0);
1894 }
1895
1896 static void tell_cpu_to_push(struct rq *rq)
1897 {
1898         int cpu = -1;
1899
1900         /* Keep the loop going if the IPI is currently active */
1901         atomic_inc(&rq->rd->rto_loop_next);
1902
1903         /* Only one CPU can initiate a loop at a time */
1904         if (!rto_start_trylock(&rq->rd->rto_loop_start))
1905                 return;
1906
1907         raw_spin_lock(&rq->rd->rto_lock);
1908
1909         /*
1910          * The rto_cpu is updated under the lock, if it has a valid cpu
1911          * then the IPI is still running and will continue due to the
1912          * update to loop_next, and nothing needs to be done here.
1913          * Otherwise it is finishing up and an ipi needs to be sent.
1914          */
1915         if (rq->rd->rto_cpu < 0)
1916                 cpu = rto_next_cpu(rq->rd);
1917
1918         raw_spin_unlock(&rq->rd->rto_lock);
1919
1920         rto_start_unlock(&rq->rd->rto_loop_start);
1921
1922         if (cpu >= 0) {
1923                 /* Make sure the rd does not get freed while pushing */
1924                 sched_get_rd(rq->rd);
1925                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
1926         }
1927 }
1928
1929 /* Called from hardirq context */
1930 void rto_push_irq_work_func(struct irq_work *work)
1931 {
1932         struct root_domain *rd =
1933                 container_of(work, struct root_domain, rto_push_work);
1934         struct rq *rq;
1935         int cpu;
1936
1937         rq = this_rq();
1938
1939         /*
1940          * We do not need to grab the lock to check for has_pushable_tasks.
1941          * When it gets updated, a check is made if a push is possible.
1942          */
1943         if (has_pushable_tasks(rq)) {
1944                 raw_spin_lock(&rq->lock);
1945                 push_rt_tasks(rq);
1946                 raw_spin_unlock(&rq->lock);
1947         }
1948
1949         raw_spin_lock(&rd->rto_lock);
1950
1951         /* Pass the IPI to the next rt overloaded queue */
1952         cpu = rto_next_cpu(rd);
1953
1954         raw_spin_unlock(&rd->rto_lock);
1955
1956         if (cpu < 0) {
1957                 sched_put_rd(rd);
1958                 return;
1959         }
1960
1961         /* Try the next RT overloaded CPU */
1962         irq_work_queue_on(&rd->rto_push_work, cpu);
1963 }
1964 #endif /* HAVE_RT_PUSH_IPI */
1965
1966 static void pull_rt_task(struct rq *this_rq)
1967 {
1968         int this_cpu = this_rq->cpu, cpu;
1969         bool resched = false;
1970         struct task_struct *p;
1971         struct rq *src_rq;
1972         int rt_overload_count = rt_overloaded(this_rq);
1973
1974         if (likely(!rt_overload_count))
1975                 return;
1976
1977         /*
1978          * Match the barrier from rt_set_overloaded; this guarantees that if we
1979          * see overloaded we must also see the rto_mask bit.
1980          */
1981         smp_rmb();
1982
1983         /* If we are the only overloaded CPU do nothing */
1984         if (rt_overload_count == 1 &&
1985             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
1986                 return;
1987
1988 #ifdef HAVE_RT_PUSH_IPI
1989         if (sched_feat(RT_PUSH_IPI)) {
1990                 tell_cpu_to_push(this_rq);
1991                 return;
1992         }
1993 #endif
1994
1995         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1996                 if (this_cpu == cpu)
1997                         continue;
1998
1999                 src_rq = cpu_rq(cpu);
2000
2001                 /*
2002                  * Don't bother taking the src_rq->lock if the next highest
2003                  * task is known to be lower-priority than our current task.
2004                  * This may look racy, but if this value is about to go
2005                  * logically higher, the src_rq will push this task away.
2006                  * And if its going logically lower, we do not care
2007                  */
2008                 if (src_rq->rt.highest_prio.next >=
2009                     this_rq->rt.highest_prio.curr)
2010                         continue;
2011
2012                 /*
2013                  * We can potentially drop this_rq's lock in
2014                  * double_lock_balance, and another CPU could
2015                  * alter this_rq
2016                  */
2017                 double_lock_balance(this_rq, src_rq);
2018
2019                 /*
2020                  * We can pull only a task, which is pushable
2021                  * on its rq, and no others.
2022                  */
2023                 p = pick_highest_pushable_task(src_rq, this_cpu);
2024
2025                 /*
2026                  * Do we have an RT task that preempts
2027                  * the to-be-scheduled task?
2028                  */
2029                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2030                         WARN_ON(p == src_rq->curr);
2031                         WARN_ON(!task_on_rq_queued(p));
2032
2033                         /*
2034                          * There's a chance that p is higher in priority
2035                          * than what's currently running on its cpu.
2036                          * This is just that p is wakeing up and hasn't
2037                          * had a chance to schedule. We only pull
2038                          * p if it is lower in priority than the
2039                          * current task on the run queue
2040                          */
2041                         if (p->prio < src_rq->curr->prio)
2042                                 goto skip;
2043
2044                         resched = true;
2045
2046                         deactivate_task(src_rq, p, 0);
2047                         set_task_cpu(p, this_cpu);
2048                         activate_task(this_rq, p, 0);
2049                         /*
2050                          * We continue with the search, just in
2051                          * case there's an even higher prio task
2052                          * in another runqueue. (low likelihood
2053                          * but possible)
2054                          */
2055                 }
2056 skip:
2057                 double_unlock_balance(this_rq, src_rq);
2058         }
2059
2060         if (resched)
2061                 resched_curr(this_rq);
2062 }
2063
2064 /*
2065  * If we are not running and we are not going to reschedule soon, we should
2066  * try to push tasks away now
2067  */
2068 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2069 {
2070         if (!task_running(rq, p) &&
2071             !test_tsk_need_resched(rq->curr) &&
2072             p->nr_cpus_allowed > 1 &&
2073             (dl_task(rq->curr) || rt_task(rq->curr)) &&
2074             (rq->curr->nr_cpus_allowed < 2 ||
2075              rq->curr->prio <= p->prio))
2076                 push_rt_tasks(rq);
2077 }
2078
2079 /* Assumes rq->lock is held */
2080 static void rq_online_rt(struct rq *rq)
2081 {
2082         if (rq->rt.overloaded)
2083                 rt_set_overload(rq);
2084
2085         __enable_runtime(rq);
2086
2087         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2088 }
2089
2090 /* Assumes rq->lock is held */
2091 static void rq_offline_rt(struct rq *rq)
2092 {
2093         if (rq->rt.overloaded)
2094                 rt_clear_overload(rq);
2095
2096         __disable_runtime(rq);
2097
2098         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2099 }
2100
2101 /*
2102  * When switch from the rt queue, we bring ourselves to a position
2103  * that we might want to pull RT tasks from other runqueues.
2104  */
2105 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2106 {
2107         /*
2108          * If there are other RT tasks then we will reschedule
2109          * and the scheduling of the other RT tasks will handle
2110          * the balancing. But if we are the last RT task
2111          * we may need to handle the pulling of RT tasks
2112          * now.
2113          */
2114         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2115                 return;
2116
2117         queue_pull_task(rq);
2118 }
2119
2120 void __init init_sched_rt_class(void)
2121 {
2122         unsigned int i;
2123
2124         for_each_possible_cpu(i) {
2125                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2126                                         GFP_KERNEL, cpu_to_node(i));
2127         }
2128 }
2129 #endif /* CONFIG_SMP */
2130
2131 /*
2132  * When switching a task to RT, we may overload the runqueue
2133  * with RT tasks. In this case we try to push them off to
2134  * other runqueues.
2135  */
2136 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2137 {
2138         /*
2139          * If we are already running, then there's nothing
2140          * that needs to be done. But if we are not running
2141          * we may need to preempt the current running task.
2142          * If that current running task is also an RT task
2143          * then see if we can move to another run queue.
2144          */
2145         if (task_on_rq_queued(p) && rq->curr != p) {
2146 #ifdef CONFIG_SMP
2147                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2148                         queue_push_tasks(rq);
2149 #endif /* CONFIG_SMP */
2150                 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2151                         resched_curr(rq);
2152         }
2153 }
2154
2155 /*
2156  * Priority of the task has changed. This may cause
2157  * us to initiate a push or pull.
2158  */
2159 static void
2160 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2161 {
2162         if (!task_on_rq_queued(p))
2163                 return;
2164
2165         if (rq->curr == p) {
2166 #ifdef CONFIG_SMP
2167                 /*
2168                  * If our priority decreases while running, we
2169                  * may need to pull tasks to this runqueue.
2170                  */
2171                 if (oldprio < p->prio)
2172                         queue_pull_task(rq);
2173
2174                 /*
2175                  * If there's a higher priority task waiting to run
2176                  * then reschedule.
2177                  */
2178                 if (p->prio > rq->rt.highest_prio.curr)
2179                         resched_curr(rq);
2180 #else
2181                 /* For UP simply resched on drop of prio */
2182                 if (oldprio < p->prio)
2183                         resched_curr(rq);
2184 #endif /* CONFIG_SMP */
2185         } else {
2186                 /*
2187                  * This task is not running, but if it is
2188                  * greater than the current running task
2189                  * then reschedule.
2190                  */
2191                 if (p->prio < rq->curr->prio)
2192                         resched_curr(rq);
2193         }
2194 }
2195
2196 static void watchdog(struct rq *rq, struct task_struct *p)
2197 {
2198         unsigned long soft, hard;
2199
2200         /* max may change after cur was read, this will be fixed next tick */
2201         soft = task_rlimit(p, RLIMIT_RTTIME);
2202         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2203
2204         if (soft != RLIM_INFINITY) {
2205                 unsigned long next;
2206
2207                 if (p->rt.watchdog_stamp != jiffies) {
2208                         p->rt.timeout++;
2209                         p->rt.watchdog_stamp = jiffies;
2210                 }
2211
2212                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2213                 if (p->rt.timeout > next)
2214                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2215         }
2216 }
2217
2218 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2219 {
2220         struct sched_rt_entity *rt_se = &p->rt;
2221
2222         update_curr_rt(rq);
2223
2224         watchdog(rq, p);
2225
2226         /*
2227          * RR tasks need a special form of timeslice management.
2228          * FIFO tasks have no timeslices.
2229          */
2230         if (p->policy != SCHED_RR)
2231                 return;
2232
2233         if (--p->rt.time_slice)
2234                 return;
2235
2236         p->rt.time_slice = sched_rr_timeslice;
2237
2238         /*
2239          * Requeue to the end of queue if we (and all of our ancestors) are not
2240          * the only element on the queue
2241          */
2242         for_each_sched_rt_entity(rt_se) {
2243                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2244                         requeue_task_rt(rq, p, 0);
2245                         resched_curr(rq);
2246                         return;
2247                 }
2248         }
2249 }
2250
2251 static void set_curr_task_rt(struct rq *rq)
2252 {
2253         struct task_struct *p = rq->curr;
2254
2255         p->se.exec_start = rq_clock_task(rq);
2256
2257         /* The running task is never eligible for pushing */
2258         dequeue_pushable_task(rq, p);
2259 }
2260
2261 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2262 {
2263         /*
2264          * Time slice is 0 for SCHED_FIFO tasks
2265          */
2266         if (task->policy == SCHED_RR)
2267                 return sched_rr_timeslice;
2268         else
2269                 return 0;
2270 }
2271
2272 const struct sched_class rt_sched_class = {
2273         .next                   = &fair_sched_class,
2274         .enqueue_task           = enqueue_task_rt,
2275         .dequeue_task           = dequeue_task_rt,
2276         .yield_task             = yield_task_rt,
2277
2278         .check_preempt_curr     = check_preempt_curr_rt,
2279
2280         .pick_next_task         = pick_next_task_rt,
2281         .put_prev_task          = put_prev_task_rt,
2282
2283 #ifdef CONFIG_SMP
2284         .select_task_rq         = select_task_rq_rt,
2285
2286         .set_cpus_allowed       = set_cpus_allowed_common,
2287         .rq_online              = rq_online_rt,
2288         .rq_offline             = rq_offline_rt,
2289         .task_woken             = task_woken_rt,
2290         .switched_from          = switched_from_rt,
2291 #endif
2292
2293         .set_curr_task          = set_curr_task_rt,
2294         .task_tick              = task_tick_rt,
2295
2296         .get_rr_interval        = get_rr_interval_rt,
2297
2298         .prio_changed           = prio_changed_rt,
2299         .switched_to            = switched_to_rt,
2300
2301         .update_curr            = update_curr_rt,
2302 };
2303
2304 #ifdef CONFIG_SCHED_DEBUG
2305 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2306
2307 void print_rt_stats(struct seq_file *m, int cpu)
2308 {
2309         rt_rq_iter_t iter;
2310         struct rt_rq *rt_rq;
2311
2312         rcu_read_lock();
2313         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2314                 print_rt_rq(m, cpu, rt_rq);
2315         rcu_read_unlock();
2316 }
2317 #endif /* CONFIG_SCHED_DEBUG */