1 // SPDX-License-Identifier: GPL-2.0
3 * Deadline Scheduling Class (SCHED_DEADLINE)
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
7 * Tasks that periodically execute their instances for less than their
8 * runtime won't miss any of their deadlines.
9 * Tasks that are not periodic or sporadic, or that try to execute more
10 * than their reserved bandwidth, will be slowed down (and may potentially
11 * miss some of their deadlines), and won't affect any other task.
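 *
 * As an illustration (numbers are made up, not taken from this file): a task
 * admitted with runtime 10ms, deadline 30ms and period 100ms is reserved
 * 10ms of CPU time in every 100ms window, to be consumed within 30ms of each
 * periodic activation. As long as it never needs more than those 10ms per
 * period (and the total admitted bandwidth fits), it meets all its deadlines,
 * while an overrunning instance is throttled by the CBS.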
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14 * Juri Lelli <juri.lelli@gmail.com>,
15 * Michael Trimarchi <michael@amarulasolutions.com>,
16 * Fabio Checconi <fchecconi@gmail.com>
21 struct dl_bandwidth def_dl_bandwidth;
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
25 return container_of(dl_se, struct task_struct, dl);
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
30 return container_of(dl_rq, struct rq, dl);
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
46 #ifdef CONFIG_RT_MUTEXES
47 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
52 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
54 return pi_of(dl_se) != dl_se;
57 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
62 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
69 static inline struct dl_bw *dl_bw_of(int i)
71 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
72 "sched RCU must be held");
73 return &cpu_rq(i)->rd->dl_bw;
76 static inline int dl_bw_cpus(int i)
78 struct root_domain *rd = cpu_rq(i)->rd;
81 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
82 "sched RCU must be held");
83 for_each_cpu_and(i, rd->span, cpu_active_mask)
89 static inline struct dl_bw *dl_bw_of(int i)
91 return &cpu_rq(i)->dl.dl_bw;
94 static inline int dl_bw_cpus(int i)
101 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
103 u64 old = dl_rq->running_bw;
105 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
106 dl_rq->running_bw += dl_bw;
107 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
108 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
109 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
110 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
114 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
116 u64 old = dl_rq->running_bw;
118 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
119 dl_rq->running_bw -= dl_bw;
120 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
121 if (dl_rq->running_bw > old)
122 dl_rq->running_bw = 0;
123 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
124 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
128 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
130 u64 old = dl_rq->this_bw;
132 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
133 dl_rq->this_bw += dl_bw;
134 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
138 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
140 u64 old = dl_rq->this_bw;
142 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
143 dl_rq->this_bw -= dl_bw;
144 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
145 if (dl_rq->this_bw > old)
147 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
151 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
153 if (!dl_entity_is_special(dl_se))
154 __add_rq_bw(dl_se->dl_bw, dl_rq);
158 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
160 if (!dl_entity_is_special(dl_se))
161 __sub_rq_bw(dl_se->dl_bw, dl_rq);
165 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
167 if (!dl_entity_is_special(dl_se))
168 __add_running_bw(dl_se->dl_bw, dl_rq);
172 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
174 if (!dl_entity_is_special(dl_se))
175 __sub_running_bw(dl_se->dl_bw, dl_rq);
178 void dl_change_utilization(struct task_struct *p, u64 new_bw)
182 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
184 if (task_on_rq_queued(p))
188 if (p->dl.dl_non_contending) {
189 sub_running_bw(&p->dl, &rq->dl);
190 p->dl.dl_non_contending = 0;
192 * If the timer handler is currently running and the
193 * timer cannot be cancelled, inactive_task_timer()
194 * will see that dl_non_contending is not set, and
195 * will not touch the rq's active utilization,
196 * so we are still safe.
198 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
201 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
202 __add_rq_bw(new_bw, &rq->dl);
206 * The utilization of a task cannot be immediately removed from
207 * the rq active utilization (running_bw) when the task blocks.
208 * Instead, we have to wait for the so called "0-lag time".
210 * If a task blocks before the "0-lag time", a timer (the inactive
211 * timer) is armed, and running_bw is decreased when the timer
214 * If the task wakes up again before the inactive timer fires,
215 * the timer is cancelled, whereas if the task wakes up after the
216 * inactive timer fired (and running_bw has been decreased) the
217 * task's utilization has to be added to running_bw again.
218 * A flag in the deadline scheduling entity (dl_non_contending)
219 * is used to avoid race conditions between the inactive timer handler
222 * The following diagram shows how running_bw is updated. A task is
223 * "ACTIVE" when its utilization contributes to running_bw; an
224 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
225 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
226 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
227 * time already passed, which does not contribute to running_bw anymore.
228 * +------------------+
230 * +------------------>+ contending |
231 * | add_running_bw | |
232 * | +----+------+------+
235 * +--------+-------+ | |
236 * | | t >= 0-lag | | wakeup
237 * | INACTIVE |<---------------+ |
238 * | | sub_running_bw | |
239 * +--------+-------+ | |
244 * | +----+------+------+
245 * | sub_running_bw | ACTIVE |
246 * +-------------------+ |
247 * inactive timer | non contending |
248 * fired +------------------+
250 * The task_non_contending() function is invoked when a task
251 * blocks, and checks if the 0-lag time already passed or
252 * not (in the first case, it directly updates running_bw;
253 * in the second case, it arms the inactive timer).
255 * The task_contending() function is invoked when a task wakes
256 * up, and checks if the task is still in the "ACTIVE non contending"
257 * state or not (in the second case, it updates running_bw).
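/*
 * Illustrative example of the "0-lag time" (hypothetical numbers): consider
 * a task with dl_runtime = 10ms and dl_period = 100ms that blocks with 4ms
 * of runtime left and its absolute deadline 50ms away. Its 0-lag time is
 *
 *   deadline - runtime * dl_period / dl_runtime = deadline - 4 * 100/10 ms
 *
 * i.e. 40ms before the deadline, hence 10ms from now. If the task wakes up
 * within those 10ms the inactive timer is cancelled and nothing changes;
 * otherwise the timer fires and running_bw is decreased at that point.
 */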
259 static void task_non_contending(struct task_struct *p)
261 struct sched_dl_entity *dl_se = &p->dl;
262 struct hrtimer *timer = &dl_se->inactive_timer;
263 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
264 struct rq *rq = rq_of_dl_rq(dl_rq);
268 * If this is a non-deadline task that has been boosted,
271 if (dl_se->dl_runtime == 0)
274 if (dl_entity_is_special(dl_se))
277 WARN_ON(dl_se->dl_non_contending);
279 zerolag_time = dl_se->deadline -
280 div64_long((dl_se->runtime * dl_se->dl_period),
284 * Using relative times instead of the absolute "0-lag time"
285 * allows us to simplify the code
287 zerolag_time -= rq_clock(rq);
290 * If the "0-lag time" already passed, decrease the active
291 * utilization now, instead of starting a timer
293 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
295 sub_running_bw(dl_se, dl_rq);
296 if (!dl_task(p) || p->state == TASK_DEAD) {
297 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
299 if (p->state == TASK_DEAD)
300 sub_rq_bw(&p->dl, &rq->dl);
301 raw_spin_lock(&dl_b->lock);
302 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
303 __dl_clear_params(p);
304 raw_spin_unlock(&dl_b->lock);
310 dl_se->dl_non_contending = 1;
312 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
315 static void task_contending(struct sched_dl_entity *dl_se, int flags)
317 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
320 * If this is a non-deadline task that has been boosted,
323 if (dl_se->dl_runtime == 0)
326 if (flags & ENQUEUE_MIGRATED)
327 add_rq_bw(dl_se, dl_rq);
329 if (dl_se->dl_non_contending) {
330 dl_se->dl_non_contending = 0;
332 * If the timer handler is currently running and the
333 * timer cannot be cancelled, inactive_task_timer()
334 * will see that dl_non_contending is not set, and
335 * will not touch the rq's active utilization,
336 * so we are still safe.
338 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
339 put_task_struct(dl_task_of(dl_se));
342 * Since "dl_non_contending" is not set, the
343 * task's utilization has already been removed from
344 * active utilization (either when the task blocked,
345 * or when the "inactive timer" fired).
348 add_running_bw(dl_se, dl_rq);
352 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
354 struct sched_dl_entity *dl_se = &p->dl;
356 return dl_rq->root.rb_leftmost == &dl_se->rb_node;
359 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
361 raw_spin_lock_init(&dl_b->dl_runtime_lock);
362 dl_b->dl_period = period;
363 dl_b->dl_runtime = runtime;
366 void init_dl_bw(struct dl_bw *dl_b)
368 raw_spin_lock_init(&dl_b->lock);
369 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
370 if (global_rt_runtime() == RUNTIME_INF)
373 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
374 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
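/*
 * Sketch of the fixed point bandwidth representation used above (a worked
 * example, assuming the usual defaults rather than defining anything):
 * to_ratio(period, runtime) returns runtime / period scaled by 2^BW_SHIFT.
 * Assuming the default rt_period = 1000000us, rt_runtime = 950000us and
 * BW_SHIFT == 20, dl_b->bw = 950000 * 2^20 / 1000000 = 996147, i.e. roughly
 * 0.95 of a CPU expressed in BW_UNIT units.
 */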
378 void init_dl_rq(struct dl_rq *dl_rq)
380 dl_rq->root = RB_ROOT_CACHED;
383 /* zero means no -deadline tasks */
384 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
386 dl_rq->dl_nr_migratory = 0;
387 dl_rq->overloaded = 0;
388 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
390 init_dl_bw(&dl_rq->dl_bw);
393 dl_rq->running_bw = 0;
395 init_dl_rq_bw_ratio(dl_rq);
400 static inline int dl_overloaded(struct rq *rq)
402 return atomic_read(&rq->rd->dlo_count);
405 static inline void dl_set_overload(struct rq *rq)
410 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
412 * Must be visible before the overload count is
413 * set (as in sched_rt.c).
415 * Matched by the barrier in pull_dl_task().
418 atomic_inc(&rq->rd->dlo_count);
421 static inline void dl_clear_overload(struct rq *rq)
426 atomic_dec(&rq->rd->dlo_count);
427 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
430 static void update_dl_migration(struct dl_rq *dl_rq)
432 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
433 if (!dl_rq->overloaded) {
434 dl_set_overload(rq_of_dl_rq(dl_rq));
435 dl_rq->overloaded = 1;
437 } else if (dl_rq->overloaded) {
438 dl_clear_overload(rq_of_dl_rq(dl_rq));
439 dl_rq->overloaded = 0;
443 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
445 struct task_struct *p = dl_task_of(dl_se);
447 if (p->nr_cpus_allowed > 1)
448 dl_rq->dl_nr_migratory++;
450 update_dl_migration(dl_rq);
453 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
455 struct task_struct *p = dl_task_of(dl_se);
457 if (p->nr_cpus_allowed > 1)
458 dl_rq->dl_nr_migratory--;
460 update_dl_migration(dl_rq);
464 * The list of pushable -deadline tasks is not a plist, like in
465 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
467 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
469 struct dl_rq *dl_rq = &rq->dl;
470 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
471 struct rb_node *parent = NULL;
472 struct task_struct *entry;
473 bool leftmost = true;
475 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
479 entry = rb_entry(parent, struct task_struct,
481 if (dl_entity_preempt(&p->dl, &entry->dl))
482 link = &parent->rb_left;
484 link = &parent->rb_right;
490 dl_rq->earliest_dl.next = p->dl.deadline;
492 rb_link_node(&p->pushable_dl_tasks, parent, link);
493 rb_insert_color_cached(&p->pushable_dl_tasks,
494 &dl_rq->pushable_dl_tasks_root, leftmost);
497 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
499 struct dl_rq *dl_rq = &rq->dl;
501 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
504 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
505 struct rb_node *next_node;
507 next_node = rb_next(&p->pushable_dl_tasks);
509 dl_rq->earliest_dl.next = rb_entry(next_node,
510 struct task_struct, pushable_dl_tasks)->dl.deadline;
514 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
515 RB_CLEAR_NODE(&p->pushable_dl_tasks);
518 static inline int has_pushable_dl_tasks(struct rq *rq)
520 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
523 static int push_dl_task(struct rq *rq);
525 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
527 return dl_task(prev);
530 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
531 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
533 static void push_dl_tasks(struct rq *);
534 static void pull_dl_task(struct rq *);
536 static inline void deadline_queue_push_tasks(struct rq *rq)
538 if (!has_pushable_dl_tasks(rq))
541 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
544 static inline void deadline_queue_pull_task(struct rq *rq)
546 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
549 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
551 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
553 struct rq *later_rq = NULL;
556 later_rq = find_lock_later_rq(p, rq);
561 * If we cannot preempt any rq, fall back to pick any
564 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
565 if (cpu >= nr_cpu_ids) {
567 * Failed to find any suitable CPU.
568 * The task will never come back!
570 BUG_ON(dl_bandwidth_enabled());
573 * If admission control is disabled we
574 * try a little harder to let the task
577 cpu = cpumask_any(cpu_active_mask);
579 later_rq = cpu_rq(cpu);
580 double_lock_balance(rq, later_rq);
583 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
585 * Inactive timer is armed (or callback is running, but
586 * waiting for us to release rq locks). In any case, when it
587 * will fire (or continue), it will see running_bw of this
588 * task migrated to later_rq (and correctly handle it).
590 sub_running_bw(&p->dl, &rq->dl);
591 sub_rq_bw(&p->dl, &rq->dl);
593 add_rq_bw(&p->dl, &later_rq->dl);
594 add_running_bw(&p->dl, &later_rq->dl);
596 sub_rq_bw(&p->dl, &rq->dl);
597 add_rq_bw(&p->dl, &later_rq->dl);
601 * And we finally need to fixup root_domain(s) bandwidth accounting,
602 * since p is still hanging out in the old (now moved to default) root
605 dl_b = &rq->rd->dl_bw;
606 raw_spin_lock(&dl_b->lock);
607 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
608 raw_spin_unlock(&dl_b->lock);
610 dl_b = &later_rq->rd->dl_bw;
611 raw_spin_lock(&dl_b->lock);
612 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
613 raw_spin_unlock(&dl_b->lock);
615 set_task_cpu(p, later_rq->cpu);
616 double_unlock_balance(later_rq, rq);
624 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
629 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
634 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
639 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
643 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
648 static inline void pull_dl_task(struct rq *rq)
652 static inline void deadline_queue_push_tasks(struct rq *rq)
656 static inline void deadline_queue_pull_task(struct rq *rq)
659 #endif /* CONFIG_SMP */
661 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
662 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
663 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
666 * We are being explicitly informed that a new instance is starting,
667 * and this means that:
668 * - the absolute deadline of the entity has to be placed at
669 * current time + relative deadline;
670 * - the runtime of the entity has to be set to the maximum value.
672 * The capability of specifying such an event is useful whenever a -deadline
673 * entity wants to (try to!) synchronize its behaviour with the scheduler's
674 * one, and to (try to!) reconcile itself with its own scheduling parameters.
677 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
679 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
680 struct rq *rq = rq_of_dl_rq(dl_rq);
682 WARN_ON(is_dl_boosted(dl_se));
683 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
686 * We are racing with the deadline timer. So, do nothing because
687 * the deadline timer handler will take care of properly recharging
688 * the runtime and postponing the deadline
690 if (dl_se->dl_throttled)
694 * We use the regular wall clock time to set deadlines in the
695 * future; in fact, we must consider execution overheads (time
696 * spent on hardirq context, etc.).
698 dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
699 dl_se->runtime = dl_se->dl_runtime;
703 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
704 * possibility of an entity lasting more than what it declared, and thus
705 * exhausting its runtime.
707 * Here we are interested in making runtime overrun possible, but we do
708 * not want an entity which is misbehaving to affect the scheduling of all
710 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
711 * is used, in order to confine each entity within its own bandwidth.
713 * This function deals exactly with that, and ensures that when the runtime
714 * of an entity is replenished, its deadline is also postponed. That ensures
715 * the overrunning entity can't interfere with other entities in the system and
716 * can't make them miss their deadlines. Reasons why this kind of overrun
717 * could happen are, typically, an entity voluntarily trying to exceed its
718 * runtime, or having underestimated it during sched_setattr().
720 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
722 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
723 struct rq *rq = rq_of_dl_rq(dl_rq);
725 BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
728 * This could be the case for a !-dl task that is boosted.
729 * Just go with full inherited parameters.
731 if (dl_se->dl_deadline == 0) {
732 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
733 dl_se->runtime = pi_of(dl_se)->dl_runtime;
736 if (dl_se->dl_yielded && dl_se->runtime > 0)
740 * We keep moving the deadline away until we get some
741 * available runtime for the entity. This ensures correct
742 * handling of situations where the runtime overrun is
745 while (dl_se->runtime <= 0) {
746 dl_se->deadline += pi_of(dl_se)->dl_period;
747 dl_se->runtime += pi_of(dl_se)->dl_runtime;
751 * At this point, the deadline really should be "in
752 * the future" with respect to rq->clock. If it's
753 * not, we are, for some reason, lagging too much!
754 * Anyway, after having warned userspace about that,
755 * we still try to keep things running by
756 * resetting the deadline and the budget of the
759 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
760 printk_deferred_once("sched: DL replenish lagged too much\n");
761 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
762 dl_se->runtime = pi_of(dl_se)->dl_runtime;
765 if (dl_se->dl_yielded)
766 dl_se->dl_yielded = 0;
767 if (dl_se->dl_throttled)
768 dl_se->dl_throttled = 0;
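/*
 * Worked example of the replenishment above (illustrative numbers only):
 * with dl_runtime = 10ms and dl_period = 100ms, an entity that ends up with
 * runtime = -3ms gets one iteration of the loop: deadline += 100ms,
 * runtime += 10ms, leaving 7ms of budget. A bigger overrun of -25ms takes
 * three iterations (-25 -> -15 -> -5 -> +5), postponing the deadline by
 * three periods. Either way the long term bandwidth stays at 10ms/100ms,
 * which is what keeps the overrun from hurting other entities.
 */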
772 * Here we check if --at time t-- an entity (which is probably being
773 * [re]activated or, in general, enqueued) can use its remaining runtime
774 * and its current deadline _without_ exceeding the bandwidth it is
775 * assigned (function returns true if it can't). We are in fact applying
776 * one of the CBS rules: when a task wakes up, if the residual runtime
777 * over residual deadline fits within the allocated bandwidth, then we
778 * can keep the current (absolute) deadline and residual budget without
779 * disrupting the schedulability of the system. Otherwise, we should
780 * refill the runtime and set the deadline a period in the future,
781 * because keeping the current (absolute) deadline of the task would
782 * result in breaking guarantees promised to other tasks (refer to
783 * Documentation/scheduler/sched-deadline.rst for more information).
785 * This function returns true if:
787 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
789 * IOW we can't recycle current parameters.
791 * Notice that the bandwidth check is done against the deadline. For a
792 * task with deadline equal to period this is the same as using
793 * dl_period instead of dl_deadline in the equation above.
795 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
800 * left and right are the two sides of the equation above,
801 * after a bit of shuffling to use multiplications instead
804 * Note that none of the time values involved in the two
805 * multiplications are absolute: dl_deadline and dl_runtime
806 * are the relative deadline and the maximum runtime of each
807 * instance, runtime is the runtime left for the last instance
808 * and (deadline - t), since t is rq->clock, is the time left
809 * to the (absolute) deadline. Even if overflowing the u64 type
810 * is very unlikely to occur in both cases, here we scale down
811 * as we want to avoid that risk at all. Scaling down by 10
812 * means that we reduce granularity to 1us. We are fine with it,
813 * since this is only a true/false check and, anyway, thinking
814 * of anything below microseconds resolution is actually fiction
815 * (but still we want to give the user that illusion >;).
817 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
818 right = ((dl_se->deadline - t) >> DL_SCALE) *
819 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
821 return dl_time_before(right, left);
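/*
 * Worked example of the check above (hypothetical numbers, DL_SCALE shifting
 * ignored for readability): take dl_runtime = 10ms and dl_deadline = 100ms,
 * i.e. a bandwidth of 0.1. If the task wakes up with 5ms of leftover runtime
 * and 30ms to its old absolute deadline, then 5/30 > 10/100, the function
 * returns true and the parameters cannot be recycled (a fresh deadline and
 * full runtime will be set). With only 2ms left, 2/30 < 10/100 and the old
 * deadline and runtime are kept.
 */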
825 * Revised wakeup rule [1]: For self-suspending tasks, rather than
826 * re-initializing the task's runtime and deadline, the revised wakeup
827 * rule adjusts the task's runtime to avoid the task overrunning its density.
830 * Reasoning: a task may overrun the density if:
831 * runtime / (deadline - t) > dl_runtime / dl_deadline
833 * Therefore, runtime can be adjusted to:
834 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
836 * In this way, the runtime will be equal to the maximum density
837 * the task can use without breaking any rule.
839 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
840 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
843 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
845 u64 laxity = dl_se->deadline - rq_clock(rq);
848 * If the task has deadline < period, and the deadline is in the past,
849 * it should already be throttled before this check.
851 * See update_dl_entity() comments for further details.
853 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
855 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
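/*
 * Worked example of the revised wakeup rule (illustrative numbers): with
 * dl_runtime = 10ms and dl_deadline = 100ms the density is 0.1. If the task
 * wakes up with 30ms of laxity to its deadline, its runtime is trimmed to
 * about 0.1 * 30ms = 3ms, so that consuming it all before the deadline
 * cannot exceed the declared density, whatever budget was left over from
 * before the suspension.
 */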
859 * Regarding the deadline, a task with implicit deadline has a relative
860 * deadline == relative period. A task with constrained deadline has a
861 * relative deadline <= relative period.
863 * We support constrained deadline tasks. However, there are some restrictions
864 * applied only for tasks which do not have an implicit deadline. See
865 * update_dl_entity() to know more about such restrictions.
867 * The dl_is_implicit() returns true if the task has an implicit deadline.
869 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
871 return dl_se->dl_deadline == dl_se->dl_period;
875 * When a deadline entity is placed in the runqueue, its runtime and deadline
876 * might need to be updated. This is done by a CBS wake up rule. There are two
877 * different rules: 1) the original CBS; and 2) the Revisited CBS.
879 * When the task is starting a new period, the Original CBS is used. In this
880 * case, the runtime is replenished and a new absolute deadline is set.
882 * When a task is queued before the beginning of the next period, using the
883 * remaining runtime and deadline could make the entity overflow; see
884 * dl_entity_overflow() to find more about runtime overflow. When such a case
885 * is detected, the runtime and deadline need to be updated.
887 * If the task has an implicit deadline, i.e., deadline == period, the Original
888 * CBS is applied: the runtime is replenished and a new absolute deadline is
889 * set, as in the previous cases.
891 * However, the Original CBS does not work properly for tasks with
892 * deadline < period, which are said to have a constrained deadline. By
893 * applying the Original CBS, a constrained deadline task would be able to run
894 * runtime/deadline in a period. With deadline < period, the task would
895 * overrun the runtime/period allowed bandwidth, breaking the admission test.
897 * In order to prevent this misbehavior, the Revisited CBS is used for
898 * constrained deadline tasks when a runtime overflow is detected. In the
899 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
900 * the remaining runtime of the task is reduced to avoid runtime overflow.
901 * Please refer to the comments of the update_dl_revised_wakeup() function to
902 * find more about the Revised CBS rule.
904 static void update_dl_entity(struct sched_dl_entity *dl_se)
906 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
907 struct rq *rq = rq_of_dl_rq(dl_rq);
909 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
910 dl_entity_overflow(dl_se, rq_clock(rq))) {
912 if (unlikely(!dl_is_implicit(dl_se) &&
913 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
914 !is_dl_boosted(dl_se))) {
915 update_dl_revised_wakeup(dl_se, rq);
919 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
920 dl_se->runtime = pi_of(dl_se)->dl_runtime;
924 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
926 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
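/*
 * E.g. (illustrative): with dl_deadline = 10ms and dl_period = 100ms, if the
 * current absolute deadline is D then the current period started at D - 10ms
 * and dl_next_period() returns D + 90ms, the start of the next period.
 */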
930 * If the entity depleted all its runtime, and if we want it to sleep
931 * while waiting for some new execution time to become available, we
932 * set the bandwidth replenishment timer to the replenishment instant
933 * and try to activate it.
935 * Notice that it is important for the caller to know if the timer
936 * actually started or not (i.e., the replenishment instant is in
937 * the future or in the past).
939 static int start_dl_timer(struct task_struct *p)
941 struct sched_dl_entity *dl_se = &p->dl;
942 struct hrtimer *timer = &dl_se->dl_timer;
943 struct rq *rq = task_rq(p);
947 lockdep_assert_held(&rq->lock);
950 * We want the timer to fire at the deadline, but considering
951 * that it is actually coming from rq->clock and not from
952 * hrtimer's time base reading.
954 act = ns_to_ktime(dl_next_period(dl_se));
955 now = hrtimer_cb_get_time(timer);
956 delta = ktime_to_ns(now) - rq_clock(rq);
957 act = ktime_add_ns(act, delta);
960 * If the expiry time already passed, e.g., because the value
961 * chosen as the deadline is too small, don't even try to
962 * start the timer in the past!
964 if (ktime_us_delta(act, now) < 0)
968 * !enqueued will guarantee another callback; even if one is already in
969 * progress. This ensures a balanced {get,put}_task_struct().
971 * The race against __run_timer() clearing the enqueued state is
972 * harmless because we're holding task_rq()->lock, therefore the timer
973 * expiring after we've done the check will wait on its task_rq_lock()
974 * and observe our state.
976 if (!hrtimer_is_queued(timer)) {
978 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
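/*
 * Illustrative example of the clock base conversion above (made up numbers):
 * suppose rq_clock() currently reads 1000ms while the hrtimer base reads
 * 1002ms, and the replenishment instant computed from dl_next_period() is
 * 1050ms in rq_clock() terms. delta = 1002 - 1000 = 2ms, so the timer is
 * armed at 1052ms of the hrtimer base, which is still 50ms in the future,
 * as intended.
 */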
985 * This is the bandwidth enforcement timer callback. If here, we know
986 * a task is not on its dl_rq, since the fact that the timer was running
987 * means the task is throttled and needs a runtime replenishment.
989 * However, what we actually do depends on whether the task is active
990 * (it is on its rq) or has been removed from there by a call to
991 * dequeue_task_dl(). In the former case we must issue the runtime
992 * replenishment and add the task back to the dl_rq; in the latter, we just
993 * do nothing but clearing dl_throttled, so that runtime and deadline
994 * updating (and the queueing back to dl_rq) will be done by the
995 * next call to enqueue_task_dl().
997 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
999 struct sched_dl_entity *dl_se = container_of(timer,
1000 struct sched_dl_entity,
1002 struct task_struct *p = dl_task_of(dl_se);
1006 rq = task_rq_lock(p, &rf);
1009 * The task might have changed its scheduling policy to something
1010 * different than SCHED_DEADLINE (through switched_from_dl()).
1016 * The task might have been boosted by someone else and might be in the
1017 * boosting/deboosting path; it's not throttled.
1019 if (is_dl_boosted(dl_se))
1023 * Spurious timer due to start_dl_timer() race; or we already received
1024 * a replenishment from rt_mutex_setprio().
1026 if (!dl_se->dl_throttled)
1030 update_rq_clock(rq);
1033 * If the throttle happened during sched-out; like:
1040 * __dequeue_task_dl()
1043 * We can be both throttled and !queued. Replenish the counter
1044 * but do not enqueue -- wait for our wakeup to do that.
1046 if (!task_on_rq_queued(p)) {
1047 replenish_dl_entity(dl_se);
1052 if (unlikely(!rq->online)) {
1054 * If the runqueue is no longer available, migrate the
1055 * task elsewhere. This necessarily changes rq.
1057 lockdep_unpin_lock(&rq->lock, rf.cookie);
1058 rq = dl_task_offline_migration(rq, p);
1059 rf.cookie = lockdep_pin_lock(&rq->lock);
1060 update_rq_clock(rq);
1063 * Now that the task has been migrated to the new RQ and we
1064 * have that locked, proceed as normal and enqueue the task
1070 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1071 if (dl_task(rq->curr))
1072 check_preempt_curr_dl(rq, p, 0);
1078 * Queueing this task back might have overloaded rq, check if we need
1079 * to kick someone away.
1081 if (has_pushable_dl_tasks(rq)) {
1083 * Nothing relies on rq->lock after this, so it's safe to drop
1086 rq_unpin_lock(rq, &rf);
1088 rq_repin_lock(rq, &rf);
1093 task_rq_unlock(rq, p, &rf);
1096 * This can free the task_struct, including this hrtimer, do not touch
1097 * anything related to that after this.
1101 return HRTIMER_NORESTART;
1104 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1106 struct hrtimer *timer = &dl_se->dl_timer;
1108 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1109 timer->function = dl_task_timer;
1113 * During the activation, CBS checks if it can reuse the current task's
1114 * runtime and period. If the deadline of the task is in the past, CBS
1115 * cannot use the runtime, and so it replenishes the task. This rule
1116 * works fine for implicit deadline tasks (deadline == period), and the
1117 * CBS was designed for implicit deadline tasks. However, a task with
1118 * constrained deadline (deadline < period) might be awakened after the
1119 * deadline, but before the next period. In this case, replenishing the
1120 * task would allow it to run for runtime / deadline. As in this case
1121 * deadline < period, CBS enables a task to run for more than the
1122 * runtime / period. In a very loaded system, this can cause a domino
1123 * effect, making other tasks miss their deadlines.
1125 * To avoid this problem, in the activation of a constrained deadline
1126 * task after the deadline but before the next period, throttle the
1127 * task and set the replenishing timer to the beginning of the next period,
1128 * unless it is boosted.
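/*
 * Illustrative example (hypothetical numbers): a task with dl_runtime = 5ms,
 * dl_deadline = 10ms and dl_period = 100ms that wakes up 20ms into its
 * period has missed its relative deadline but is still before the next
 * period. Replenishing it here would let it run 5ms roughly every 10ms
 * (density 0.5) instead of the admitted 5ms/100ms, so it is throttled
 * instead and the timer is set to dl_next_period().
 */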
1130 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1132 struct task_struct *p = dl_task_of(dl_se);
1133 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1135 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1136 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1137 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1139 dl_se->dl_throttled = 1;
1140 if (dl_se->runtime > 0)
1146 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1148 return (dl_se->runtime <= 0);
1151 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1154 * This function implements the GRUB accounting rule:
1155 * according to the GRUB reclaiming algorithm, the runtime is
1156 * not decreased as "dq = -dt", but as
1157 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1158 * where u is the utilization of the task, Umax is the maximum reclaimable
1159 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1160 * as the difference between the "total runqueue utilization" and the
1161 * runqueue active utilization, and Uextra is the (per runqueue) extra
1162 * reclaimable utilization.
1163 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1164 * multiplied by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1166 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1167 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1168 * Since delta is a 64 bit variable, to have an overflow its value
1169 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1170 * So, overflow is not an issue here.
1172 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1174 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1176 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1179 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1180 * we compare u_inact + rq->dl.extra_bw with
1181 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1182 * u_inact + rq->dl.extra_bw can be larger than
1183 * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
1184 * leading to wrong results)
1186 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1189 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1191 return (delta * u_act) >> BW_SHIFT;
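/*
 * Worked example of the reclaiming rule (all numbers invented for
 * illustration): say Umax = 0.95 (so bw_ratio ~= 1/0.95), the task has
 * dl_bw = 0.25, and the runqueue has this_bw = 0.5, running_bw = 0.4 and
 * extra_bw = 0.45, all in BW_UNIT units. Then u_inact = 0.1 and
 * u_inact + extra_bw = 0.55, which is below 1 - u_act_min (~0.74), so
 * u_act = 1 - 0.55 = 0.45: one millisecond of execution only consumes
 * 0.45ms of runtime, reclaiming the bandwidth nobody else is using.
 */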
1195 * Update the current task's runtime statistics (provided it is still
1196 * a -deadline task and has not been removed from the dl_rq).
1198 static void update_curr_dl(struct rq *rq)
1200 struct task_struct *curr = rq->curr;
1201 struct sched_dl_entity *dl_se = &curr->dl;
1202 u64 delta_exec, scaled_delta_exec;
1203 int cpu = cpu_of(rq);
1206 if (!dl_task(curr) || !on_dl_rq(dl_se))
1210 * Consumed budget is computed considering the time as
1211 * observed by schedulable tasks (excluding time spent
1212 * in hardirq context, etc.). Deadlines are instead
1213 * computed using hard walltime. This seems to be the more
1214 * natural solution, but the full ramifications of this
1215 * approach need further study.
1217 now = rq_clock_task(rq);
1218 delta_exec = now - curr->se.exec_start;
1219 if (unlikely((s64)delta_exec <= 0)) {
1220 if (unlikely(dl_se->dl_yielded))
1225 schedstat_set(curr->se.statistics.exec_max,
1226 max(curr->se.statistics.exec_max, delta_exec));
1228 curr->se.sum_exec_runtime += delta_exec;
1229 account_group_exec_runtime(curr, delta_exec);
1231 curr->se.exec_start = now;
1232 cgroup_account_cputime(curr, delta_exec);
1234 if (dl_entity_is_special(dl_se))
1238 * For tasks that participate in GRUB, we implement GRUB-PA: the
1239 * spare reclaimed bandwidth is used to clock down frequency.
1241 * For the others, we still need to scale reservation parameters
1242 * according to current frequency and CPU maximum capacity.
1244 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1245 scaled_delta_exec = grub_reclaim(delta_exec,
1249 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1250 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1252 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1253 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
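/*
 * Worked example of the scaling above (invented numbers): if the CPU is
 * currently running at half of its maximum frequency (scale_freq of
 * 512 out of 1024) and has full capacity (scale_cpu of 1024), then 1ms of
 * wall-clock execution is charged as only 0.5ms of runtime, since, roughly
 * speaking, the reservation parameters are expressed relative to the
 * fastest configuration of the CPU.
 */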
1256 dl_se->runtime -= scaled_delta_exec;
1259 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1260 dl_se->dl_throttled = 1;
1262 /* If requested, inform the user about runtime overruns. */
1263 if (dl_runtime_exceeded(dl_se) &&
1264 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1265 dl_se->dl_overrun = 1;
1267 __dequeue_task_dl(rq, curr, 0);
1268 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1269 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1271 if (!is_leftmost(curr, &rq->dl))
1276 * Because -- for now -- we share the rt bandwidth, we need to
1277 * account our runtime there too, otherwise actual rt tasks
1278 * would be able to exceed the shared quota.
1280 * Account to the root rt group for now.
1282 * The solution we're working towards is having the RT groups scheduled
1283 * using deadline servers -- however there's a few nasties to figure
1284 * out before that can happen.
1286 if (rt_bandwidth_enabled()) {
1287 struct rt_rq *rt_rq = &rq->rt;
1289 raw_spin_lock(&rt_rq->rt_runtime_lock);
1291 * We'll let actual RT tasks worry about the overflow here, we
1292 * have our own CBS to keep us in line; only account when RT
1293 * bandwidth is relevant.
1295 if (sched_rt_bandwidth_account(rt_rq))
1296 rt_rq->rt_time += delta_exec;
1297 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1301 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1303 struct sched_dl_entity *dl_se = container_of(timer,
1304 struct sched_dl_entity,
1306 struct task_struct *p = dl_task_of(dl_se);
1310 rq = task_rq_lock(p, &rf);
1313 update_rq_clock(rq);
1315 if (!dl_task(p) || p->state == TASK_DEAD) {
1316 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1318 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1319 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1320 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1321 dl_se->dl_non_contending = 0;
1324 raw_spin_lock(&dl_b->lock);
1325 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1326 raw_spin_unlock(&dl_b->lock);
1327 __dl_clear_params(p);
1331 if (dl_se->dl_non_contending == 0)
1334 sub_running_bw(dl_se, &rq->dl);
1335 dl_se->dl_non_contending = 0;
1337 task_rq_unlock(rq, p, &rf);
1340 return HRTIMER_NORESTART;
1343 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1345 struct hrtimer *timer = &dl_se->inactive_timer;
1347 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1348 timer->function = inactive_task_timer;
1353 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1355 struct rq *rq = rq_of_dl_rq(dl_rq);
1357 if (dl_rq->earliest_dl.curr == 0 ||
1358 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1359 dl_rq->earliest_dl.curr = deadline;
1360 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1364 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1366 struct rq *rq = rq_of_dl_rq(dl_rq);
1369 * Since we may have removed our earliest (and/or next earliest)
1370 * task we must recompute them.
1372 if (!dl_rq->dl_nr_running) {
1373 dl_rq->earliest_dl.curr = 0;
1374 dl_rq->earliest_dl.next = 0;
1375 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1377 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1378 struct sched_dl_entity *entry;
1380 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1381 dl_rq->earliest_dl.curr = entry->deadline;
1382 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1388 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1389 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1391 #endif /* CONFIG_SMP */
1394 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1396 int prio = dl_task_of(dl_se)->prio;
1397 u64 deadline = dl_se->deadline;
1399 WARN_ON(!dl_prio(prio));
1400 dl_rq->dl_nr_running++;
1401 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1403 inc_dl_deadline(dl_rq, deadline);
1404 inc_dl_migration(dl_se, dl_rq);
1408 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1410 int prio = dl_task_of(dl_se)->prio;
1412 WARN_ON(!dl_prio(prio));
1413 WARN_ON(!dl_rq->dl_nr_running);
1414 dl_rq->dl_nr_running--;
1415 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1417 dec_dl_deadline(dl_rq, dl_se->deadline);
1418 dec_dl_migration(dl_se, dl_rq);
1421 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1423 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1424 struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1425 struct rb_node *parent = NULL;
1426 struct sched_dl_entity *entry;
1429 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1433 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1434 if (dl_time_before(dl_se->deadline, entry->deadline))
1435 link = &parent->rb_left;
1437 link = &parent->rb_right;
1442 rb_link_node(&dl_se->rb_node, parent, link);
1443 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1445 inc_dl_tasks(dl_se, dl_rq);
1448 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1450 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1452 if (RB_EMPTY_NODE(&dl_se->rb_node))
1455 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1456 RB_CLEAR_NODE(&dl_se->rb_node);
1458 dec_dl_tasks(dl_se, dl_rq);
1462 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1464 BUG_ON(on_dl_rq(dl_se));
1467 * If this is a wakeup or a new instance, the scheduling
1468 * parameters of the task might need updating. Otherwise,
1469 * we want a replenishment of its runtime.
1471 if (flags & ENQUEUE_WAKEUP) {
1472 task_contending(dl_se, flags);
1473 update_dl_entity(dl_se);
1474 } else if (flags & ENQUEUE_REPLENISH) {
1475 replenish_dl_entity(dl_se);
1476 } else if ((flags & ENQUEUE_RESTORE) &&
1477 dl_time_before(dl_se->deadline,
1478 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1479 setup_new_dl_entity(dl_se);
1482 __enqueue_dl_entity(dl_se);
1485 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1487 __dequeue_dl_entity(dl_se);
1490 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1492 if (is_dl_boosted(&p->dl)) {
1494 * Because of delays in the detection of the overrun of a
1495 * thread's runtime, it might be the case that a thread
1496 * goes to sleep in a rt mutex with negative runtime. As
1497 * a consequence, the thread will be throttled.
1499 * While waiting for the mutex, this thread can also be
1500 * boosted via PI, resulting in a thread that is throttled
1501 * and boosted at the same time.
1503 * In this case, the boost overrides the throttle.
1505 if (p->dl.dl_throttled) {
1507 * The replenish timer needs to be canceled. No
1508 * problem if it fires concurrently: boosted threads
1509 * are ignored in dl_task_timer().
1511 hrtimer_try_to_cancel(&p->dl.dl_timer);
1512 p->dl.dl_throttled = 0;
1514 } else if (!dl_prio(p->normal_prio)) {
1516 * Special case in which we have a !SCHED_DEADLINE task that is going
1517 * to be deboosted, but exceeds its runtime while doing so. No point in
1518 * replenishing it, as it's going to return back to its original
1519 * scheduling class after this. If it has been throttled, we need to
1520 * clear the flag, otherwise the task may wake up as throttled after
1521 * being boosted again with no means to replenish the runtime and clear
1524 p->dl.dl_throttled = 0;
1525 BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1530 * Check if a constrained deadline task was activated
1531 * after the deadline but before the next period.
1532 * If that is the case, the task will be throttled and
1533 * the replenishment timer will be set to the next period.
1535 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1536 dl_check_constrained_dl(&p->dl);
1538 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1539 add_rq_bw(&p->dl, &rq->dl);
1540 add_running_bw(&p->dl, &rq->dl);
1544 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1545 * its budget it needs a replenishment and, since it now is on
1546 * its rq, the bandwidth timer callback (which clearly has not
1547 * run yet) will take care of this.
1548 * However, the active utilization does not depend on the fact
1549 * that the task is on the runqueue or not (but depends on the
1550 * task's state - in GRUB parlance, "inactive" vs "active contending").
1551 * In other words, even if a task is throttled its utilization must
1552 * be counted in the active utilization; hence, we need to call
1555 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1556 if (flags & ENQUEUE_WAKEUP)
1557 task_contending(&p->dl, flags);
1562 enqueue_dl_entity(&p->dl, flags);
1564 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1565 enqueue_pushable_dl_task(rq, p);
1568 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1570 dequeue_dl_entity(&p->dl);
1571 dequeue_pushable_dl_task(rq, p);
1574 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1577 __dequeue_task_dl(rq, p, flags);
1579 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1580 sub_running_bw(&p->dl, &rq->dl);
1581 sub_rq_bw(&p->dl, &rq->dl);
1585 * This check allows us to start the inactive timer (or to immediately
1586 * decrease the active utilization, if needed) in two cases:
1587 * when the task blocks and when it is terminating
1588 * (p->state == TASK_DEAD). We can handle the two cases in the same
1589 * way, because from GRUB's point of view the same thing is happening
1590 * (the task moves from "active contending" to "active non contending"
1593 if (flags & DEQUEUE_SLEEP)
1594 task_non_contending(p);
1598 * Yield task semantic for -deadline tasks is:
1600 * get off from the CPU until our next instance, with
1601 * a new runtime. This is of little use now, since we
1602 * don't have a bandwidth reclaiming mechanism. Anyway,
1603 * bandwidth reclaiming is planned for the future, and
1604 * yield_task_dl will indicate that some spare budget
1605 * is available for other task instances to use it.
1607 static void yield_task_dl(struct rq *rq)
1610 * We make the task go to sleep until its current deadline by
1611 * forcing its runtime to zero. This way, update_curr_dl() stops
1612 * it and the bandwidth timer will wake it up and will give it
1613 * new scheduling parameters (thanks to dl_yielded=1).
1615 rq->curr->dl.dl_yielded = 1;
1617 update_rq_clock(rq);
1620 * Tell update_rq_clock() that we've just updated,
1621 * so we don't do microscopic update in schedule()
1622 * and double the fastpath cost.
1624 rq_clock_skip_update(rq);
1629 static int find_later_rq(struct task_struct *task);
1632 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1634 struct task_struct *curr;
1637 if (sd_flag != SD_BALANCE_WAKE)
1643 curr = READ_ONCE(rq->curr); /* unlocked access */
1646 * If we are dealing with a -deadline task, we must
1647 * decide where to wake it up.
1648 * If it has a later deadline and the current task
1649 * on this rq can't move (provided the waking task
1650 * can!) we prefer to send it somewhere else. On the
1651 * other hand, if it has a shorter deadline, we
1652 * try to make it stay here; it might be important.
1654 if (unlikely(dl_task(curr)) &&
1655 (curr->nr_cpus_allowed < 2 ||
1656 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1657 (p->nr_cpus_allowed > 1)) {
1658 int target = find_later_rq(p);
1661 (dl_time_before(p->dl.deadline,
1662 cpu_rq(target)->dl.earliest_dl.curr) ||
1663 (cpu_rq(target)->dl.dl_nr_running == 0)))
1672 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1676 if (p->state != TASK_WAKING)
1681 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1682 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1683 * rq->lock is not... So, lock it
1685 raw_spin_lock(&rq->lock);
1686 if (p->dl.dl_non_contending) {
1687 update_rq_clock(rq);
1688 sub_running_bw(&p->dl, &rq->dl);
1689 p->dl.dl_non_contending = 0;
1691 * If the timer handler is currently running and the
1692 * timer cannot be cancelled, inactive_task_timer()
1693 * will see that dl_non_contending is not set, and
1694 * will not touch the rq's active utilization,
1695 * so we are still safe.
1697 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1700 sub_rq_bw(&p->dl, &rq->dl);
1701 raw_spin_unlock(&rq->lock);
1704 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1707 * Current can't be migrated, useless to reschedule,
1708 * let's hope p can move out.
1710 if (rq->curr->nr_cpus_allowed == 1 ||
1711 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1715 * p is migratable, so let's not schedule it and
1716 * see if it is pushed or pulled somewhere else.
1718 if (p->nr_cpus_allowed != 1 &&
1719 cpudl_find(&rq->rd->cpudl, p, NULL))
1725 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1727 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1729 * This is OK, because current is on_cpu, which avoids it being
1730 * picked for load-balance and preemption/IRQs are still
1731 * disabled avoiding further scheduler activity on it and we've
1732 * not yet started the picking loop.
1734 rq_unpin_lock(rq, rf);
1736 rq_repin_lock(rq, rf);
1739 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1741 #endif /* CONFIG_SMP */
1744 * Only called when both the current and waking task are -deadline
1747 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1750 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1757 * In the unlikely case current and p have the same deadline
1758 * let us try to decide what's the best thing to do...
1760 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1761 !test_tsk_need_resched(rq->curr))
1762 check_preempt_equal_dl(rq, p);
1763 #endif /* CONFIG_SMP */
1766 #ifdef CONFIG_SCHED_HRTICK
1767 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1769 hrtick_start(rq, p->dl.runtime);
1771 #else /* !CONFIG_SCHED_HRTICK */
1772 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1777 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1779 p->se.exec_start = rq_clock_task(rq);
1781 /* You can't push away the running task */
1782 dequeue_pushable_dl_task(rq, p);
1787 if (hrtick_enabled(rq))
1788 start_hrtick_dl(rq, p);
1790 if (rq->curr->sched_class != &dl_sched_class)
1791 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1793 deadline_queue_push_tasks(rq);
1796 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
1798 struct rb_node *left = rb_first_cached(&dl_rq->root);
1803 return rb_entry(left, struct sched_dl_entity, rb_node);
1806 static struct task_struct *
1807 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1809 struct sched_dl_entity *dl_se;
1810 struct dl_rq *dl_rq = &rq->dl;
1811 struct task_struct *p;
1813 WARN_ON_ONCE(prev || rf);
1815 if (!sched_dl_runnable(rq))
1818 dl_se = pick_next_dl_entity(dl_rq);
1820 p = dl_task_of(dl_se);
1821 set_next_task_dl(rq, p, true);
1825 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1829 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1830 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1831 enqueue_pushable_dl_task(rq, p);
1835 * scheduler tick hitting a task of our scheduling class.
1837 * NOTE: This function can be called remotely by the tick offload that
1838 * goes along full dynticks. Therefore no local assumption can be made
1839 * and everything must be accessed through the @rq and @curr passed in
1842 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1846 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1848 * Even when we have runtime, update_curr_dl() might have resulted in us
1849 * not being the leftmost task anymore. In that case NEED_RESCHED will
1850 * be set and schedule() will start a new hrtick for the next task.
1852 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1853 is_leftmost(p, &rq->dl))
1854 start_hrtick_dl(rq, p);
1857 static void task_fork_dl(struct task_struct *p)
1860 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1867 /* Only try algorithms three times */
1868 #define DL_MAX_TRIES 3
1870 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1872 if (!task_running(rq, p) &&
1873 cpumask_test_cpu(cpu, p->cpus_ptr))
1879 * Return the earliest pushable rq's task, which is suitable to be executed
1880 * on the CPU, NULL otherwise:
1882 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1884 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1885 struct task_struct *p = NULL;
1887 if (!has_pushable_dl_tasks(rq))
1892 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1894 if (pick_dl_task(rq, p, cpu))
1897 next_node = rb_next(next_node);
1904 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1906 static int find_later_rq(struct task_struct *task)
1908 struct sched_domain *sd;
1909 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1910 int this_cpu = smp_processor_id();
1911 int cpu = task_cpu(task);
1913 /* Make sure the mask is initialized first */
1914 if (unlikely(!later_mask))
1917 if (task->nr_cpus_allowed == 1)
1921 * We have to consider system topology and task affinity
1922 * first, then we can look for a suitable CPU.
1924 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1928 * If we are here, some targets have been found, including
1929 * the most suitable which is, among the runqueues where the
1930 * current tasks have later deadlines than the task's one, the
1931 * rq with the latest possible one.
1933 * Now we check how well this matches with task's
1934 * affinity and system topology.
1936 * The last CPU where the task ran is our first
1937 * guess, since it is most likely cache-hot there.
1939 if (cpumask_test_cpu(cpu, later_mask))
1942 * Check if this_cpu is to be skipped (i.e., it is
1943 * not in the mask) or not.
1945 if (!cpumask_test_cpu(this_cpu, later_mask))
1949 for_each_domain(cpu, sd) {
1950 if (sd->flags & SD_WAKE_AFFINE) {
1954 * If possible, preempting this_cpu is
1955 * cheaper than migrating.
1957 if (this_cpu != -1 &&
1958 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1963 best_cpu = cpumask_first_and(later_mask,
1964 sched_domain_span(sd));
1966 * Last chance: if a CPU that is in both later_mask
1967 * and the current sd span is valid, that becomes our
1968 * choice. Of course, the latest possible CPU is
1969 * already under consideration through later_mask.
1971 if (best_cpu < nr_cpu_ids) {
1980 * At this point, all our guesses failed; we just return
1981 * 'something', and let the caller sort things out.
1986 cpu = cpumask_any(later_mask);
1987 if (cpu < nr_cpu_ids)
1993 /* Locks the rq it finds */
1994 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1996 struct rq *later_rq = NULL;
2000 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2001 cpu = find_later_rq(task);
2003 if ((cpu == -1) || (cpu == rq->cpu))
2006 later_rq = cpu_rq(cpu);
2008 if (later_rq->dl.dl_nr_running &&
2009 !dl_time_before(task->dl.deadline,
2010 later_rq->dl.earliest_dl.curr)) {
2012 * Target rq has tasks of equal or earlier deadline,
2013 * retrying does not release any lock and is unlikely
2014 * to yield a different result.
2020 /* Retry if something changed. */
2021 if (double_lock_balance(rq, later_rq)) {
2022 if (unlikely(task_rq(task) != rq ||
2023 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
2024 task_running(rq, task) ||
2026 !task_on_rq_queued(task))) {
2027 double_unlock_balance(rq, later_rq);
2034 * If the rq we found has no -deadline task, or
2035 * its earliest one has a later deadline than our
2036 * task, the rq is a good one.
2038 if (!later_rq->dl.dl_nr_running ||
2039 dl_time_before(task->dl.deadline,
2040 later_rq->dl.earliest_dl.curr))
2043 /* Otherwise we try again. */
2044 double_unlock_balance(rq, later_rq);
2051 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2053 struct task_struct *p;
2055 if (!has_pushable_dl_tasks(rq))
2058 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2059 struct task_struct, pushable_dl_tasks);
2061 BUG_ON(rq->cpu != task_cpu(p));
2062 BUG_ON(task_current(rq, p));
2063 BUG_ON(p->nr_cpus_allowed <= 1);
2065 BUG_ON(!task_on_rq_queued(p));
2066 BUG_ON(!dl_task(p));
2072 * See if the non running -deadline tasks on this rq
2073 * can be sent to some other CPU where they can preempt
2074 * and start executing.
2076 static int push_dl_task(struct rq *rq)
2078 struct task_struct *next_task;
2079 struct rq *later_rq;
2082 if (!rq->dl.overloaded)
2085 next_task = pick_next_pushable_dl_task(rq);
2090 if (WARN_ON(next_task == rq->curr))
2094 * If next_task preempts rq->curr, and rq->curr
2095 * can move away, it makes sense to just reschedule
2096 * without going further in pushing next_task.
2098 if (dl_task(rq->curr) &&
2099 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2100 rq->curr->nr_cpus_allowed > 1) {
2105 /* We might release rq lock */
2106 get_task_struct(next_task);
2108 /* Will lock the rq it'll find */
2109 later_rq = find_lock_later_rq(next_task, rq);
2111 struct task_struct *task;
2114 * We must check all this again, since
2115 * find_lock_later_rq releases rq->lock and it is
2116 * then possible that next_task has migrated.
2118 task = pick_next_pushable_dl_task(rq);
2119 if (task == next_task) {
2121 * The task is still there. We don't try
2122 * again; some other CPU will pull it when ready.
2131 put_task_struct(next_task);
2136 deactivate_task(rq, next_task, 0);
2137 set_task_cpu(next_task, later_rq->cpu);
2140 * Update the later_rq clock here, because the clock is used
2141 * by the cpufreq_update_util() inside __add_running_bw().
2143 update_rq_clock(later_rq);
2144 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2147 resched_curr(later_rq);
2149 double_unlock_balance(rq, later_rq);
2152 put_task_struct(next_task);
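/*
 * A concrete push, as a rough illustration: assume this rq runs a task
 * with absolute deadline 100ms and also queues a pushable task with
 * deadline 120ms, while some CPU in later_mask has nothing earlier than
 * 200ms queued. The 120ms task cannot preempt the local 100ms task, so
 * it is deactivated here, its CPU is changed, and it is activated on
 * the later rq, whose current task is then rescheduled.
 */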
2157 static void push_dl_tasks(struct rq *rq)
2159 /* push_dl_task() will return true if it moved a -deadline task */
2160 while (push_dl_task(rq))
2164 static void pull_dl_task(struct rq *this_rq)
2166 int this_cpu = this_rq->cpu, cpu;
2167 struct task_struct *p;
2168 bool resched = false;
2170 u64 dmin = LONG_MAX;
2172 if (likely(!dl_overloaded(this_rq)))
2176 * Match the barrier from dl_set_overloaded; this guarantees that if we
2177 * see overloaded we must also see the dlo_mask bit.
2181 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2182 if (this_cpu == cpu)
2185 src_rq = cpu_rq(cpu);
2188 * It looks racy, and it is! However, as in sched_rt.c,
2189 * we are fine with this.
2191 if (this_rq->dl.dl_nr_running &&
2192 dl_time_before(this_rq->dl.earliest_dl.curr,
2193 src_rq->dl.earliest_dl.next))
2196 /* Might drop this_rq->lock */
2197 double_lock_balance(this_rq, src_rq);
2200 * If there are no more pullable tasks on the
2201 * rq, we're done with it.
2203 if (src_rq->dl.dl_nr_running <= 1)
2206 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2209 * We found a task to be pulled if:
2210 * - it preempts our current (if there's one),
2211 * - it will preempt the last one we pulled (if any).
2213 if (p && dl_time_before(p->dl.deadline, dmin) &&
2214 (!this_rq->dl.dl_nr_running ||
2215 dl_time_before(p->dl.deadline,
2216 this_rq->dl.earliest_dl.curr))) {
2217 WARN_ON(p == src_rq->curr);
2218 WARN_ON(!task_on_rq_queued(p));
2221 * Then we pull iff p actually has an earlier
2222 * deadline than the current task of its runqueue.
2224 if (dl_time_before(p->dl.deadline,
2225 src_rq->curr->dl.deadline))
2230 deactivate_task(src_rq, p, 0);
2231 set_task_cpu(p, this_cpu);
2232 activate_task(this_rq, p, 0);
2233 dmin = p->dl.deadline;
2235 /* Is there any other task even earlier? */
2238 double_unlock_balance(this_rq, src_rq);
2242 resched_curr(this_rq);
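/*
 * A concrete pull, as a rough illustration: if this rq's earliest
 * deadline is 300ms and an overloaded CPU queues a pushable -deadline
 * task with deadline 200ms that is allowed to run here, that task is
 * deactivated on the source rq, moved to this_cpu and activated here,
 * after which the local 300ms task is rescheduled.
 */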
2246 * Since the task is not running and a reschedule is not going to happen
2247 * anytime soon on its runqueue, we try pushing it away now.
2249 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2251 if (!task_running(rq, p) &&
2252 !test_tsk_need_resched(rq->curr) &&
2253 p->nr_cpus_allowed > 1 &&
2254 dl_task(rq->curr) &&
2255 (rq->curr->nr_cpus_allowed < 2 ||
2256 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2261 static void set_cpus_allowed_dl(struct task_struct *p,
2262 const struct cpumask *new_mask)
2264 struct root_domain *src_rd;
2267 BUG_ON(!dl_task(p));
2272 * Migrating a SCHED_DEADLINE task between exclusive
2273 * cpusets (different root_domains) entails a bandwidth
2274 * update. We already made space for us in the destination
2275 * domain (see cpuset_can_attach()).
2277 if (!cpumask_intersects(src_rd->span, new_mask)) {
2278 struct dl_bw *src_dl_b;
2280 src_dl_b = dl_bw_of(cpu_of(rq));
2282 * We now free resources of the root_domain we are migrating
2283 * off. In the worst case, sched_setattr() may temporarily fail
2284 * until we complete the update.
2286 raw_spin_lock(&src_dl_b->lock);
2287 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2288 raw_spin_unlock(&src_dl_b->lock);
2291 set_cpus_allowed_common(p, new_mask);
2294 /* Assumes rq->lock is held */
2295 static void rq_online_dl(struct rq *rq)
2297 if (rq->dl.overloaded)
2298 dl_set_overload(rq);
2300 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2301 if (rq->dl.dl_nr_running > 0)
2302 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2305 /* Assumes rq->lock is held */
2306 static void rq_offline_dl(struct rq *rq)
2308 if (rq->dl.overloaded)
2309 dl_clear_overload(rq);
2311 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2312 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2315 void __init init_sched_dl_class(void)
2319 for_each_possible_cpu(i)
2320 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2321 GFP_KERNEL, cpu_to_node(i));
2324 void dl_add_task_root_domain(struct task_struct *p)
2330 rq = task_rq_lock(p, &rf);
2334 dl_b = &rq->rd->dl_bw;
2335 raw_spin_lock(&dl_b->lock);
2337 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2339 raw_spin_unlock(&dl_b->lock);
2342 task_rq_unlock(rq, p, &rf);
2345 void dl_clear_root_domain(struct root_domain *rd)
2347 unsigned long flags;
2349 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2350 rd->dl_bw.total_bw = 0;
2351 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2354 #endif /* CONFIG_SMP */
2356 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2359 * task_non_contending() can start the "inactive timer" (if the 0-lag
2360 * time is in the future). If the task switches back to dl before
2361 * the "inactive timer" fires, it can continue to consume its current
2362 * runtime using its current deadline. If it stays outside of
2363 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2364 * will reset the task parameters.
2366 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2367 task_non_contending(p);
2369 if (!task_on_rq_queued(p)) {
2371 * Inactive timer is armed. However, p is leaving DEADLINE and
2372 * might migrate away from this rq while continuing to run in
2373 * some other class. We need to remove its contribution from
2374 * this rq's running_bw now, or sub_rq_bw (below) will complain.
2376 if (p->dl.dl_non_contending)
2377 sub_running_bw(&p->dl, &rq->dl);
2378 sub_rq_bw(&p->dl, &rq->dl);
2382 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2383 * at the 0-lag time, because the task could have been migrated
2384 * while running as SCHED_OTHER in the meantime.
2386 if (p->dl.dl_non_contending)
2387 p->dl.dl_non_contending = 0;
2390 * Since this might be the only -deadline task on the rq,
2391 * this is the right place to try to pull some other one
2392 * from an overloaded CPU, if any.
2394 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2397 deadline_queue_pull_task(rq);
2401 * When switching to -deadline, we may overload the rq, in which
2402 * case we try to push some other task away, if possible.
2404 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2406 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2409 /* If p is not queued we will update its parameters at next wakeup. */
2410 if (!task_on_rq_queued(p)) {
2411 add_rq_bw(&p->dl, &rq->dl);
2416 if (rq->curr != p) {
2418 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2419 deadline_queue_push_tasks(rq);
2421 if (dl_task(rq->curr))
2422 check_preempt_curr_dl(rq, p, 0);
2426 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2431 * If the scheduling parameters of a -deadline task changed,
2432 * a push or pull operation might be needed.
2434 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2437 if (task_on_rq_queued(p) || rq->curr == p) {
2440 * This might be too much, but unfortunately
2441 * we don't have the old deadline value, and
2442 * we can't tell whether the task is raising
2443 * or lowering its priority, so...
2445 if (!rq->dl.overloaded)
2446 deadline_queue_pull_task(rq);
2449 * If we now have an earlier deadline task than p,
2450 * then reschedule, provided p is still on this runqueue.
2453 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2457 * Again, we don't know if p has an earlier
2458 * or later deadline, so let's blindly set a
2459 * (maybe not needed) rescheduling point.
2462 #endif /* CONFIG_SMP */
2466 const struct sched_class dl_sched_class = {
2467 .next = &rt_sched_class,
2468 .enqueue_task = enqueue_task_dl,
2469 .dequeue_task = dequeue_task_dl,
2470 .yield_task = yield_task_dl,
2472 .check_preempt_curr = check_preempt_curr_dl,
2474 .pick_next_task = pick_next_task_dl,
2475 .put_prev_task = put_prev_task_dl,
2476 .set_next_task = set_next_task_dl,
2479 .balance = balance_dl,
2480 .select_task_rq = select_task_rq_dl,
2481 .migrate_task_rq = migrate_task_rq_dl,
2482 .set_cpus_allowed = set_cpus_allowed_dl,
2483 .rq_online = rq_online_dl,
2484 .rq_offline = rq_offline_dl,
2485 .task_woken = task_woken_dl,
2488 .task_tick = task_tick_dl,
2489 .task_fork = task_fork_dl,
2491 .prio_changed = prio_changed_dl,
2492 .switched_from = switched_from_dl,
2493 .switched_to = switched_to_dl,
2495 .update_curr = update_curr_dl,
2498 int sched_dl_global_validate(void)
2500 u64 runtime = global_rt_runtime();
2501 u64 period = global_rt_period();
2502 u64 new_bw = to_ratio(period, runtime);
2504 int cpu, cpus, ret = 0;
2505 unsigned long flags;
2508 * Here we want to check that the bandwidth is not being set to a
2509 * value smaller than the currently allocated bandwidth in
2510 * any of the root_domains.
2512 * FIXME: Cycling on all the CPUs is overkill, but simpler than
2513 * cycling on root_domains... Discussion on different/better
2514 * solutions is welcome!
2516 for_each_possible_cpu(cpu) {
2517 rcu_read_lock_sched();
2518 dl_b = dl_bw_of(cpu);
2519 cpus = dl_bw_cpus(cpu);
2521 raw_spin_lock_irqsave(&dl_b->lock, flags);
2522 if (new_bw * cpus < dl_b->total_bw)
2524 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2526 rcu_read_unlock_sched();
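/*
 * As a rough worked example: with the defaults sched_rt_runtime_us =
 * 950000 and sched_rt_period_us = 1000000, new_bw = to_ratio(period,
 * runtime) is about 0.95 in fixed point. A root_domain spanning 4 CPUs
 * may then carry up to 4 * new_bw of -deadline bandwidth; if its
 * total_bw already exceeds that, the requested change is rejected.
 */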
2535 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2537 if (global_rt_runtime() == RUNTIME_INF) {
2538 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2539 dl_rq->extra_bw = 1 << BW_SHIFT;
2541 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2542 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2543 dl_rq->extra_bw = to_ratio(global_rt_period(),
2544 global_rt_runtime());
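/*
 * As a rough worked example, assuming BW_SHIFT = 20 and RATIO_SHIFT = 8
 * and the default 950000us runtime every 1000000us period: extra_bw =
 * to_ratio(period, runtime) ~= 0.95 << 20 ~= 996147, while bw_ratio =
 * to_ratio(runtime, period) >> 12 ~= (1 / 0.95) << 8 ~= 269, i.e. the
 * reclaiming (GRUB) math scales utilizations by roughly 1/0.95.
 */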
2548 void sched_dl_do_global(void)
2553 unsigned long flags;
2555 def_dl_bandwidth.dl_period = global_rt_period();
2556 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2558 if (global_rt_runtime() != RUNTIME_INF)
2559 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2562 * FIXME: As above...
2564 for_each_possible_cpu(cpu) {
2565 rcu_read_lock_sched();
2566 dl_b = dl_bw_of(cpu);
2568 raw_spin_lock_irqsave(&dl_b->lock, flags);
2570 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2572 rcu_read_unlock_sched();
2573 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2578 * We must be sure that accepting a new task (or allowing changing the
2579 * parameters of an existing one) is consistent with the bandwidth
2580 * constraints. If so, this function also updates the currently
2581 * allocated bandwidth to reflect the new situation.
2583 * This function is called while holding p's rq->lock.
2585 int sched_dl_overflow(struct task_struct *p, int policy,
2586 const struct sched_attr *attr)
2588 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2589 u64 period = attr->sched_period ?: attr->sched_deadline;
2590 u64 runtime = attr->sched_runtime;
2591 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2594 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2597 /* !deadline task may carry old deadline bandwidth */
2598 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2602 * Whether a task enters, leaves, or stays -deadline but changes
2603 * its parameters, we may need to update the total
2604 * allocated bandwidth of the container accordingly.
2606 raw_spin_lock(&dl_b->lock);
2607 cpus = dl_bw_cpus(task_cpu(p));
2608 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2609 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2610 if (hrtimer_active(&p->dl.inactive_timer))
2611 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2612 __dl_add(dl_b, new_bw, cpus);
2614 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2615 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2617 * XXX this is slightly incorrect: when the task
2618 * utilization decreases, we should delay the total
2619 * utilization change until the task's 0-lag point.
2620 * But this would require setting the task's "inactive
2621 * timer" when the task is not inactive.
2623 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2624 __dl_add(dl_b, new_bw, cpus);
2625 dl_change_utilization(p, new_bw);
2627 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2629 * Do not decrease the total deadline utilization here,
2630 * switched_from_dl() will take care of doing it at the correct (0-lag) time.
2635 raw_spin_unlock(&dl_b->lock);
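/*
 * For instance (rough illustration): a task entering SCHED_DEADLINE
 * with sched_runtime = 10ms and sched_period = 100ms asks for a new_bw
 * of about 0.1 in fixed point; if __dl_overflow() says the root_domain
 * still has room, that amount is added to dl_b->total_bw. A task that
 * merely changes its parameters has its old dl_bw replaced by the new
 * one, and a task leaving SCHED_DEADLINE is handled by
 * switched_from_dl() at the proper time, as noted above.
 */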
2641 * This function initializes the sched_dl_entity of a task that is
2642 * becoming SCHED_DEADLINE.
2644 * Only the static values are considered here; the actual runtime and the
2645 * absolute deadline will be properly calculated when the task is enqueued
2646 * for the first time with its new policy.
2648 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2650 struct sched_dl_entity *dl_se = &p->dl;
2652 dl_se->dl_runtime = attr->sched_runtime;
2653 dl_se->dl_deadline = attr->sched_deadline;
2654 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2655 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
2656 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2657 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
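/*
 * For example (rough illustration): sched_runtime = 10ms,
 * sched_deadline = 30ms and sched_period = 100ms yield
 * dl_bw ~= 0.1 in fixed point (runtime / period) and
 * dl_density ~= 0.33 in fixed point (runtime / deadline).
 */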
2660 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2662 struct sched_dl_entity *dl_se = &p->dl;
2664 attr->sched_priority = p->rt_priority;
2665 attr->sched_runtime = dl_se->dl_runtime;
2666 attr->sched_deadline = dl_se->dl_deadline;
2667 attr->sched_period = dl_se->dl_period;
2668 attr->sched_flags &= ~SCHED_DL_FLAGS;
2669 attr->sched_flags |= dl_se->flags;
2673 * This function validates the new parameters of a -deadline task.
2674 * We require the deadline to be non-zero and greater than or equal
2675 * to the runtime, and the period to be either zero or greater than
2676 * or equal to the deadline. Furthermore, we have to be sure that
2677 * user parameters are above the internal resolution of 1us (we
2678 * check sched_runtime only since it is always the smaller one) and
2679 * below 2^63 ns (we have to check both sched_deadline and
2680 * sched_period, as the latter can be zero).
2682 bool __checkparam_dl(const struct sched_attr *attr)
2684 /* special dl tasks don't actually use any parameter */
2685 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2689 if (attr->sched_deadline == 0)
2693 * Since we truncate DL_SCALE bits, make sure we're at least that big.
2696 if (attr->sched_runtime < (1ULL << DL_SCALE))
2700 * Since we use the MSB for wrap-around and sign issues, make
2701 * sure it's not set (mind that period can be equal to zero).
2703 if (attr->sched_deadline & (1ULL << 63) ||
2704 attr->sched_period & (1ULL << 63))
2707 /* runtime <= deadline <= period (if period != 0) */
2708 if ((attr->sched_period != 0 &&
2709 attr->sched_period < attr->sched_deadline) ||
2710 attr->sched_deadline < attr->sched_runtime)
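/*
 * For example (rough illustration, taking DL_SCALE to be 10 so the
 * resolution is ~1us): runtime = 10us, deadline = 1ms, period = 10ms
 * passes all of the checks above; runtime = 500ns is rejected as below
 * the resolution; and deadline = 2ms with period = 1ms is rejected
 * because the period would be shorter than the deadline.
 */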
2717 * This function clears the sched_dl_entity static params.
2719 void __dl_clear_params(struct task_struct *p)
2721 struct sched_dl_entity *dl_se = &p->dl;
2723 dl_se->dl_runtime = 0;
2724 dl_se->dl_deadline = 0;
2725 dl_se->dl_period = 0;
2728 dl_se->dl_density = 0;
2730 dl_se->dl_throttled = 0;
2731 dl_se->dl_yielded = 0;
2732 dl_se->dl_non_contending = 0;
2733 dl_se->dl_overrun = 0;
2735 #ifdef CONFIG_RT_MUTEXES
2736 dl_se->pi_se = dl_se;
2740 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2742 struct sched_dl_entity *dl_se = &p->dl;
2744 if (dl_se->dl_runtime != attr->sched_runtime ||
2745 dl_se->dl_deadline != attr->sched_deadline ||
2746 dl_se->dl_period != attr->sched_period ||
2747 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
2754 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2756 unsigned int dest_cpu;
2760 unsigned long flags;
2762 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2764 rcu_read_lock_sched();
2765 dl_b = dl_bw_of(dest_cpu);
2766 raw_spin_lock_irqsave(&dl_b->lock, flags);
2767 cpus = dl_bw_cpus(dest_cpu);
2768 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2773 * We reserve space for this task in the destination
2774 * root_domain, as we can't fail after this point.
2775 * We will free resources in the source root_domain
2776 * later on (see set_cpus_allowed_dl()).
2778 __dl_add(dl_b, p->dl.dl_bw, cpus);
2781 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2782 rcu_read_unlock_sched();
2787 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2788 const struct cpumask *trial)
2790 int ret = 1, trial_cpus;
2791 struct dl_bw *cur_dl_b;
2792 unsigned long flags;
2794 rcu_read_lock_sched();
2795 cur_dl_b = dl_bw_of(cpumask_any(cur));
2796 trial_cpus = cpumask_weight(trial);
2798 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2799 if (cur_dl_b->bw != -1 &&
2800 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2802 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2803 rcu_read_unlock_sched();
2808 bool dl_cpu_busy(unsigned int cpu)
2810 unsigned long flags;
2815 rcu_read_lock_sched();
2816 dl_b = dl_bw_of(cpu);
2817 raw_spin_lock_irqsave(&dl_b->lock, flags);
2818 cpus = dl_bw_cpus(cpu);
2819 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2820 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2821 rcu_read_unlock_sched();
2827 #ifdef CONFIG_SCHED_DEBUG
2828 void print_dl_stats(struct seq_file *m, int cpu)
2830 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2832 #endif /* CONFIG_SCHED_DEBUG */