/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks
 *    at it will take over and keep the time keeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
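	/*
	 * The local device stops in deep C-states (C3STOP). Oneshot mode
	 * is then only usable if the broadcast device can take over.
	 */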
	return tick_broadcast_oneshot_available();
}
/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		update_wall_time();
	}
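
	/*
	 * The per-CPU part of the tick: expire local timers, account the
	 * scheduler tick and kick the profiler for this CPU.
	 */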
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
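
	/*
	 * The device has no hardware periodic mode and must be reprogrammed
	 * for every tick. If programming fails because the expiry is already
	 * in the past, the loop below accounts each missed tick and retries
	 * one period later until a future expiry is accepted.
	 */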
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned int seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = NSEC_PER_SEC / HZ;
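			/*
			 * NSEC_PER_SEC / HZ is the nominal jiffy length,
			 * e.g. 4,000,000 ns for HZ=250.
			 */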
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}
static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
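	/*
	 * E.g. a per-CPU local APIC timer typically replaces a global HPET
	 * here even when its rating is lower, because the cpumasks differ.
	 */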
	return !curdev ||
		newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}
/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}
/*
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}
/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
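
/*
 * Typical callers are cpuidle drivers, via the tick_broadcast_enter() /
 * tick_broadcast_exit() wrappers, before and after entering a deep idle
 * state that stops the local tick device.
 */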
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);
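
		/*
		 * cpumask_first() returns >= nr_cpu_ids when the mask is
		 * empty, so guard against handing the duty to a
		 * nonexistent CPU.
		 */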
		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif
/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}
/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
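
	/*
	 * If the broadcast device covers this CPU across suspend, the
	 * periodic/oneshot reprogramming is handled by the broadcast
	 * resume path instead of here.
	 */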
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}
/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}
/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}
#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
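/*
 * tick_freeze_depth counts the CPUs that have entered tick_freeze();
 * it is protected by tick_freeze_lock.
 */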
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */
/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}