GNU Linux-libre 4.19.242-gnu1
1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9
10 #include <linux/sched/mm.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15
16 #include "../base.h"
17 #include "power.h"
18
19 typedef int (*pm_callback_t)(struct device *);
20
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23         pm_callback_t cb;
24         const struct dev_pm_ops *ops;
25
26         if (dev->pm_domain)
27                 ops = &dev->pm_domain->ops;
28         else if (dev->type && dev->type->pm)
29                 ops = dev->type->pm;
30         else if (dev->class && dev->class->pm)
31                 ops = dev->class->pm;
32         else if (dev->bus && dev->bus->pm)
33                 ops = dev->bus->pm;
34         else
35                 ops = NULL;
36
37         if (ops)
38                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39         else
40                 cb = NULL;
41
42         if (!cb && dev->driver && dev->driver->pm)
43                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45         return cb;
46 }
47
48 #define RPM_GET_CALLBACK(dev, callback) \
49                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
50
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53
54 /**
55  * update_pm_runtime_accounting - Update the time accounting of power states
56  * @dev: Device to update the accounting for
57  *
58  * In order to be able to have time accounting of the various power states
59  * (as used by programs such as PowerTOP to show the effectiveness of runtime
60  * PM), we need to track the time spent in each state.
61  * update_pm_runtime_accounting must be called each time before the
62  * runtime_status field is updated, to account the time in the old state
63  * correctly.
64  */
65 void update_pm_runtime_accounting(struct device *dev)
66 {
67         unsigned long now = jiffies;
68         unsigned long delta;
69
70         delta = now - dev->power.accounting_timestamp;
71
72         dev->power.accounting_timestamp = now;
73
74         if (dev->power.disable_depth > 0)
75                 return;
76
77         if (dev->power.runtime_status == RPM_SUSPENDED)
78                 dev->power.suspended_jiffies += delta;
79         else
80                 dev->power.active_jiffies += delta;
81 }
82
83 static void __update_runtime_status(struct device *dev, enum rpm_status status)
84 {
85         update_pm_runtime_accounting(dev);
86         dev->power.runtime_status = status;
87 }
88
89 /**
90  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
91  * @dev: Device to handle.
92  */
93 static void pm_runtime_deactivate_timer(struct device *dev)
94 {
95         if (dev->power.timer_expires > 0) {
96                 del_timer(&dev->power.suspend_timer);
97                 dev->power.timer_expires = 0;
98         }
99 }
100
101 /**
102  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
103  * @dev: Device to handle.
104  */
105 static void pm_runtime_cancel_pending(struct device *dev)
106 {
107         pm_runtime_deactivate_timer(dev);
108         /*
109          * In case there's a request pending, make sure its work function will
110          * return without doing anything.
111          */
112         dev->power.request = RPM_REQ_NONE;
113 }
114
115 /**
116  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
117  * @dev: Device to handle.
118  *
119  * Compute the autosuspend-delay expiration time based on the device's
120  * power.last_busy time.  If the delay has already expired or is disabled
121  * (negative) or the power.use_autosuspend flag isn't set, return 0.
122  * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
123  *
124  * This function may be called either with or without dev->power.lock held.
125  * Either way it can be racy, since power.last_busy may be updated at any time.
126  */
127 unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
128 {
129         int autosuspend_delay;
130         long elapsed;
131         unsigned long last_busy;
132         unsigned long expires = 0;
133
134         if (!dev->power.use_autosuspend)
135                 goto out;
136
137         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
138         if (autosuspend_delay < 0)
139                 goto out;
140
141         last_busy = READ_ONCE(dev->power.last_busy);
142         elapsed = jiffies - last_busy;
143         if (elapsed < 0)
144                 goto out;       /* jiffies has wrapped around. */
145
146         /*
147          * If the autosuspend_delay is >= 1 second, align the timer by rounding
148          * up to the nearest second.
149          */
150         expires = last_busy + msecs_to_jiffies(autosuspend_delay);
151         if (autosuspend_delay >= 1000)
152                 expires = round_jiffies(expires);
153         expires += !expires;
154         if (elapsed >= expires - last_busy)
155                 expires = 0;    /* Already expired. */
156
157  out:
158         return expires;
159 }
160 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
161
162 static int dev_memalloc_noio(struct device *dev, void *data)
163 {
164         return dev->power.memalloc_noio;
165 }
166
167 /**
168  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
169  * @dev: Device to handle.
170  * @enable: True for setting the flag and False for clearing the flag.
171  *
172  * Set the flag for all devices in the path from the device to the
173  * root device in the device tree if @enable is true, otherwise clear
174  * the flag for devices in the path whose siblings don't set the flag.
175  *
176  * The function should only be called by block device or network
177  * device drivers to solve the deadlock problem during runtime
178  * resume/suspend:
179  *
180  *     If memory allocation with GFP_KERNEL is called inside the runtime
181  *     resume/suspend callback of any one of its ancestors (or the
182  *     block device itself), a deadlock may be triggered inside the
183  *     memory allocation, since it might not complete until the block
184  *     device becomes active and the involved page I/O finishes.  This
185  *     situation was first pointed out by Alan Stern.  Network devices
186  *     are involved in the same way in iSCSI-type setups.
187  *
188  * dev_hotplug_mutex is held in the function to handle the hotplug
189  * race, because pm_runtime_set_memalloc_noio() may be called from an
190  * async probe().
191  *
192  * The function should be called between device_add() and device_del()
193  * on the affected (block/network) device.
194  */
195 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
196 {
197         static DEFINE_MUTEX(dev_hotplug_mutex);
198
199         mutex_lock(&dev_hotplug_mutex);
200         for (;;) {
201                 bool enabled;
202
203                 /* hold power lock since bitfield is not SMP-safe. */
204                 spin_lock_irq(&dev->power.lock);
205                 enabled = dev->power.memalloc_noio;
206                 dev->power.memalloc_noio = enable;
207                 spin_unlock_irq(&dev->power.lock);
208
209                 /*
210                  * No need to set the flag for the ancestors any more
211                  * if it was already set for this device.
212                  */
213                 if (enabled && enable)
214                         break;
215
216                 dev = dev->parent;
217
218                 /*
219                  * Clear the flag of the parent device only if none of
220                  * its children has the flag set, because an ancestor's
221                  * flag may have been set by any one of its descendants.
222                  */
223                 if (!dev || (!enable &&
224                              device_for_each_child(dev, NULL,
225                                                    dev_memalloc_noio)))
226                         break;
227         }
228         mutex_unlock(&dev_hotplug_mutex);
229 }
230 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
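
/*
 * Illustrative sketch (not part of the original file): how a block device
 * driver might bracket device_add()/device_del() with
 * pm_runtime_set_memalloc_noio(), as described above.  The driver names
 * (struct foo_disk, foo_add_disk_dev, foo_del_disk_dev) are hypothetical.
 *
 *	static int foo_add_disk_dev(struct foo_disk *fd)
 *	{
 *		int error = device_add(&fd->dev);
 *
 *		if (error)
 *			return error;
 *
 *		// Mark the whole path up to the root device, so that runtime
 *		// PM callbacks avoid GFP_KERNEL allocations waiting on page I/O.
 *		pm_runtime_set_memalloc_noio(&fd->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_del_disk_dev(struct foo_disk *fd)
 *	{
 *		pm_runtime_set_memalloc_noio(&fd->dev, false);
 *		device_del(&fd->dev);
 *	}
 */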
231
232 /**
233  * rpm_check_suspend_allowed - Test whether a device may be suspended.
234  * @dev: Device to test.
235  */
236 static int rpm_check_suspend_allowed(struct device *dev)
237 {
238         int retval = 0;
239
240         if (dev->power.runtime_error)
241                 retval = -EINVAL;
242         else if (dev->power.disable_depth > 0)
243                 retval = -EACCES;
244         else if (atomic_read(&dev->power.usage_count) > 0)
245                 retval = -EAGAIN;
246         else if (!dev->power.ignore_children &&
247                         atomic_read(&dev->power.child_count))
248                 retval = -EBUSY;
249
250         /* Pending resume requests take precedence over suspends. */
251         else if ((dev->power.deferred_resume
252                         && dev->power.runtime_status == RPM_SUSPENDING)
253             || (dev->power.request_pending
254                         && dev->power.request == RPM_REQ_RESUME))
255                 retval = -EAGAIN;
256         else if (__dev_pm_qos_read_value(dev) == 0)
257                 retval = -EPERM;
258         else if (dev->power.runtime_status == RPM_SUSPENDED)
259                 retval = 1;
260
261         return retval;
262 }
263
264 static int rpm_get_suppliers(struct device *dev)
265 {
266         struct device_link *link;
267
268         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
269                 int retval;
270
271                 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
272                     READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
273                         continue;
274
275                 retval = pm_runtime_get_sync(link->supplier);
276                 /* Ignore suppliers with disabled runtime PM. */
277                 if (retval < 0 && retval != -EACCES) {
278                         pm_runtime_put_noidle(link->supplier);
279                         return retval;
280                 }
281                 refcount_inc(&link->rpm_active);
282         }
283         return 0;
284 }
285
286 static void rpm_put_suppliers(struct device *dev)
287 {
288         struct device_link *link;
289
290         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
291                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
292                         continue;
293
294                 while (refcount_dec_not_one(&link->rpm_active))
295                         pm_runtime_put(link->supplier);
296         }
297 }
298
299 /**
300  * __rpm_callback - Run a given runtime PM callback for a given device.
301  * @cb: Runtime PM callback to run.
302  * @dev: Device to run the callback for.
303  */
304 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
305         __releases(&dev->power.lock) __acquires(&dev->power.lock)
306 {
307         int retval, idx;
308         bool use_links = dev->power.links_count > 0;
309
310         if (dev->power.irq_safe) {
311                 spin_unlock(&dev->power.lock);
312         } else {
313                 spin_unlock_irq(&dev->power.lock);
314
315                 /*
316                  * Resume suppliers if necessary.
317                  *
318                  * The device's runtime PM status cannot change until this
319                  * routine returns, so it is safe to read the status outside of
320                  * the lock.
321                  */
322                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
323                         idx = device_links_read_lock();
324
325                         retval = rpm_get_suppliers(dev);
326                         if (retval)
327                                 goto fail;
328
329                         device_links_read_unlock(idx);
330                 }
331         }
332
333         retval = cb(dev);
334
335         if (dev->power.irq_safe) {
336                 spin_lock(&dev->power.lock);
337         } else {
338                 /*
339                  * If the device is suspending and the callback has returned
340                  * success, drop the usage counters of the suppliers that have
341                  * been reference counted on its resume.
342                  *
343                  * Do that if resume fails too.
344                  */
345                 if (use_links
346                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
347                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
348                         idx = device_links_read_lock();
349
350  fail:
351                         rpm_put_suppliers(dev);
352
353                         device_links_read_unlock(idx);
354                 }
355
356                 spin_lock_irq(&dev->power.lock);
357         }
358
359         return retval;
360 }
361
362 /**
363  * rpm_idle - Notify device bus type if the device can be suspended.
364  * @dev: Device to notify the bus type about.
365  * @rpmflags: Flag bits.
366  *
367  * Check if the device's runtime PM status allows it to be suspended.  If
368  * another idle notification has been started earlier, return immediately.  If
369  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
370  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
371  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
372  *
373  * This function must be called under dev->power.lock with interrupts disabled.
374  */
375 static int rpm_idle(struct device *dev, int rpmflags)
376 {
377         int (*callback)(struct device *);
378         int retval;
379
380         trace_rpm_idle_rcuidle(dev, rpmflags);
381         retval = rpm_check_suspend_allowed(dev);
382         if (retval < 0)
383                 ;       /* Conditions are wrong. */
384
385         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
386         else if (dev->power.runtime_status != RPM_ACTIVE)
387                 retval = -EAGAIN;
388
389         /*
390          * Any pending request other than an idle notification takes
391          * precedence over us, except that the timer may be running.
392          */
393         else if (dev->power.request_pending &&
394             dev->power.request > RPM_REQ_IDLE)
395                 retval = -EAGAIN;
396
397         /* Act as though RPM_NOWAIT is always set. */
398         else if (dev->power.idle_notification)
399                 retval = -EINPROGRESS;
400         if (retval)
401                 goto out;
402
403         /* Pending requests need to be canceled. */
404         dev->power.request = RPM_REQ_NONE;
405
406         if (dev->power.no_callbacks)
407                 goto out;
408
409         /* Carry out an asynchronous or a synchronous idle notification. */
410         if (rpmflags & RPM_ASYNC) {
411                 dev->power.request = RPM_REQ_IDLE;
412                 if (!dev->power.request_pending) {
413                         dev->power.request_pending = true;
414                         queue_work(pm_wq, &dev->power.work);
415                 }
416                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
417                 return 0;
418         }
419
420         dev->power.idle_notification = true;
421
422         callback = RPM_GET_CALLBACK(dev, runtime_idle);
423
424         if (callback)
425                 retval = __rpm_callback(callback, dev);
426
427         dev->power.idle_notification = false;
428         wake_up_all(&dev->power.wait_queue);
429
430  out:
431         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
432         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
433 }
434
435 /**
436  * rpm_callback - Run a given runtime PM callback for a given device.
437  * @cb: Runtime PM callback to run.
438  * @dev: Device to run the callback for.
439  */
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
441 {
442         int retval;
443
444         if (!cb)
445                 return -ENOSYS;
446
447         if (dev->power.memalloc_noio) {
448                 unsigned int noio_flag;
449
450                 /*
451                  * A deadlock might occur if a memory allocation with
452                  * GFP_KERNEL happens inside the runtime_suspend or
453                  * runtime_resume callback of a block device's ancestor
454                  * or of the block device itself.  A network device may
455                  * be regarded as part of an iSCSI block device, so
456                  * network devices and their ancestors should be marked
457                  * as memalloc_noio too.
458                  */
459                 noio_flag = memalloc_noio_save();
460                 retval = __rpm_callback(cb, dev);
461                 memalloc_noio_restore(noio_flag);
462         } else {
463                 retval = __rpm_callback(cb, dev);
464         }
465
466         dev->power.runtime_error = retval;
467         return retval != -EACCES ? retval : -EIO;
468 }
469
470 /**
471  * rpm_suspend - Carry out runtime suspend of given device.
472  * @dev: Device to suspend.
473  * @rpmflags: Flag bits.
474  *
475  * Check if the device's runtime PM status allows it to be suspended.
476  * Cancel a pending idle notification, autosuspend or suspend. If
477  * another suspend has been started earlier, either return immediately
478  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
479  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
480  * otherwise run the ->runtime_suspend() callback directly.  If
481  * ->runtime_suspend() succeeds and a deferred resume was requested while
482  * the callback was running, carry the resume out; otherwise send an idle
483  * notification for the device's parent (provided that neither ignore_children
484  * of parent->power nor irq_safe of dev->power is set).
485  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
486  * flag is set and the next autosuspend-delay expiration time is in the
487  * future, schedule another autosuspend attempt.
488  *
489  * This function must be called under dev->power.lock with interrupts disabled.
490  */
491 static int rpm_suspend(struct device *dev, int rpmflags)
492         __releases(&dev->power.lock) __acquires(&dev->power.lock)
493 {
494         int (*callback)(struct device *);
495         struct device *parent = NULL;
496         int retval;
497
498         trace_rpm_suspend_rcuidle(dev, rpmflags);
499
500  repeat:
501         retval = rpm_check_suspend_allowed(dev);
502
503         if (retval < 0)
504                 ;       /* Conditions are wrong. */
505
506         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
507         else if (dev->power.runtime_status == RPM_RESUMING &&
508             !(rpmflags & RPM_ASYNC))
509                 retval = -EAGAIN;
510         if (retval)
511                 goto out;
512
513         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
514         if ((rpmflags & RPM_AUTO)
515             && dev->power.runtime_status != RPM_SUSPENDING) {
516                 unsigned long expires = pm_runtime_autosuspend_expiration(dev);
517
518                 if (expires != 0) {
519                         /* Pending requests need to be canceled. */
520                         dev->power.request = RPM_REQ_NONE;
521
522                         /*
523                          * Optimization: If the timer is already running and is
524                          * set to expire at or before the autosuspend delay,
525                          * avoid the overhead of resetting it.  Just let it
526                          * expire; pm_suspend_timer_fn() will take care of the
527                          * rest.
528                          */
529                         if (!(dev->power.timer_expires && time_before_eq(
530                             dev->power.timer_expires, expires))) {
531                                 dev->power.timer_expires = expires;
532                                 mod_timer(&dev->power.suspend_timer, expires);
533                         }
534                         dev->power.timer_autosuspends = 1;
535                         goto out;
536                 }
537         }
538
539         /* Other scheduled or pending requests need to be canceled. */
540         pm_runtime_cancel_pending(dev);
541
542         if (dev->power.runtime_status == RPM_SUSPENDING) {
543                 DEFINE_WAIT(wait);
544
545                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
546                         retval = -EINPROGRESS;
547                         goto out;
548                 }
549
550                 if (dev->power.irq_safe) {
551                         spin_unlock(&dev->power.lock);
552
553                         cpu_relax();
554
555                         spin_lock(&dev->power.lock);
556                         goto repeat;
557                 }
558
559                 /* Wait for the other suspend running in parallel with us. */
560                 for (;;) {
561                         prepare_to_wait(&dev->power.wait_queue, &wait,
562                                         TASK_UNINTERRUPTIBLE);
563                         if (dev->power.runtime_status != RPM_SUSPENDING)
564                                 break;
565
566                         spin_unlock_irq(&dev->power.lock);
567
568                         schedule();
569
570                         spin_lock_irq(&dev->power.lock);
571                 }
572                 finish_wait(&dev->power.wait_queue, &wait);
573                 goto repeat;
574         }
575
576         if (dev->power.no_callbacks)
577                 goto no_callback;       /* Assume success. */
578
579         /* Carry out an asynchronous or a synchronous suspend. */
580         if (rpmflags & RPM_ASYNC) {
581                 dev->power.request = (rpmflags & RPM_AUTO) ?
582                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
583                 if (!dev->power.request_pending) {
584                         dev->power.request_pending = true;
585                         queue_work(pm_wq, &dev->power.work);
586                 }
587                 goto out;
588         }
589
590         __update_runtime_status(dev, RPM_SUSPENDING);
591
592         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
593
594         dev_pm_enable_wake_irq_check(dev, true);
595         retval = rpm_callback(callback, dev);
596         if (retval)
597                 goto fail;
598
599  no_callback:
600         __update_runtime_status(dev, RPM_SUSPENDED);
601         pm_runtime_deactivate_timer(dev);
602
603         if (dev->parent) {
604                 parent = dev->parent;
605                 atomic_add_unless(&parent->power.child_count, -1, 0);
606         }
607         wake_up_all(&dev->power.wait_queue);
608
609         if (dev->power.deferred_resume) {
610                 dev->power.deferred_resume = false;
611                 rpm_resume(dev, 0);
612                 retval = -EAGAIN;
613                 goto out;
614         }
615
616         /* Maybe the parent is now able to suspend. */
617         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
618                 spin_unlock(&dev->power.lock);
619
620                 spin_lock(&parent->power.lock);
621                 rpm_idle(parent, RPM_ASYNC);
622                 spin_unlock(&parent->power.lock);
623
624                 spin_lock(&dev->power.lock);
625         }
626
627  out:
628         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
629
630         return retval;
631
632  fail:
633         dev_pm_disable_wake_irq_check(dev);
634         __update_runtime_status(dev, RPM_ACTIVE);
635         dev->power.deferred_resume = false;
636         wake_up_all(&dev->power.wait_queue);
637
638         if (retval == -EAGAIN || retval == -EBUSY) {
639                 dev->power.runtime_error = 0;
640
641                 /*
642                  * If the callback routine failed an autosuspend, and
643                  * if the last_busy time has been updated so that there
644                  * is a new autosuspend expiration time, automatically
645                  * reschedule another autosuspend.
646                  */
647                 if ((rpmflags & RPM_AUTO) &&
648                     pm_runtime_autosuspend_expiration(dev) != 0)
649                         goto repeat;
650         } else {
651                 pm_runtime_cancel_pending(dev);
652         }
653         goto out;
654 }
655
656 /**
657  * rpm_resume - Carry out runtime resume of given device.
658  * @dev: Device to resume.
659  * @rpmflags: Flag bits.
660  *
661  * Check if the device's runtime PM status allows it to be resumed.  Cancel
662  * any scheduled or pending requests.  If another resume has been started
663  * earlier, either return immediately or wait for it to finish, depending on the
664  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
665  * parallel with this function, either tell the other process to resume after
666  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
667  * flag is set then queue a resume request; otherwise run the
668  * ->runtime_resume() callback directly.  Queue an idle notification for the
669  * device if the resume succeeded.
670  *
671  * This function must be called under dev->power.lock with interrupts disabled.
672  */
673 static int rpm_resume(struct device *dev, int rpmflags)
674         __releases(&dev->power.lock) __acquires(&dev->power.lock)
675 {
676         int (*callback)(struct device *);
677         struct device *parent = NULL;
678         int retval = 0;
679
680         trace_rpm_resume_rcuidle(dev, rpmflags);
681
682  repeat:
683         if (dev->power.runtime_error)
684                 retval = -EINVAL;
685         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
686             && dev->power.runtime_status == RPM_ACTIVE)
687                 retval = 1;
688         else if (dev->power.disable_depth > 0)
689                 retval = -EACCES;
690         if (retval)
691                 goto out;
692
693         /*
694          * Other scheduled or pending requests need to be canceled.  Small
695          * optimization: If an autosuspend timer is running, leave it running
696          * rather than cancelling it now only to restart it again in the near
697          * future.
698          */
699         dev->power.request = RPM_REQ_NONE;
700         if (!dev->power.timer_autosuspends)
701                 pm_runtime_deactivate_timer(dev);
702
703         if (dev->power.runtime_status == RPM_ACTIVE) {
704                 retval = 1;
705                 goto out;
706         }
707
708         if (dev->power.runtime_status == RPM_RESUMING
709             || dev->power.runtime_status == RPM_SUSPENDING) {
710                 DEFINE_WAIT(wait);
711
712                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
713                         if (dev->power.runtime_status == RPM_SUSPENDING)
714                                 dev->power.deferred_resume = true;
715                         else
716                                 retval = -EINPROGRESS;
717                         goto out;
718                 }
719
720                 if (dev->power.irq_safe) {
721                         spin_unlock(&dev->power.lock);
722
723                         cpu_relax();
724
725                         spin_lock(&dev->power.lock);
726                         goto repeat;
727                 }
728
729                 /* Wait for the operation carried out in parallel with us. */
730                 for (;;) {
731                         prepare_to_wait(&dev->power.wait_queue, &wait,
732                                         TASK_UNINTERRUPTIBLE);
733                         if (dev->power.runtime_status != RPM_RESUMING
734                             && dev->power.runtime_status != RPM_SUSPENDING)
735                                 break;
736
737                         spin_unlock_irq(&dev->power.lock);
738
739                         schedule();
740
741                         spin_lock_irq(&dev->power.lock);
742                 }
743                 finish_wait(&dev->power.wait_queue, &wait);
744                 goto repeat;
745         }
746
747         /*
748          * See if we can skip waking up the parent.  This is safe only if
749          * power.no_callbacks is set, because otherwise we don't know whether
750          * the resume will actually succeed.
751          */
752         if (dev->power.no_callbacks && !parent && dev->parent) {
753                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
754                 if (dev->parent->power.disable_depth > 0
755                     || dev->parent->power.ignore_children
756                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
757                         atomic_inc(&dev->parent->power.child_count);
758                         spin_unlock(&dev->parent->power.lock);
759                         retval = 1;
760                         goto no_callback;       /* Assume success. */
761                 }
762                 spin_unlock(&dev->parent->power.lock);
763         }
764
765         /* Carry out an asynchronous or a synchronous resume. */
766         if (rpmflags & RPM_ASYNC) {
767                 dev->power.request = RPM_REQ_RESUME;
768                 if (!dev->power.request_pending) {
769                         dev->power.request_pending = true;
770                         queue_work(pm_wq, &dev->power.work);
771                 }
772                 retval = 0;
773                 goto out;
774         }
775
776         if (!parent && dev->parent) {
777                 /*
778                  * Increment the parent's usage counter and resume it if
779                  * necessary.  Not needed if dev is irq-safe; then the
780                  * parent is permanently resumed.
781                  */
782                 parent = dev->parent;
783                 if (dev->power.irq_safe)
784                         goto skip_parent;
785                 spin_unlock(&dev->power.lock);
786
787                 pm_runtime_get_noresume(parent);
788
789                 spin_lock(&parent->power.lock);
790                 /*
791                  * Resume the parent if it has runtime PM enabled and not been
792                  * set to ignore its children.
793                  */
794                 if (!parent->power.disable_depth
795                     && !parent->power.ignore_children) {
796                         rpm_resume(parent, 0);
797                         if (parent->power.runtime_status != RPM_ACTIVE)
798                                 retval = -EBUSY;
799                 }
800                 spin_unlock(&parent->power.lock);
801
802                 spin_lock(&dev->power.lock);
803                 if (retval)
804                         goto out;
805                 goto repeat;
806         }
807  skip_parent:
808
809         if (dev->power.no_callbacks)
810                 goto no_callback;       /* Assume success. */
811
812         __update_runtime_status(dev, RPM_RESUMING);
813
814         callback = RPM_GET_CALLBACK(dev, runtime_resume);
815
816         dev_pm_disable_wake_irq_check(dev);
817         retval = rpm_callback(callback, dev);
818         if (retval) {
819                 __update_runtime_status(dev, RPM_SUSPENDED);
820                 pm_runtime_cancel_pending(dev);
821                 dev_pm_enable_wake_irq_check(dev, false);
822         } else {
823  no_callback:
824                 __update_runtime_status(dev, RPM_ACTIVE);
825                 pm_runtime_mark_last_busy(dev);
826                 if (parent)
827                         atomic_inc(&parent->power.child_count);
828         }
829         wake_up_all(&dev->power.wait_queue);
830
831         if (retval >= 0)
832                 rpm_idle(dev, RPM_ASYNC);
833
834  out:
835         if (parent && !dev->power.irq_safe) {
836                 spin_unlock_irq(&dev->power.lock);
837
838                 pm_runtime_put(parent);
839
840                 spin_lock_irq(&dev->power.lock);
841         }
842
843         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
844
845         return retval;
846 }
847
848 /**
849  * pm_runtime_work - Universal runtime PM work function.
850  * @work: Work structure used for scheduling the execution of this function.
851  *
852  * Use @work to get the device object the work is to be done for, determine what
853  * is to be done and execute the appropriate runtime PM function.
854  */
855 static void pm_runtime_work(struct work_struct *work)
856 {
857         struct device *dev = container_of(work, struct device, power.work);
858         enum rpm_request req;
859
860         spin_lock_irq(&dev->power.lock);
861
862         if (!dev->power.request_pending)
863                 goto out;
864
865         req = dev->power.request;
866         dev->power.request = RPM_REQ_NONE;
867         dev->power.request_pending = false;
868
869         switch (req) {
870         case RPM_REQ_NONE:
871                 break;
872         case RPM_REQ_IDLE:
873                 rpm_idle(dev, RPM_NOWAIT);
874                 break;
875         case RPM_REQ_SUSPEND:
876                 rpm_suspend(dev, RPM_NOWAIT);
877                 break;
878         case RPM_REQ_AUTOSUSPEND:
879                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
880                 break;
881         case RPM_REQ_RESUME:
882                 rpm_resume(dev, RPM_NOWAIT);
883                 break;
884         }
885
886  out:
887         spin_unlock_irq(&dev->power.lock);
888 }
889
890 /**
891  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
892  * @t: Timer that fired; the device is obtained from it via from_timer().
893  *
894  * Check if the time is right and queue a suspend request.
895  */
896 static void pm_suspend_timer_fn(struct timer_list *t)
897 {
898         struct device *dev = from_timer(dev, t, power.suspend_timer);
899         unsigned long flags;
900         unsigned long expires;
901
902         spin_lock_irqsave(&dev->power.lock, flags);
903
904         expires = dev->power.timer_expires;
905         /* If 'expires' is after 'jiffies', we've been called too early. */
906         if (expires > 0 && !time_after(expires, jiffies)) {
907                 dev->power.timer_expires = 0;
908                 rpm_suspend(dev, dev->power.timer_autosuspends ?
909                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
910         }
911
912         spin_unlock_irqrestore(&dev->power.lock, flags);
913 }
914
915 /**
916  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
917  * @dev: Device to suspend.
918  * @delay: Time to wait before submitting a suspend request, in milliseconds.
919  */
920 int pm_schedule_suspend(struct device *dev, unsigned int delay)
921 {
922         unsigned long flags;
923         int retval;
924
925         spin_lock_irqsave(&dev->power.lock, flags);
926
927         if (!delay) {
928                 retval = rpm_suspend(dev, RPM_ASYNC);
929                 goto out;
930         }
931
932         retval = rpm_check_suspend_allowed(dev);
933         if (retval)
934                 goto out;
935
936         /* Other scheduled or pending requests need to be canceled. */
937         pm_runtime_cancel_pending(dev);
938
939         dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
940         dev->power.timer_expires += !dev->power.timer_expires;
941         dev->power.timer_autosuspends = 0;
942         mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
943
944  out:
945         spin_unlock_irqrestore(&dev->power.lock, flags);
946
947         return retval;
948 }
949 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
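
/*
 * Illustrative sketch (not part of the original file): a driver that expects
 * its hardware to stay idle for a while can ask for a delayed, asynchronous
 * suspend instead of suspending immediately.  The 500 ms delay is an
 * arbitrary example value.
 *
 *	int error = pm_schedule_suspend(dev, 500);
 *
 *	// A negative return means the suspend could not be scheduled
 *	// (e.g. the usage count is nonzero or runtime PM is disabled);
 *	// 1 means the device is already suspended.
 *	if (error < 0)
 *		dev_dbg(dev, "delayed suspend not scheduled: %d\n", error);
 */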
950
951 /**
952  * __pm_runtime_idle - Entry point for runtime idle operations.
953  * @dev: Device to send idle notification for.
954  * @rpmflags: Flag bits.
955  *
956  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
957  * return immediately if it is larger than zero.  Then carry out an idle
958  * notification, either synchronous or asynchronous.
959  *
960  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
961  * or if pm_runtime_irq_safe() has been called.
962  */
963 int __pm_runtime_idle(struct device *dev, int rpmflags)
964 {
965         unsigned long flags;
966         int retval;
967
968         if (rpmflags & RPM_GET_PUT) {
969                 if (!atomic_dec_and_test(&dev->power.usage_count))
970                         return 0;
971         }
972
973         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
974
975         spin_lock_irqsave(&dev->power.lock, flags);
976         retval = rpm_idle(dev, rpmflags);
977         spin_unlock_irqrestore(&dev->power.lock, flags);
978
979         return retval;
980 }
981 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
982
983 /**
984  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
985  * @dev: Device to suspend.
986  * @rpmflags: Flag bits.
987  *
988  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
989  * return immediately if it is larger than zero.  Then carry out a suspend,
990  * either synchronous or asynchronous.
991  *
992  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
993  * or if pm_runtime_irq_safe() has been called.
994  */
995 int __pm_runtime_suspend(struct device *dev, int rpmflags)
996 {
997         unsigned long flags;
998         int retval;
999
1000         if (rpmflags & RPM_GET_PUT) {
1001                 if (!atomic_dec_and_test(&dev->power.usage_count))
1002                         return 0;
1003         }
1004
1005         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1006
1007         spin_lock_irqsave(&dev->power.lock, flags);
1008         retval = rpm_suspend(dev, rpmflags);
1009         spin_unlock_irqrestore(&dev->power.lock, flags);
1010
1011         return retval;
1012 }
1013 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1014
1015 /**
1016  * __pm_runtime_resume - Entry point for runtime resume operations.
1017  * @dev: Device to resume.
1018  * @rpmflags: Flag bits.
1019  *
1020  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1021  * carry out a resume, either synchronous or asynchronous.
1022  *
1023  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1024  * or if pm_runtime_irq_safe() has been called.
1025  */
1026 int __pm_runtime_resume(struct device *dev, int rpmflags)
1027 {
1028         unsigned long flags;
1029         int retval;
1030
1031         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1032                         dev->power.runtime_status != RPM_ACTIVE);
1033
1034         if (rpmflags & RPM_GET_PUT)
1035                 atomic_inc(&dev->power.usage_count);
1036
1037         spin_lock_irqsave(&dev->power.lock, flags);
1038         retval = rpm_resume(dev, rpmflags);
1039         spin_unlock_irqrestore(&dev->power.lock, flags);
1040
1041         return retval;
1042 }
1043 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
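
/*
 * Illustrative sketch (not part of the original file): drivers normally reach
 * __pm_runtime_suspend()/__pm_runtime_resume() through the pm_runtime_get_*()
 * and pm_runtime_put_*() helpers, which pass RPM_GET_PUT.  foo_do_io() is a
 * hypothetical function; the error handling shown is the conventional pattern.
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int ret = pm_runtime_get_sync(dev);	// usage count++, resume
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);	// rebalance the count
 *			return ret;
 *		}
 *
 *		// ... access the hardware while it is RPM_ACTIVE ...
 *
 *		pm_runtime_mark_last_busy(dev);		// refresh last_busy
 *		pm_runtime_put_autosuspend(dev);	// usage count--, maybe suspend
 *		return 0;
 *	}
 */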
1044
1045 /**
1046  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1047  * @dev: Device to handle.
1048  *
1049  * Return -EINVAL if runtime PM is disabled for the device.
1050  *
1051  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
1052  * and the runtime PM usage counter is nonzero, increment the counter and
1053  * return 1.  Otherwise return 0 without changing the counter.
1054  */
1055 int pm_runtime_get_if_in_use(struct device *dev)
1056 {
1057         unsigned long flags;
1058         int retval;
1059
1060         spin_lock_irqsave(&dev->power.lock, flags);
1061         retval = dev->power.disable_depth > 0 ? -EINVAL :
1062                 dev->power.runtime_status == RPM_ACTIVE
1063                         && atomic_inc_not_zero(&dev->power.usage_count);
1064         spin_unlock_irqrestore(&dev->power.lock, flags);
1065         return retval;
1066 }
1067 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
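
/*
 * Illustrative sketch (not part of the original file): pm_runtime_get_if_in_use()
 * suits fast paths that should touch the hardware only when it is already
 * powered up and in use, without resuming it.  foo_poll_hw() is hypothetical.
 *
 *	static void foo_poll_hw(struct device *dev)
 *	{
 *		// 1: device is RPM_ACTIVE with a nonzero usage count, which
 *		// has now been incremented; 0 or -EINVAL: nothing to do.
 *		if (pm_runtime_get_if_in_use(dev) <= 0)
 *			return;
 *
 *		// ... read status registers ...
 *
 *		pm_runtime_put(dev);
 *	}
 */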
1068
1069 /**
1070  * __pm_runtime_set_status - Set runtime PM status of a device.
1071  * @dev: Device to handle.
1072  * @status: New runtime PM status of the device.
1073  *
1074  * If runtime PM of the device is disabled or its power.runtime_error field is
1075  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1076  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1077  * However, if the device has a parent and the parent is not active, and the
1078  * parent's power.ignore_children flag is unset, the device's status cannot be
1079  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1080  *
1081  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1082  * and the device parent's counter of unsuspended children is modified to
1083  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1084  * notification request for the parent is submitted.
1085  */
1086 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1087 {
1088         struct device *parent = dev->parent;
1089         unsigned long flags;
1090         bool notify_parent = false;
1091         int error = 0;
1092
1093         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1094                 return -EINVAL;
1095
1096         spin_lock_irqsave(&dev->power.lock, flags);
1097
1098         if (!dev->power.runtime_error && !dev->power.disable_depth) {
1099                 error = -EAGAIN;
1100                 goto out;
1101         }
1102
1103         if (dev->power.runtime_status == status || !parent)
1104                 goto out_set;
1105
1106         if (status == RPM_SUSPENDED) {
1107                 atomic_add_unless(&parent->power.child_count, -1, 0);
1108                 notify_parent = !parent->power.ignore_children;
1109         } else {
1110                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1111
1112                 /*
1113                  * It is invalid to put an active child under a parent that is
1114                  * not active, has runtime PM enabled and the
1115                  * 'power.ignore_children' flag unset.
1116                  */
1117                 if (!parent->power.disable_depth
1118                     && !parent->power.ignore_children
1119                     && parent->power.runtime_status != RPM_ACTIVE) {
1120                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1121                                 dev_name(dev),
1122                                 dev_name(parent));
1123                         error = -EBUSY;
1124                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1125                         atomic_inc(&parent->power.child_count);
1126                 }
1127
1128                 spin_unlock(&parent->power.lock);
1129
1130                 if (error)
1131                         goto out;
1132         }
1133
1134  out_set:
1135         __update_runtime_status(dev, status);
1136         dev->power.runtime_error = 0;
1137  out:
1138         spin_unlock_irqrestore(&dev->power.lock, flags);
1139
1140         if (notify_parent)
1141                 pm_request_idle(parent);
1142
1143         return error;
1144 }
1145 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
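
/*
 * Illustrative sketch (not part of the original file): __pm_runtime_set_status()
 * is usually reached via the pm_runtime_set_active()/pm_runtime_set_suspended()
 * wrappers, e.g. when a probe routine powers the device up by hand before
 * handing it to the PM core.  foo_probe() and foo_power_on() are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int ret = foo_power_on(dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		pm_runtime_set_active(dev);	// record RPM_ACTIVE while still disabled
 *		pm_runtime_enable(dev);		// let the PM core manage the device
 *		return 0;
 *	}
 */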
1146
1147 /**
1148  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1149  * @dev: Device to handle.
1150  *
1151  * Flush all pending requests for the device from pm_wq and wait for all
1152  * runtime PM operations involving the device in progress to complete.
1153  *
1154  * Should be called under dev->power.lock with interrupts disabled.
1155  */
1156 static void __pm_runtime_barrier(struct device *dev)
1157 {
1158         pm_runtime_deactivate_timer(dev);
1159
1160         if (dev->power.request_pending) {
1161                 dev->power.request = RPM_REQ_NONE;
1162                 spin_unlock_irq(&dev->power.lock);
1163
1164                 cancel_work_sync(&dev->power.work);
1165
1166                 spin_lock_irq(&dev->power.lock);
1167                 dev->power.request_pending = false;
1168         }
1169
1170         if (dev->power.runtime_status == RPM_SUSPENDING
1171             || dev->power.runtime_status == RPM_RESUMING
1172             || dev->power.idle_notification) {
1173                 DEFINE_WAIT(wait);
1174
1175                 /* Suspend, wake-up or idle notification in progress. */
1176                 for (;;) {
1177                         prepare_to_wait(&dev->power.wait_queue, &wait,
1178                                         TASK_UNINTERRUPTIBLE);
1179                         if (dev->power.runtime_status != RPM_SUSPENDING
1180                             && dev->power.runtime_status != RPM_RESUMING
1181                             && !dev->power.idle_notification)
1182                                 break;
1183                         spin_unlock_irq(&dev->power.lock);
1184
1185                         schedule();
1186
1187                         spin_lock_irq(&dev->power.lock);
1188                 }
1189                 finish_wait(&dev->power.wait_queue, &wait);
1190         }
1191 }
1192
1193 /**
1194  * pm_runtime_barrier - Flush pending requests and wait for completions.
1195  * @dev: Device to handle.
1196  *
1197  * Prevent the device from being suspended by incrementing its usage counter
1198  * and, if there's a pending resume request for the device, wake the device up.
1199  * Next, make sure that all pending requests for the device have been flushed
1200  * from pm_wq and wait for all runtime PM operations involving the device in
1201  * progress to complete.
1202  *
1203  * Return value:
1204  * 1, if there was a resume request pending and the device had to be woken up,
1205  * 0, otherwise
1206  */
1207 int pm_runtime_barrier(struct device *dev)
1208 {
1209         int retval = 0;
1210
1211         pm_runtime_get_noresume(dev);
1212         spin_lock_irq(&dev->power.lock);
1213
1214         if (dev->power.request_pending
1215             && dev->power.request == RPM_REQ_RESUME) {
1216                 rpm_resume(dev, 0);
1217                 retval = 1;
1218         }
1219
1220         __pm_runtime_barrier(dev);
1221
1222         spin_unlock_irq(&dev->power.lock);
1223         pm_runtime_put_noidle(dev);
1224
1225         return retval;
1226 }
1227 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1228
1229 /**
1230  * __pm_runtime_disable - Disable runtime PM of a device.
1231  * @dev: Device to handle.
1232  * @check_resume: If set, check if there's a resume request for the device.
1233  *
1234  * Increment power.disable_depth for the device and if it was zero previously,
1235  * cancel all pending runtime PM requests for the device and wait for all
1236  * operations in progress to complete.  The device can be either active or
1237  * suspended after its runtime PM has been disabled.
1238  *
1239  * If @check_resume is set and there's a resume request pending when
1240  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1241  * function will wake up the device before disabling its runtime PM.
1242  */
1243 void __pm_runtime_disable(struct device *dev, bool check_resume)
1244 {
1245         spin_lock_irq(&dev->power.lock);
1246
1247         if (dev->power.disable_depth > 0) {
1248                 dev->power.disable_depth++;
1249                 goto out;
1250         }
1251
1252         /*
1253          * Wake up the device if there's a resume request pending, because that
1254          * means there probably is some I/O to process and disabling runtime PM
1255          * shouldn't prevent the device from processing the I/O.
1256          */
1257         if (check_resume && dev->power.request_pending
1258             && dev->power.request == RPM_REQ_RESUME) {
1259                 /*
1260                  * Prevent suspends and idle notifications from being carried
1261                  * out after we have woken up the device.
1262                  */
1263                 pm_runtime_get_noresume(dev);
1264
1265                 rpm_resume(dev, 0);
1266
1267                 pm_runtime_put_noidle(dev);
1268         }
1269
1270         if (!dev->power.disable_depth++)
1271                 __pm_runtime_barrier(dev);
1272
1273  out:
1274         spin_unlock_irq(&dev->power.lock);
1275 }
1276 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1277
1278 /**
1279  * pm_runtime_enable - Enable runtime PM of a device.
1280  * @dev: Device to handle.
1281  */
1282 void pm_runtime_enable(struct device *dev)
1283 {
1284         unsigned long flags;
1285
1286         spin_lock_irqsave(&dev->power.lock, flags);
1287
1288         if (dev->power.disable_depth > 0)
1289                 dev->power.disable_depth--;
1290         else
1291                 dev_warn(dev, "Unbalanced %s!\n", __func__);
1292
1293         WARN(!dev->power.disable_depth &&
1294              dev->power.runtime_status == RPM_SUSPENDED &&
1295              !dev->power.ignore_children &&
1296              atomic_read(&dev->power.child_count) > 0,
1297              "Enabling runtime PM for inactive device (%s) with active children\n",
1298              dev_name(dev));
1299
1300         spin_unlock_irqrestore(&dev->power.lock, flags);
1301 }
1302 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1303
1304 /**
1305  * pm_runtime_forbid - Block runtime PM of a device.
1306  * @dev: Device to handle.
1307  *
1308  * Increase the device's usage count and clear its power.runtime_auto flag,
1309  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1310  * for it.
1311  */
1312 void pm_runtime_forbid(struct device *dev)
1313 {
1314         spin_lock_irq(&dev->power.lock);
1315         if (!dev->power.runtime_auto)
1316                 goto out;
1317
1318         dev->power.runtime_auto = false;
1319         atomic_inc(&dev->power.usage_count);
1320         rpm_resume(dev, 0);
1321
1322  out:
1323         spin_unlock_irq(&dev->power.lock);
1324 }
1325 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1326
1327 /**
1328  * pm_runtime_allow - Unblock runtime PM of a device.
1329  * @dev: Device to handle.
1330  *
1331  * Decrease the device's usage count and set its power.runtime_auto flag.
1332  */
1333 void pm_runtime_allow(struct device *dev)
1334 {
1335         spin_lock_irq(&dev->power.lock);
1336         if (dev->power.runtime_auto)
1337                 goto out;
1338
1339         dev->power.runtime_auto = true;
1340         if (atomic_dec_and_test(&dev->power.usage_count))
1341                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1342
1343  out:
1344         spin_unlock_irq(&dev->power.lock);
1345 }
1346 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1347
1348 /**
1349  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1350  * @dev: Device to handle.
1351  *
1352  * Set the power.no_callbacks flag, which tells the PM core that this
1353  * device is power-managed through its parent and has no runtime PM
1354  * callbacks of its own.  The runtime sysfs attributes will be removed.
1355  */
1356 void pm_runtime_no_callbacks(struct device *dev)
1357 {
1358         spin_lock_irq(&dev->power.lock);
1359         dev->power.no_callbacks = 1;
1360         spin_unlock_irq(&dev->power.lock);
1361         if (device_is_registered(dev))
1362                 rpm_sysfs_remove(dev);
1363 }
1364 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1365
1366 /**
1367  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1368  * @dev: Device to handle
1369  *
1370  * Set the power.irq_safe flag, which tells the PM core that the
1371  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1372  * always be invoked with the spinlock held and interrupts disabled.  It also
1373  * causes the parent's usage counter to be permanently incremented, preventing
1374  * the parent from runtime suspending -- otherwise an irq-safe child might have
1375  * to wait for a non-irq-safe parent.
1376  */
1377 void pm_runtime_irq_safe(struct device *dev)
1378 {
1379         if (dev->parent)
1380                 pm_runtime_get_sync(dev->parent);
1381         spin_lock_irq(&dev->power.lock);
1382         dev->power.irq_safe = 1;
1383         spin_unlock_irq(&dev->power.lock);
1384 }
1385 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
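
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * ->runtime_suspend()/->runtime_resume() callbacks may run with interrupts
 * disabled can declare that once during probe, which makes synchronous calls
 * such as pm_runtime_get_sync() usable from atomic context.
 *
 *	pm_runtime_irq_safe(dev);	// also pins the parent active from now on
 *	pm_runtime_enable(dev);
 */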
1386
1387 /**
1388  * update_autosuspend - Handle a change to a device's autosuspend settings.
1389  * @dev: Device to handle.
1390  * @old_delay: The former autosuspend_delay value.
1391  * @old_use: The former use_autosuspend value.
1392  *
1393  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1394  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1395  *
1396  * This function must be called under dev->power.lock with interrupts disabled.
1397  */
1398 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1399 {
1400         int delay = dev->power.autosuspend_delay;
1401
1402         /* Should runtime suspend be prevented now? */
1403         if (dev->power.use_autosuspend && delay < 0) {
1404
1405                 /* If it used to be allowed then prevent it. */
1406                 if (!old_use || old_delay >= 0) {
1407                         atomic_inc(&dev->power.usage_count);
1408                         rpm_resume(dev, 0);
1409                 }
1410         }
1411
1412         /* Runtime suspend should be allowed now. */
1413         else {
1414
1415                 /* If it used to be prevented then allow it. */
1416                 if (old_use && old_delay < 0)
1417                         atomic_dec(&dev->power.usage_count);
1418
1419                 /* Maybe we can autosuspend now. */
1420                 rpm_idle(dev, RPM_AUTO);
1421         }
1422 }
1423
1424 /**
1425  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1426  * @dev: Device to handle.
1427  * @delay: Value of the new delay in milliseconds.
1428  *
1429  * Set the device's power.autosuspend_delay value.  If it changes to negative
1430  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1431  * changes the other way, allow runtime suspends.
1432  */
1433 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1434 {
1435         int old_delay, old_use;
1436
1437         spin_lock_irq(&dev->power.lock);
1438         old_delay = dev->power.autosuspend_delay;
1439         old_use = dev->power.use_autosuspend;
1440         dev->power.autosuspend_delay = delay;
1441         update_autosuspend(dev, old_delay, old_use);
1442         spin_unlock_irq(&dev->power.lock);
1443 }
1444 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1445
1446 /**
1447  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1448  * @dev: Device to handle.
1449  * @use: New value for use_autosuspend.
1450  *
1451  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1452  * suspends as needed.
1453  */
1454 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1455 {
1456         int old_delay, old_use;
1457
1458         spin_lock_irq(&dev->power.lock);
1459         old_delay = dev->power.autosuspend_delay;
1460         old_use = dev->power.use_autosuspend;
1461         dev->power.use_autosuspend = use;
1462         update_autosuspend(dev, old_delay, old_use);
1463         spin_unlock_irq(&dev->power.lock);
1464 }
1465 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
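
/*
 * Illustrative sketch (not part of the original file): during probe, drivers
 * typically pair these two setters through the pm_runtime_use_autosuspend()
 * wrapper and later rely on pm_runtime_put_autosuspend().  The 2000 ms delay
 * is an arbitrary example value.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// idle for 2 s before suspend
 *	pm_runtime_use_autosuspend(dev);		// honour the delay on idle
 *	pm_runtime_enable(dev);
 */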
1466
1467 /**
1468  * pm_runtime_init - Initialize runtime PM fields in given device object.
1469  * @dev: Device object to initialize.
1470  */
1471 void pm_runtime_init(struct device *dev)
1472 {
1473         dev->power.runtime_status = RPM_SUSPENDED;
1474         dev->power.idle_notification = false;
1475
1476         dev->power.disable_depth = 1;
1477         atomic_set(&dev->power.usage_count, 0);
1478
1479         dev->power.runtime_error = 0;
1480
1481         atomic_set(&dev->power.child_count, 0);
1482         pm_suspend_ignore_children(dev, false);
1483         dev->power.runtime_auto = true;
1484
1485         dev->power.request_pending = false;
1486         dev->power.request = RPM_REQ_NONE;
1487         dev->power.deferred_resume = false;
1488         dev->power.accounting_timestamp = jiffies;
1489         INIT_WORK(&dev->power.work, pm_runtime_work);
1490
1491         dev->power.timer_expires = 0;
1492         timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
1493
1494         init_waitqueue_head(&dev->power.wait_queue);
1495 }
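Note that disable_depth starts at 1, so runtime PM is initially disabled for every device.  A bus type or driver has to enable it explicitly; a hedged sketch for hardware that probes in the powered-on state:

	/*
	 * Hedged sketch: the device probes powered on, so mark it active
	 * before dropping the initial disable_depth set by pm_runtime_init().
	 */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);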
1496
1497 /**
1498  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1499  * @dev: Device object to re-initialize.
1500  */
1501 void pm_runtime_reinit(struct device *dev)
1502 {
1503         if (!pm_runtime_enabled(dev)) {
1504                 if (dev->power.runtime_status == RPM_ACTIVE)
1505                         pm_runtime_set_suspended(dev);
1506                 if (dev->power.irq_safe) {
1507                         spin_lock_irq(&dev->power.lock);
1508                         dev->power.irq_safe = 0;
1509                         spin_unlock_irq(&dev->power.lock);
1510                         if (dev->parent)
1511                                 pm_runtime_put(dev->parent);
1512                 }
1513         }
1514 }
1515
1516 /**
1517  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1518  * @dev: Device object being removed from device hierarchy.
1519  */
1520 void pm_runtime_remove(struct device *dev)
1521 {
1522         __pm_runtime_disable(dev, false);
1523         pm_runtime_reinit(dev);
1524 }
1525
1526 /**
1527  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1528  * @dev: Device whose driver is going to be removed.
1529  *
1530  * Check links from this device to any consumers and if any of them have active
1531  * runtime PM references to the device, drop the usage counter of the device
1532  * (as many times as needed).
1533  *
1534  * Links with the DL_FLAG_MANAGED flag unset are ignored.
1535  *
1536  * Since the device is guaranteed to be runtime-active at the point this is
1537  * called, nothing else needs to be done here.
1538  *
1539  * Moreover, this is called after device_links_busy() has returned 'false', so
1540  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
1541  * therefore rpm_active can't be manipulated concurrently.
1542  */
1543 void pm_runtime_clean_up_links(struct device *dev)
1544 {
1545         struct device_link *link;
1546         int idx;
1547
1548         idx = device_links_read_lock();
1549
1550         list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
1551                 if (!(link->flags & DL_FLAG_MANAGED))
1552                         continue;
1553
1554                 while (refcount_dec_not_one(&link->rpm_active))
1555                         pm_runtime_put_noidle(dev);
1556         }
1557
1558         device_links_read_unlock(idx);
1559 }
1560
1561 /**
1562  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1563  * @dev: Consumer device.
1564  */
1565 void pm_runtime_get_suppliers(struct device *dev)
1566 {
1567         struct device_link *link;
1568         int idx;
1569
1570         idx = device_links_read_lock();
1571
1572         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1573                 if (link->flags & DL_FLAG_PM_RUNTIME) {
1574                         link->supplier_preactivated = true;
1575                         pm_runtime_get_sync(link->supplier);
1576                         refcount_inc(&link->rpm_active);
1577                 }
1578
1579         device_links_read_unlock(idx);
1580 }
1581
1582 /**
1583  * pm_runtime_put_suppliers - Drop references to supplier devices.
1584  * @dev: Consumer device.
1585  */
1586 void pm_runtime_put_suppliers(struct device *dev)
1587 {
1588         struct device_link *link;
1589         unsigned long flags;
1590         bool put;
1591         int idx;
1592
1593         idx = device_links_read_lock();
1594
1595         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1596                 if (link->supplier_preactivated) {
1597                         link->supplier_preactivated = false;
1598                         spin_lock_irqsave(&dev->power.lock, flags);
1599                         put = pm_runtime_status_suspended(dev) &&
1600                               refcount_dec_not_one(&link->rpm_active);
1601                         spin_unlock_irqrestore(&dev->power.lock, flags);
1602                         if (put)
1603                                 pm_runtime_put(link->supplier);
1604                 }
1605
1606         device_links_read_unlock(idx);
1607 }
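Both helpers above act only on device links created with DL_FLAG_PM_RUNTIME; the driver core calls them around consumer probe so that suppliers stay resumed while the consumer's driver binds.  A hedged sketch of creating such a link (consumer_dev and supplier_dev are placeholders, not names from this file):

	struct device_link *link;

	/* Runtime-resuming the consumer will also runtime-resume the supplier. */
	link = device_link_add(consumer_dev, supplier_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;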
1608
1609 void pm_runtime_new_link(struct device *dev)
1610 {
1611         spin_lock_irq(&dev->power.lock);
1612         dev->power.links_count++;
1613         spin_unlock_irq(&dev->power.lock);
1614 }
1615
1616 void pm_runtime_drop_link(struct device *dev)
1617 {
1618         spin_lock_irq(&dev->power.lock);
1619         WARN_ON(dev->power.links_count == 0);
1620         dev->power.links_count--;
1621         spin_unlock_irq(&dev->power.lock);
1622 }
1623
1624 static bool pm_runtime_need_not_resume(struct device *dev)
1625 {
1626         return atomic_read(&dev->power.usage_count) <= 1 &&
1627                 (atomic_read(&dev->power.child_count) == 0 ||
1628                  dev->power.ignore_children);
1629 }
1630
1631 /**
1632  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1633  * @dev: Device to suspend.
1634  *
1635  * Disable runtime PM so we can safely check the device's runtime PM status and
1636  * if it is active, invoke its ->runtime_suspend callback to suspend it and
1637  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1638  * usage and children counters don't indicate that the device was in use before
1639  * the system-wide transition under way, decrement its parent's children counter
1640  * (if there is a parent).  Keep runtime PM disabled to preserve the state
1641  * unless we encounter errors.
1642  *
1643  * Typically this function may be invoked from a system suspend callback to
1644  * make sure the device is put into a low-power state.  It should only be used
1645  * during system-wide PM transitions to sleep states and it assumes that the
1646  * analogous pm_runtime_force_resume() will be used to resume the device.
1647  */
1648 int pm_runtime_force_suspend(struct device *dev)
1649 {
1650         int (*callback)(struct device *);
1651         int ret;
1652
1653         pm_runtime_disable(dev);
1654         if (pm_runtime_status_suspended(dev))
1655                 return 0;
1656
1657         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1658
1659         ret = callback ? callback(dev) : 0;
1660         if (ret)
1661                 goto err;
1662
1663         /*
1664          * If the device can stay in suspend after the system-wide transition
1665          * to the working state that will follow, drop the children counter of
1666          * its parent, but set its status to RPM_SUSPENDED anyway in case this
1667          * function will be called again for it in the meantime.
1668          */
1669         if (pm_runtime_need_not_resume(dev))
1670                 pm_runtime_set_suspended(dev);
1671         else
1672                 __update_runtime_status(dev, RPM_SUSPENDED);
1673
1674         return 0;
1675
1676 err:
1677         pm_runtime_enable(dev);
1678         return ret;
1679 }
1680 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1681
1682 /**
1683  * pm_runtime_force_resume - Force a device into resume state if needed.
1684  * @dev: Device to resume.
1685  *
1686  * Prior to invoking this function we expect the user to have brought the
1687  * device into a low-power state by a call to pm_runtime_force_suspend().  Here
1688  * we reverse those actions and bring the device back to full power, if it is
1689  * expected to be used on system resume.  Otherwise, the resume is deferred to
1690  * be managed via runtime PM.
1691  *
1692  * Typically this function may be invoked from a system resume callback.
1693  */
1694 int pm_runtime_force_resume(struct device *dev)
1695 {
1696         int (*callback)(struct device *);
1697         int ret = 0;
1698
1699         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1700                 goto out;
1701
1702         /*
1703          * The value of the parent's children counter is correct already, so
1704          * just update the status of the device.
1705          */
1706         __update_runtime_status(dev, RPM_ACTIVE);
1707
1708         callback = RPM_GET_CALLBACK(dev, runtime_resume);
1709
1710         ret = callback ? callback(dev) : 0;
1711         if (ret) {
1712                 pm_runtime_set_suspended(dev);
1713                 goto out;
1714         }
1715
1716         pm_runtime_mark_last_busy(dev);
1717 out:
1718         pm_runtime_enable(dev);
1719         return ret;
1720 }
1721 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
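A common way to consume this pair is to point a driver's system sleep callbacks straight at them while keeping separate runtime PM callbacks; a hedged sketch, where foo_runtime_suspend() and foo_runtime_resume() are hypothetical driver callbacks:

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};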