GNU Linux-libre 4.9.308-gnu1
[releases.git] / kernel / power / process.c
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
 */
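/* Writable at run time through /sys/power/pm_freeze_timeout (milliseconds). */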
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

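/*
 * try_to_freeze_tasks - send freezable tasks to the refrigerator.
 * @user_only: if true, freeze user space tasks only; otherwise freezable
 *             kernel threads and workqueues are frozen as well.
 *
 * Retries with exponential backoff until nothing is left to freeze, the
 * freeze_timeout_msecs deadline passes or a wakeup event becomes pending.
 * Returns 0 when everything froze, -EBUSY otherwise.
 */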
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

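		/* A busy freezable workqueue counts as one more item left to freeze. */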
		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_workqueue_state();

		if (!wakeup) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes() must later call thaw_processes().
 *
 * On success, returns 0.  On failure, returns -errno and the system is
 * fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear();
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.  There is no guarantee that oom victims will
	 * ever reach the point of going away, so we have to wait with
	 * a timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, returns -errno and only the kernel
 * threads are thawed, so as to give the caller a chance to do additional
 * cleanups (if any) before thawing the userspace tasks.  It is thus the
 * caller's responsibility to thaw the userspace tasks when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

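/**
 * thaw_processes - Thaw user space processes and kernel threads.
 *
 * Counterpart of freeze_processes(): re-enables the OOM killer and usermode
 * helpers, thaws workqueues and every frozen task, and clears the caller's
 * PF_SUSPEND_TASK flag.
 */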
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

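/**
 * thaw_kernel_threads - Thaw kernel threads and workqueue workers only.
 *
 * Counterpart of freeze_kernel_threads(); user space tasks frozen by
 * freeze_processes() stay frozen.
 */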
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}
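
/*
 * Rough usage sketch, not part of the original file: a suspend-style caller
 * pairs these helpers roughly as below (cf. suspend_freeze_processes() and
 * suspend_thaw_processes() in kernel/power/power.h); details vary by caller.
 *
 *	error = freeze_processes();
 *	if (error)
 *		return error;		(freeze_processes() already thawed everything)
 *
 *	error = freeze_kernel_threads();
 *	if (error)
 *		thaw_processes();	(only kthreads were thawed, so thaw user space too)
 *
 *	...suspend and resume...
 *
 *	thaw_processes();		(on resume; thaws kernel threads as well)
 */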