kernel/power/power.h
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __aligned(PAGE_SIZE);

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline const char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
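
/*
 * For illustration (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 *	PAGES_FOR_IO = (4096 * 1024) >> 12 = 1024 pages (4 MB)
 *	SPARE_PAGES  = (1024 * 1024) >> 12 =  256 pages (1 MB)
 */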

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show	= _name##_show,			\
}
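
/*
 * Usage sketch: given image_size_show() and image_size_store() callbacks,
 *
 *	power_attr(image_size);
 *
 * defines image_size_attr, a 0644 sysfs attribute named "image_size" whose
 * show/store hooks are those two functions.  power_attr_ro() works the same
 * way but creates a read-only attribute and references no _store callback.
 */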

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs) which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (ie. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
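
/*
 * Illustrative sketch only (save_block_to_storage() is a hypothetical
 * helper): an image writer pulls the image out one block at a time, treating
 * a positive return value of snapshot_read_next() as the number of valid
 * bytes at data_of(handle), 0 as end of image and a negative value as an
 * error:
 *
 *	struct snapshot_handle handle = {};
 *	int nbytes;
 *
 *	while ((nbytes = snapshot_read_next(&handle)) > 0)
 *		save_block_to_storage(data_of(handle), nbytes);
 */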

extern bool hibernate_acquire(void);
extern void hibernate_release(void);

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
 * the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
#define SF_HW_SIG		8
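
/*
 * These are independent bit flags, so they are tested individually, e.g.
 * (illustrative only):
 *
 *	if (flags & SF_NOCOMPRESS_MODE)
 *		pr_debug("image was written without compression\n");
 */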

/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif

struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);
#else
static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif
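
/*
 * Illustrative only: pm_test_level holds one of the test levels above, so
 * suspend code can gate test-mode behaviour with a plain comparison, e.g.
 *
 *	if (pm_test_level == TEST_DEVICES)
 *		pr_info("suspend test stops after suspending devices\n");
 */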

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* !CONFIG_PM_WAKELOCKS */

static inline int pm_sleep_disable_secondary_cpus(void)
{
	cpuidle_pause();
	return suspend_disable_secondary_cpus();
}

static inline void pm_sleep_enable_secondary_cpus(void)
{
	suspend_enable_secondary_cpus();
	cpuidle_resume();
}