GNU Linux-libre 4.19.207-gnu1
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)                                               \
        __percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))     \
        PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS                                              \
        __attribute__((section(".discard"), unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition imposes the following two extra restrictions when
 * defining percpu variables.
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used in all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * so that hidden weak symbol collisions, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
        extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;             \
        extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
        __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;                    \
        extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;            \
        __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;                   \
        extern __PCPU_ATTRS(sec) __typeof__(type) name;                 \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak                 \
        __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)                        \
        extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)                         \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES                        \
        __typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)                                     \
        DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)                                      \
        DEFINE_PER_CPU_SECTION(type, name, "")

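/*
 * Illustrative sketch (hypothetical variable name; not part of the
 * original header): an ordinary per-CPU variable is defined once at file
 * scope and declared wherever it is used.
 *
 *      // in exactly one .c file
 *      DEFINE_PER_CPU(int, nr_widgets);
 *
 *      // in a header visible to all users
 *      DECLARE_PER_CPU(int, nr_widgets);
 *
 *      // typical access, protection against migration is implied
 *      this_cpu_inc(nr_widgets);
 */
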
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)                               \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)                                \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)                      \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                       \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
        ____cacheline_aligned_in_smp

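/*
 * Illustrative sketch (hypothetical names; not part of the original
 * header): a statistics block that its own CPU writes frequently and
 * other CPUs only read when collating, kept on its own cacheline(s) so
 * the hot updates do not share a line with unrelated data:
 *
 *      struct dev_stats {
 *              u64 rx_packets;
 *              u64 tx_packets;
 *      };
 *
 *      DEFINE_PER_CPU_SHARED_ALIGNED(struct dev_stats, dev_stats);
 */
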
#define DECLARE_PER_CPU_ALIGNED(type, name)                             \
        DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)    \
        ____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)                              \
        DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)     \
        ____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)                        \
        DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")           \
        __aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                         \
        DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")            \
        __aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)                 \
        DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)                          \
        DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Declaration/definition used for per-CPU variables that should be accessed
 * as decrypted when memory encryption is enabled in the guest.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name)                           \
        DECLARE_PER_CPU_SECTION(type, name, "..decrypted")

#define DEFINE_PER_CPU_DECRYPTED(type, name)                            \
        DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
#else
#define DEFINE_PER_CPU_DECRYPTED(type, name)    DEFINE_PER_CPU(type, name)
#endif

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so turn EXPORT_SYMBOL() into a
 * no-op when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

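/*
 * Illustrative sketch (hypothetical variable name): exporting a per-CPU
 * variable so that modules can reference it:
 *
 *      // core kernel .c file
 *      DEFINE_PER_CPU(int, foo_state);
 *      EXPORT_PER_CPU_SYMBOL(foo_state);
 *
 *      // shared header seen by the modules
 *      DECLARE_PER_CPU(int, foo_state);
 */
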
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies that @ptr is a percpu pointer without
 * evaluating @ptr and is invoked once by all accessors and operations
 * before a percpu area is accessed.  This is performed in the generic
 * part of percpu and arch overrides don't need to worry about it;
 * however, if an arch wants to implement an arch-specific percpu
 * accessor or operation, it may use __verify_pcpu_ptr() to verify the
 * parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)                                          \
do {                                                                    \
        const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
        (void)__vpp_verify;                                             \
} while (0)

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)                                 \
        RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)                                           \
({                                                                      \
        __verify_pcpu_ptr(ptr);                                         \
        SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));                 \
})

#define raw_cpu_ptr(ptr)                                                \
({                                                                      \
        __verify_pcpu_ptr(ptr);                                         \
        arch_raw_cpu_ptr(ptr);                                          \
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)                                               \
({                                                                      \
        __verify_pcpu_ptr(ptr);                                         \
        SHIFT_PERCPU_PTR(ptr, my_cpu_offset);                           \
})
#else
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif

#else   /* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)                                          \
({                                                                      \
        __verify_pcpu_ptr(__p);                                         \
        (typeof(*(__p)) __kernel __force *)(__p);                       \
})

#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)        per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)       raw_cpu_ptr(ptr)

#endif  /* CONFIG_SMP */

#define per_cpu(var, cpu)       (*per_cpu_ptr(&(var), cpu))

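/*
 * Illustrative sketch (hypothetical names): per_cpu() addresses a given
 * CPU's instance by number, e.g. to collate statistics from all CPUs,
 * while this_cpu_ptr() resolves to the local CPU's instance:
 *
 *      DEFINE_PER_CPU(unsigned long, hits);
 *
 *      unsigned long total_hits(void)
 *      {
 *              unsigned long sum = 0;
 *              int cpu;
 *
 *              for_each_possible_cpu(cpu)
 *                      sum += per_cpu(hits, cpu);
 *              return sum;
 *      }
 */
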
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)                                                \
(*({                                                                    \
        preempt_disable();                                              \
        this_cpu_ptr(&var);                                             \
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)                                                \
do {                                                                    \
        (void)&(var);                                                   \
        preempt_enable();                                               \
} while (0)

#define get_cpu_ptr(var)                                                \
({                                                                      \
        preempt_disable();                                              \
        this_cpu_ptr(var);                                              \
})

#define put_cpu_ptr(var)                                                \
do {                                                                    \
        (void)(var);                                                    \
        preempt_enable();                                               \
} while (0)

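/*
 * Illustrative sketch (hypothetical names): get_cpu_var()/put_cpu_var()
 * bracket a preemption-safe access to this CPU's instance of a per-CPU
 * variable, and get_cpu_ptr()/put_cpu_ptr() do the same for a pointer
 * obtained from the percpu allocator:
 *
 *      DEFINE_PER_CPU(struct list_head, pending);
 *
 *      void queue_item(struct item *it)
 *      {
 *              struct list_head *head = &get_cpu_var(pending);
 *
 *              list_add_tail(&it->node, head);
 *              put_cpu_var(pending);
 *      }
 *
 *      struct foo __percpu *ptrs = alloc_percpu(struct foo);
 *      struct foo *p;
 *
 *      p = get_cpu_ptr(ptrs);
 *      // ... work on this CPU's instance without being preempted ...
 *      put_cpu_ptr(ptrs);
 */
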
/*
 * Branching macros used to split an operation into a set of functions
 * that are called for the different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)                         \
({                                                                      \
        typeof(variable) pscr_ret__;                                    \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
        case 1: pscr_ret__ = stem##1(variable); break;                  \
        case 2: pscr_ret__ = stem##2(variable); break;                  \
        case 4: pscr_ret__ = stem##4(variable); break;                  \
        case 8: pscr_ret__ = stem##8(variable); break;                  \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pscr_ret__;                                                     \
})

#define __pcpu_size_call_return2(stem, variable, ...)                   \
({                                                                      \
        typeof(variable) pscr2_ret__;                                   \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
        case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
        case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
        case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
        case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pscr2_ret__;                                                    \
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)           \
({                                                                      \
        bool pdcrb_ret__;                                               \
        __verify_pcpu_ptr(&(pcp1));                                     \
        BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
        VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));       \
        VM_BUG_ON((unsigned long)(&(pcp2)) !=                           \
                  (unsigned long)(&(pcp1)) + sizeof(pcp1));             \
        switch(sizeof(pcp1)) {                                          \
        case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
        case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
        case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
        case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pdcrb_ret__;                                                    \
})

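/*
 * Illustrative sketch (hypothetical names): a layout that satisfies the
 * cmpxchg_double requirements above -- the first member is aligned to
 * twice its size and the second follows immediately after it:
 *
 *      struct pcpu_pair {
 *              unsigned long head;
 *              unsigned long gen;
 *      } __aligned(2 * sizeof(unsigned long));
 *
 *      DEFINE_PER_CPU(struct pcpu_pair, pair);
 *
 *      ok = this_cpu_cmpxchg_double(pair.head, pair.gen,
 *                                   old_head, old_gen,
 *                                   new_head, new_gen);
 */
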
#define __pcpu_size_call(stem, variable, ...)                           \
do {                                                                    \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
                case 1: stem##1(variable, __VA_ARGS__);break;           \
                case 2: stem##2(variable, __VA_ARGS__);break;           \
                case 4: stem##4(variable, __VA_ARGS__);break;           \
                case 8: stem##8(variable, __VA_ARGS__);break;           \
                default:                                                \
                        __bad_size_call_parameter();break;              \
        }                                                               \
} while (0)

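/*
 * Illustrative note (hypothetical variable name): the size dispatch above
 * routes an operation on a 4-byte per-CPU variable to the corresponding
 * 4-byte helper, which is either arch-provided or the generic fallback
 * from asm-generic/percpu.h.  Roughly:
 *
 *      DEFINE_PER_CPU(int, nr_events);
 *
 *      this_cpu_add(nr_events, 1)
 *              -> __pcpu_size_call(this_cpu_add_, nr_events, 1)
 *              -> this_cpu_add_4(nr_events, 1)
 */
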
/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes. E.g. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2-byte RMW actions. If the arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts then one of these RMW operations can show unexpected behavior
 * because the executing thread was rescheduled to another processor or an
 * interrupt occurred and the same percpu variable was modified from
 * interrupt context.
 */
#define raw_cpu_read(pcp)               __pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)         __pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)           __pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)           __pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)            __pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)    __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)         __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
        __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
        __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define raw_cpu_sub(pcp, val)           raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)                raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)                raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)    raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)         raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)         raw_cpu_add_return(pcp, -1)

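/*
 * Illustrative sketch (hypothetical names): raw_cpu_*() relies entirely
 * on the caller for protection.  Here interrupts are already disabled,
 * so the unchecked form is safe (__this_cpu_inc() would also work, but
 * raw_cpu_inc() skips the CONFIG_DEBUG_PREEMPT check):
 *
 *      DEFINE_PER_CPU(unsigned long, deferred_work);
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      raw_cpu_inc(deferred_work);
 *      local_irq_restore(flags);
 */
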
/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)                                            \
({                                                                      \
        __this_cpu_preempt_check("read");                               \
        raw_cpu_read(pcp);                                              \
})

#define __this_cpu_write(pcp, val)                                      \
({                                                                      \
        __this_cpu_preempt_check("write");                              \
        raw_cpu_write(pcp, val);                                        \
})

#define __this_cpu_add(pcp, val)                                        \
({                                                                      \
        __this_cpu_preempt_check("add");                                \
        raw_cpu_add(pcp, val);                                          \
})

#define __this_cpu_and(pcp, val)                                        \
({                                                                      \
        __this_cpu_preempt_check("and");                                \
        raw_cpu_and(pcp, val);                                          \
})

#define __this_cpu_or(pcp, val)                                         \
({                                                                      \
        __this_cpu_preempt_check("or");                                 \
        raw_cpu_or(pcp, val);                                           \
})

#define __this_cpu_add_return(pcp, val)                                 \
({                                                                      \
        __this_cpu_preempt_check("add_return");                         \
        raw_cpu_add_return(pcp, val);                                   \
})

#define __this_cpu_xchg(pcp, nval)                                      \
({                                                                      \
        __this_cpu_preempt_check("xchg");                               \
        raw_cpu_xchg(pcp, nval);                                        \
})

#define __this_cpu_cmpxchg(pcp, oval, nval)                             \
({                                                                      \
        __this_cpu_preempt_check("cmpxchg");                            \
        raw_cpu_cmpxchg(pcp, oval, nval);                               \
})

#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({      __this_cpu_preempt_check("cmpxchg_double");                     \
        raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
})

#define __this_cpu_sub(pcp, val)        __this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)             __this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)             __this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)      __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)      __this_cpu_add_return(pcp, -1)

/*
 * Operations with implied preemption/interrupt protection.  These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)              __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)        __pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)          __pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)          __pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)           __pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)   __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)        __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
        __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
        __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)

#define this_cpu_sub(pcp, val)          this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)               this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)               this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)   this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)        this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)        this_cpu_add_return(pcp, -1)

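/*
 * Illustrative sketch (hypothetical names, flush_local_batch() is made
 * up): this_cpu_*() operations carry their own preemption/interrupt
 * protection, so they can be called from any context without extra
 * locking, e.g. batching work per CPU:
 *
 *      DEFINE_PER_CPU(unsigned int, batch_count);
 *
 *      void account_event(void)
 *      {
 *              if (this_cpu_inc_return(batch_count) % 64 == 0)
 *                      flush_local_batch();
 *      }
 */
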
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */