/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ]
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_CANCELING_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << WORK_OFFQ_CANCELING_BIT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,		/* use system default */
	WQ_AFFN_CPU,		/* one pod per CPU */
	WQ_AFFN_SMT,		/* one pod per SMT */
	WQ_AFFN_CACHE,		/* one pod per LLC */
	WQ_AFFN_NUMA,		/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,		/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

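/*
 * Example (illustrative sketch, not part of this header's API): a handler
 * shared with a delayed_work can recover its container via to_delayed_work()
 * and re-arm itself.  The my_poll_fn() name and the one-second period are
 * hypothetical.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		// do the periodic processing here ...
 *
 *		// re-arm: run again one second from now
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */
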
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

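/*
 * Example (illustrative sketch): statically declaring work items.  The
 * my_*() names are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static void my_timeout_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_timeout, my_timeout_fn);
 *
 *	// later, e.g. from an interrupt handler:
 *	//	schedule_work(&my_work);
 *	//	schedule_delayed_work(&my_timeout, msecs_to_jiffies(100));
 */
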
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

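/*
 * Example (illustrative sketch): initializing embedded work items at runtime.
 * The struct my_dev and my_*() names are hypothetical; the handlers recover
 * their container with container_of() (see also the from_work() helper
 * further down in this header).
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_irq_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, irq_work);
 *		// process dev ...
 *	}
 *
 *	static void my_poll_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *		// poll dev ...
 *	}
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->irq_work, my_irq_work_fn);
 *		INIT_DELAYED_WORK(&dev->poll_work, my_poll_work_fn);
 *	}
 */
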
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified to contribute
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * Return:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

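/*
 * Example (illustrative sketch): allocating a dedicated workqueue.  The name,
 * flags and max_active shown here are hypothetical choices, not
 * recommendations.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_driver_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	// queue previously initialized work items on it, then tear down:
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 */
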
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * Return:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)

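/*
 * Example (illustrative sketch): from_work() shortens the usual
 * container_of() dance in work handlers.  struct my_dev and the field name
 * are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = from_work(dev, work, irq_work);
 *
 *		// process dev ...
 *	}
 */
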
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

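/*
 * Example (illustrative sketch): restricting an unbound workqueue to a set of
 * CPUs via workqueue attributes.  my_unbound_wq and the chosen cpumask are
 * hypothetical; attributes can only be applied to unbound workqueues.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (!attrs)
 *		return -ENOMEM;
 *
 *	cpumask_copy(attrs->cpumask, cpumask_of(0));
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	free_workqueue_attrs(attrs);
 *	return ret;
 */
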
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

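/*
 * Example (illustrative sketch): rcu_work defers execution to process context
 * after an RCU grace period, which is handy when the release path may sleep.
 * struct my_obj and my_release() are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *		int id;
 *	};
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	// queued now, executed in a worker after a grace period elapses
 *	INIT_RCU_WORK(&obj->rwork, my_release);
 *	queue_rcu_work(system_unbound_wq, &obj->rwork);
 */
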
extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

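/*
 * Example (illustrative sketch): typical teardown ordering.  Cancel or flush
 * work items before freeing the objects they touch and before destroying the
 * workqueue they are queued on.  struct my_dev and its fields are
 * hypothetical.
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		// ensure the delayed work is neither pending nor running
 *		cancel_delayed_work_sync(&dev->poll_work);
 *
 *		// likewise for the plain work item
 *		cancel_work_sync(&dev->irq_work);
 *
 *		// all work items must be off the queue before this
 *		destroy_workqueue(dev->wq);
 *	}
 */
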
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to mod_delayed_work_on() but tries to use the local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

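/*
 * Example (illustrative sketch): mod_delayed_work() is handy for debouncing.
 * Each call pushes the execution time out to @delay from now, whether or not
 * the work was already queued.  The names and the 50ms delay are
 * hypothetical.
 *
 *	// called on every input event; flush_work_fn() runs once things go quiet
 *	static void my_input_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->flush_work,
 *				 msecs_to_jiffies(50));
 *	}
 */
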
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when possible.
 * Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */

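/*
 * Example (illustrative sketch): work_on_cpu() runs the callback synchronously
 * in a workqueue worker bound to the requested CPU and returns its result; it
 * may sleep.  my_fn() is hypothetical.
 *
 *	static long my_fn(void *arg)
 *	{
 *		// executes on the CPU passed to work_on_cpu()
 *		return raw_smp_processor_id();
 *	}
 *
 *	long cpu = work_on_cpu(3, my_fn, NULL);	// cpu == 3
 */
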
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);