// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	int (*threadfn)(void *);
	void *data;
	mm_segment_t oldfs;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the fact
	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * call_usermodehelper()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
	void *kthread = (__force void *)p->set_child_tid;

	if (kthread && !(p->flags & PF_KTHREAD))
		kthread = NULL;
	return kthread;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

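/*
 * Example (illustrative sketch, not part of the original source): the
 * typical shape of a thread function that honours kthread_should_stop().
 * The names my_thread_fn() and my_do_work() are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * The value returned here is what kthread_stop() passes back to its caller.
 */
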
bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock, and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

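/*
 * Example (illustrative sketch, not part of the original source): a
 * freezable variant of the stop-check loop. set_freezable() clears
 * PF_NOFREEZE; kthread_freezable_should_stop() enters the refrigerator
 * when needed. my_do_work() is hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
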
/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);

	if (kthread)
		return kthread->threadfn;
	return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		/*
		 * Thread is going to call schedule(), do not preempt it,
		 * or the caller of kthread_park() may spend more time in
		 * wait_task_inactive().
		 */
		preempt_disable();
		complete(&self->parked);
		schedule_preempt_disabled();
		preempt_enable();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->threadfn = threadfn;
	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	/*
	 * Thread is going to call schedule(), do not preempt it,
	 * or the creator may spend more time in wait_task_inactive().
	 */
	preempt_disable();
	complete(done);
	schedule_preempt_disabled();
	preempt_enable();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * the new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task,
				     housekeeping_cpumask(HK_FLAG_KTHREAD));
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);

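/*
 * Example (illustrative sketch, not part of the original source): creating,
 * naming and starting a kthread. my_thread_fn and my_dev are hypothetical.
 *
 *	struct task_struct *t;
 *
 *	t = kthread_create_on_node(my_thread_fn, my_dev, NUMA_NO_NODE,
 *				   "mydev/%d", my_dev->id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *
 * The kthread_run() macro from <linux/kthread.h> combines both steps:
 *
 *	t = kthread_run(my_thread_fn, my_dev, "mydev/%d", my_dev->id);
 */
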
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * bound to the given cpu.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
	return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
	struct kthread *kthread = to_kthread(k);

	if (!kthread)
		return;

	WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

	if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
	struct kthread *kthread = __to_kthread(p);

	if (!kthread)
		return false;

	return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);

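/*
 * Example (illustrative sketch, not part of the original source): the park
 * handshake as seen from a thread function; smpboot threads use a pattern
 * like this across CPU hotplug. my_do_work() is hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				my_do_work(data);
 *		}
 *		return 0;
 *	}
 *
 * A controller calls kthread_park(t) before taking a CPU down and
 * kthread_unpark(t) to let the loop resume.
 */
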
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

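/*
 * Example (illustrative sketch, not part of the original source): the stop
 * side of the lifecycle, where t is the task_struct obtained earlier from
 * kthread_create()/kthread_run(). kthread_stop() returns whatever the
 * thread function returned, or -EINTR if the thread was never woken.
 *
 *	int ret;
 *
 *	ret = kthread_stop(t);
 *	if (ret && ret != -EINTR)
 *		pr_warn("my thread exited with %d\n", ret);
 */
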
int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks held, or to leave preemption
 * or interrupts disabled, when they finish. There is a defined safe point
 * for freezing after one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

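/*
 * Example (illustrative sketch, not part of the original source): creating
 * a worker and queuing work on it. my_work_fn, my_work and my_worker are
 * hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work);
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *	static struct kthread_worker *my_worker;
 *
 *	my_worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(my_worker))
 *		return PTR_ERR(my_worker);
 *	kthread_queue_work(my_worker, &my_work);
 *
 * Teardown is kthread_flush_worker() plus kthread_destroy_worker(),
 * see below.
 */
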
/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is being used in a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);

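/*
 * Example (illustrative sketch, not part of the original source): arming a
 * delayed work so that my_work_fn() runs on the worker roughly one second
 * from now. my_dwork and my_worker are hypothetical, as above.
 *
 *	static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	kthread_queue_delayed_work(my_worker, &my_dwork, HZ);
 */
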
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and can
 * no longer manipulate the work list_head.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

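/*
 * Example (illustrative sketch, not part of the original source): a
 * debounce pattern. Each call pushes the timeout back, so my_work_fn()
 * runs only once events have been quiet for 100ms. my_worker and my_dwork
 * are hypothetical, as above.
 *
 *	kthread_mod_delayed_work(my_worker, &my_dwork,
 *				 msecs_to_jiffies(100));
 */
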
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself.  On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

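/*
 * Example (illustrative sketch, not part of the original source): tearing
 * down the hypothetical delayed work from the examples above before the
 * object it points into is freed. The return value says whether the work
 * was still pending.
 *
 *	bool was_pending = kthread_cancel_delayed_work_sync(&my_dwork);
 */
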
/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	if (active_mm != mm)
		mmdrop(active_mm);

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

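/*
 * Example (illustrative sketch, not part of the original source): a kthread
 * temporarily adopting a user address space to service requests on behalf
 * of a user process (vhost works this way with its owner's mm). my_mm is a
 * hypothetical mm_struct the caller already holds a reference on.
 *
 *	kthread_use_mm(my_mm);
 *	... copy_from_user() / copy_to_user() now act on my_mm ...
 *	kthread_unuse_mm(my_mm);
 */
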
/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif