/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL

struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	const char *name;
	int exit_signal;
	u32 kthread:1;
	u32 io_thread:1;
	u32 user_worker:1;
	u32 no_files:1;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
	int cgroup;
	int idle;
	int (*fn)(void *);
	void *fn_arg;
	struct cgroup *cgrp;
	struct css_set *cset;
};
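
/*
 * Illustrative only (an assumption, modeled on kernel_thread() in
 * kernel/fork.c): in-kernel callers fill a kernel_clone_args on the
 * stack and hand it to kernel_clone() (declared below):
 *
 *	struct kernel_clone_args args = {
 *		.flags		= ((lower_32_bits(flags) | CLONE_VM |
 *				    CLONE_UNTRACED) & ~CSIGNAL),
 *		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
 *		.fn		= fn,
 *		.fn_arg		= arg,
 *		.name		= name,
 *		.kthread	= 1,
 *	};
 *
 *	pid = kernel_clone(&args);
 */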

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
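
/*
 * Illustrative sketch (not part of this header): readers can hold
 * tasklist_lock to walk the task list stably:
 *
 *	struct task_struct *p;
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(p)
 *		pr_info("%s[%d]\n", p->comm, p->pid);
 *	read_unlock(&tasklist_lock);
 *
 * for_each_process() comes from <linux/sched/signal.h>; pure readers
 * can often use RCU instead.
 */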

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);

extern void mm_cache_init(void);
extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct *p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *copy_process(struct pid *pid, int trace, int node,
				 struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
			   unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
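
/*
 * Illustrative sketch (an assumption, loosely modeled on the usermode
 * helper in kernel/umh.c; some_fn/some_arg are placeholders): spawn a
 * user-mode thread and reap it synchronously:
 *
 *	pid_t pid = user_mode_thread(some_fn, some_arg, SIGCHLD);
 *	int status;
 *
 *	if (pid < 0)
 *		return pid;
 *	kernel_wait(pid, &status);
 */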

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}
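
/*
 * Typical usage (illustrative sketch, not an API defined here): pin a
 * task looked up under RCU so it can be used after the read section:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 *	if (p) {
 *		... use p ...
 *		put_task_struct(p);
 *	}
 */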

extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
	if (!refcount_dec_and_test(&t->usage))
		return;

	/*
	 * In !RT, it is always safe to call __put_task_struct().
	 * Under RT, we can only call it in preemptible context.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);

		lock_map_acquire_try(&put_task_map);
		__put_task_struct(t);
		lock_map_release(&put_task_map);
		return;
	}

	/*
	 * Under PREEMPT_RT we can't call __put_task_struct() in atomic
	 * context because it indirectly acquires sleeping locks.
	 *
	 * call_rcu() will schedule __put_task_struct_rcu_cb() to be
	 * called in process context.
	 *
	 * __put_task_struct() is called when
	 * refcount_dec_and_test(&t->usage) succeeds.
	 *
	 * This means that it can't "conflict" with
	 * put_task_struct_rcu_user() which abuses ->rcu the same
	 * way; rcu_users has a reference so task->usage can't be
	 * zero after the rcu_users 1 -> 0 transition.
	 *
	 * delayed_free_task() also uses ->rcu, but it is only called
	 * when fork fails. Therefore there is no way it can conflict
	 * with put_task_struct().
	 */
	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
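
/*
 * The cleanup class above allows scope-based reference dropping via
 * __free() from <linux/cleanup.h> (illustrative sketch; 'pid' is a
 * placeholder struct pid pointer):
 *
 *	struct task_struct *p __free(put_task) =
 *		get_pid_task(pid, PIDTYPE_PID);
 *
 *	if (!p)
 *		return -ESRCH;
 *	... use p; put_task_struct(p) runs automatically at scope exit ...
 */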

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);

/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
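
/*
 * Opt-in architectures (CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST) supply
 * their own version that narrows the usercopy whitelist to the region
 * actually copied to/from userspace. Illustrative sketch, loosely
 * modeled on arm64, where 'uw' is that architecture's user-visible
 * sub-struct of thread_struct:
 *
 *	static inline void arch_thread_struct_whitelist(unsigned long *offset,
 *							unsigned long *size)
 *	{
 *		*offset = offsetof(struct thread_struct, uw);
 *		*size = sizeof_field(struct thread_struct, uw);
 *	}
 */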

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
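
/*
 * The guard above enables scope-based locking via guard()/scoped_guard()
 * from <linux/cleanup.h> (illustrative sketch):
 *
 *	guard(task_lock)(p);
 *	... ->mm, ->files etc. are stable here ...
 *
 * task_unlock(p) runs automatically when the enclosing scope is left.
 */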

#endif /* _LINUX_SCHED_TASK_H */