// SPDX-License-Identifier: GPL-2.0-only

/*
 * A simple wrapper around refcount. An allocated sched_core_cookie's
 * address is used to compute the cookie of the task.
 */
struct sched_core_cookie {
	refcount_t refcnt;
};

static unsigned long sched_core_alloc_cookie(void)
{
	struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);

	if (!ck)
		return 0;

	refcount_set(&ck->refcnt, 1);
	sched_core_get();

	return (unsigned long)ck;
}
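
/*
 * Design note: the cookie value is simply the address of the refcounted
 * allocation above. Tasks may share an SMT core only when their cookie
 * values compare equal, so the scheduling fast path never needs to
 * dereference a cookie; only these helpers do.
 */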

static void sched_core_put_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr && refcount_dec_and_test(&ptr->refcnt)) {
		kfree(ptr);
		sched_core_put();
	}
}

static unsigned long sched_core_get_cookie(unsigned long cookie)
{
	struct sched_core_cookie *ptr = (void *)cookie;

	if (ptr)
		refcount_inc(&ptr->refcnt);

	return cookie;
}

/*
 * sched_core_update_cookie - replace the cookie on a task
 * @p: the task to update
 * @cookie: the new cookie
 *
 * Effectively exchange the task cookie; caller is responsible for lifetimes on
 * both ends.
 *
 * Returns: the old cookie
 */
static unsigned long sched_core_update_cookie(struct task_struct *p,
					      unsigned long cookie)
{
	unsigned long old_cookie;
	struct rq_flags rf;
	struct rq *rq;
	bool enqueued;

	rq = task_rq_lock(p, &rf);

	/*
	 * Since creating a cookie implies sched_core_get(), and since we
	 * cannot set a cookie until after we've created it (nor destroy one
	 * until after we've removed it), core scheduling must be enabled
	 * here.
	 */
	SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));

	enqueued = sched_core_enqueued(p);
	if (enqueued)
		sched_core_dequeue(rq, p, DEQUEUE_SAVE);

	old_cookie = p->core_cookie;
	p->core_cookie = cookie;

	if (enqueued)
		sched_core_enqueue(rq, p);

	/*
	 * If task is currently running, it may not be compatible anymore after
	 * the cookie change, so enter the scheduler on its CPU to schedule it
	 * away.
	 *
	 * Note that it is possible that as a result of this cookie change, the
	 * core has now entered/left forced idle state. Defer accounting to the
	 * next scheduling edge, rather than always forcing a reschedule here.
	 */
	if (task_running(rq, p))
		resched_curr(rq);

	task_rq_unlock(rq, p, &rf);

	return old_cookie;
}
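
/*
 * Take a reference-counted snapshot of @p's current cookie. task_rq_lock()
 * in sched_core_update_cookie() also acquires ->pi_lock, so holding it here
 * serializes against concurrent cookie updates.
 */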
static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
	unsigned long cookie, flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cookie = sched_core_get_cookie(p->core_cookie);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return cookie;
}
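
/*
 * A forked child inherits a reference-counted copy of its parent's cookie,
 * so fork() keeps the child inside the parent's core-scheduling group until
 * the cookie is explicitly changed.
 */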
void sched_core_fork(struct task_struct *p)
{
	RB_CLEAR_NODE(&p->core_node);
	p->core_cookie = sched_core_clone_cookie(current);
}

void sched_core_free(struct task_struct *p)
{
	sched_core_put_cookie(p->core_cookie);
}
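
/*
 * Install @cookie on @p: take a reference on the new cookie, swap it onto
 * the task, and drop the reference that was held on the cookie it replaces.
 */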
static void __sched_core_set(struct task_struct *p, unsigned long cookie)
{
	cookie = sched_core_get_cookie(cookie);
	cookie = sched_core_update_cookie(p, cookie);
	sched_core_put_cookie(cookie);
}

/* Called from prctl interface: PR_SCHED_CORE */
int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
			 unsigned long uaddr)
{
	unsigned long cookie = 0, id = 0;
	struct task_struct *task, *p;
	struct pid *grp;
	int err = 0;

	if (!static_branch_likely(&sched_smt_present))
		return -ENODEV;

	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD != PIDTYPE_PID);
	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_THREAD_GROUP != PIDTYPE_TGID);
	BUILD_BUG_ON(PR_SCHED_CORE_SCOPE_PROCESS_GROUP != PIDTYPE_PGID);

	if (type > PIDTYPE_PGID || cmd >= PR_SCHED_CORE_MAX || pid < 0 ||
	    (cmd != PR_SCHED_CORE_GET && uaddr))
		return -EINVAL;

	rcu_read_lock();
	if (pid == 0) {
		task = current;
	} else {
		task = find_task_by_vpid(pid);
		if (!task) {
			rcu_read_unlock();
			return -ESRCH;
		}
	}
	get_task_struct(task);
	rcu_read_unlock();

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		err = -EPERM;
		goto out;
	}

	switch (cmd) {
	case PR_SCHED_CORE_GET:
		if (type != PIDTYPE_PID || uaddr & 7) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		if (cookie) {
			/* XXX improve ? */
			ptr_to_hashval((void *)cookie, &id);
		}
		err = put_user(id, (u64 __user *)uaddr);
		goto out;

	case PR_SCHED_CORE_CREATE:
		cookie = sched_core_alloc_cookie();
		if (!cookie) {
			err = -ENOMEM;
			goto out;
		}
		break;

	case PR_SCHED_CORE_SHARE_TO:
		cookie = sched_core_clone_cookie(current);
		break;

	case PR_SCHED_CORE_SHARE_FROM:
		if (type != PIDTYPE_PID) {
			err = -EINVAL;
			goto out;
		}
		cookie = sched_core_clone_cookie(task);
		__sched_core_set(current, cookie);
		goto out;

	default:
		err = -EINVAL;
		goto out;
	}

	if (type == PIDTYPE_PID) {
		__sched_core_set(task, cookie);
		goto out;
	}

	read_lock(&tasklist_lock);
	grp = task_pid_type(task, type);

	do_each_pid_thread(grp, type, p) {
		if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) {
			err = -EPERM;
			goto out_tasklist;
		}
	} while_each_pid_thread(grp, type, p);

	do_each_pid_thread(grp, type, p) {
		__sched_core_set(p, cookie);
	} while_each_pid_thread(grp, type, p);

out_tasklist:
	read_unlock(&tasklist_lock);

out:
	sched_core_put_cookie(cookie);
	put_task_struct(task);
	return err;
}
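
/*
 * Illustrative userspace usage (a sketch, not part of this file; error
 * handling omitted). A task can create a fresh cookie covering its whole
 * thread group, and a cooperating helper task can then adopt it:
 *
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *	      PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
 *
 *	// in the helper, where creator_pid names the task above:
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_FROM, creator_pid,
 *	      PR_SCHED_CORE_SCOPE_THREAD, 0);
 */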

#ifdef CONFIG_SCHEDSTATS

/* REQUIRES: rq->core's clock recently updated. */
void __sched_core_account_forceidle(struct rq *rq)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
	u64 delta, now = rq_clock(rq->core);
	struct rq *rq_i;
	struct task_struct *p;
	int i;

	lockdep_assert_rq_held(rq);

	WARN_ON_ONCE(!rq->core->core_forceidle_count);

	if (rq->core->core_forceidle_start == 0)
		return;

	delta = now - rq->core->core_forceidle_start;
	if (unlikely((s64)delta <= 0))
		return;

	rq->core->core_forceidle_start = now;

	if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
		/* can't be forced idle without a running task */
	} else if (rq->core->core_forceidle_count > 1 ||
		   rq->core->core_forceidle_occupation > 1) {
		/*
		 * For larger SMT configurations, we need to scale the charged
		 * forced idle amount since there can be more than one forced
		 * idle sibling and more than one running cookied task.
		 */
		delta *= rq->core->core_forceidle_count;
		delta = div_u64(delta, rq->core->core_forceidle_occupation);
	}
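
	/*
	 * Worked example (illustrative): with two siblings forced idle
	 * (core_forceidle_count == 2) and one running cookied task
	 * (core_forceidle_occupation == 1), delta is doubled, and the loop
	 * below charges that one task the combined idle time of both
	 * siblings.
	 */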
	for_each_cpu(i, smt_mask) {
		rq_i = cpu_rq(i);
		p = rq_i->core_pick ?: rq_i->curr;

		if (p == rq_i->idle)
			continue;

		__schedstat_add(p->stats.core_forceidle_sum, delta);
	}
}

void __sched_core_tick(struct rq *rq)
{
	if (!rq->core->core_forceidle_count)
		return;

	if (rq != rq->core)
		update_rq_clock(rq->core);

	__sched_core_account_forceidle(rq);
}

#endif /* CONFIG_SCHEDSTATS */