// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

static void ioc_exit_icqs(struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
		ioc_exit_icq(icq);
	spin_unlock_irq(&ioc->lock);
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}
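
/*
 * A sketch of the lock ordering at play above: the nominal nesting
 * order (as taken in ioc_create_icq() below) is
 *
 *	spin_lock_irq(&q->queue_lock);
 *	spin_lock(&ioc->lock);
 *
 * ioc_release_fn() enters holding only ioc->lock, hence the trylock
 * and, when that fails, the unlock/relock dance that re-acquires both
 * locks in the correct order while RCU keeps q and icq alive.
 */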

/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock. Do it asynchronously from a workqueue.
 */
static bool ioc_delay_free(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	if (!hlist_empty(&ioc->icq_list)) {
		queue_work(system_power_efficient_wq, &ioc->release_work);
		spin_unlock_irqrestore(&ioc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	return false;
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}
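
/*
 * Illustrative sketch: a typical caller is elevator teardown, which
 * must sever every icq link before the old scheduler's icq_cache can
 * be destroyed, roughly:
 *
 *	ioc_clear_queue(q);	// takes and releases q->queue_lock itself
 *	// ... exit and free the elevator ...
 *
 * Note the caller must not already hold q->queue_lock; the list is
 * spliced onto a private list under that lock and drained afterwards.
 */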

#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);
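
/*
 * Illustrative sketch: every reference obtained, e.g. via
 * ioc_find_get_icq() below (which uses the static get_io_context()),
 * must be balanced by exactly one put_io_context():
 *
 *	struct io_cq *icq = ioc_find_get_icq(q);	// takes an ioc ref
 *
 *	if (icq) {
 *		// ... use icq and icq->ioc ...
 *		put_io_context(icq->ioc);		// drops that ref
 *	}
 *
 * The final put either frees the ioc immediately or, if icqs are still
 * linked, defers the teardown to ioc_release_fn() via ioc_delay_free().
 */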

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->active_ref)) {
		ioc_exit_icqs(ioc);
		put_io_context(ioc);
	}
}

static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	return ioc;
}

int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	if (unlikely(!task->io_context)) {
		struct io_context *ioc;

		task_unlock(task);

		ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
		if (!ioc)
			return -ENOMEM;

		task_lock(task);
		if (task->flags & PF_EXITING) {
			kmem_cache_free(iocontext_cachep, ioc);
			goto out;
		}
		if (task->io_context)
			kmem_cache_free(iocontext_cachep, ioc);
		else
			task->io_context = ioc;
	}
	task->io_context->ioprio = ioprio;
out:
	task_unlock(task);
	return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
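
/*
 * Illustrative sketch: the ioprio_set(2) syscall path builds the value
 * with the IOPRIO_PRIO_VALUE() helper and hands it over, roughly:
 *
 *	int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
 *	int ret = set_task_ioprio(task, ioprio);
 *
 * A nonzero return means the permission check failed (-EPERM), the LSM
 * rejected the change, or the io_context allocation failed (-ENOMEM).
 */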

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_inc(&ioc->active_ref);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}
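
/*
 * Illustrative sketch: the fork path reaches this function through the
 * copy_io() wrapper, and only when the parent already has an
 * io_context. The two outcomes are:
 *
 *	clone() with CLONE_IO	- parent and child share one io_context,
 *				  so an ioprio change in either task is
 *				  visible to the other;
 *	plain fork()		- the child gets a fresh context that
 *				  merely inherits a valid ioprio value.
 */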

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
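
/*
 * Illustrative sketch: callers must hold q->queue_lock, which is what
 * makes the returned pointer safe to use without taking a reference:
 *
 *	struct io_cq *icq = NULL;
 *
 *	spin_lock_irq(&q->queue_lock);
 *	if (current->io_context)
 *		icq = ioc_lookup_icq(q);
 *	spin_unlock_irq(&q->queue_lock);
 *
 * Removal needs both the ioc and queue locks, so an icq found while the
 * queue lock is held cannot be destroyed underneath the caller.
 */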

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure io_cq linking @ioc and @q exists. If the icq doesn't exist,
 * a new one will be allocated with GFP_ATOMIC.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = alloc_io_context(GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;

		task_lock(current);
		if (current->io_context) {
			kmem_cache_free(iocontext_cachep, ioc);
			ioc = current->io_context;
		} else {
			current->io_context = ioc;
		}

		get_io_context(ioc);
		task_unlock(current);
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
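
/*
 * Illustrative sketch: an I/O scheduler would typically attach the icq
 * to a request at prepare time (the helper below is hypothetical):
 *
 *	static void my_prepare_request(struct request *rq)
 *	{
 *		struct io_cq *icq = ioc_find_get_icq(rq->q);
 *
 *		rq->elv.icq = icq;	// may be NULL on allocation failure
 *	}
 *
 * On success, the io_context reference taken here is dropped with
 * put_io_context() once the request no longer needs the association.
 */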
#endif /* CONFIG_BLK_ICQ */

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);