// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"

#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct kmem_cache *io_buf_cachep;
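
/* per-SQE state for IORING_OP_PROVIDE_BUFFERS / IORING_OP_REMOVE_BUFFERS */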
struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

struct io_buf_free {
	struct hlist_node		list;
	void				*mem;
	size_t				size;
	int				inuse;
};
static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
							   unsigned int bgid)
{
	return xa_load(&ctx->io_bl_xa, bgid);
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							 unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return __io_buffer_get_list(ctx, bgid);
}
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
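
/*
 * Return a legacy provided buffer to its group list so that it can be
 * selected again by a later request.
 */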
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}
void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
}
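
/*
 * Pick the first buffer from a legacy (IORING_OP_PROVIDE_BUFFERS) group
 * list, mark it selected on the request and hand its user address back.
 */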
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}

	return NULL;
}
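
/*
 * Pick the next entry from a mapped provided buffer ring. The application
 * produces entries and advances the tail; the kernel consumes from the head.
 */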
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	head &= bl->mask;
	/* mmaped buffers are always contig */
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		buf = &br->bufs[head];
	} else {
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}
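
/*
 * Select a buffer for a request with IOSQE_BUFFER_SELECT set, from either
 * the classic provided buffer list or a mapped buffer ring for this group.
 */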
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_buf_ring)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}
/*
 * Mark the given mapped range as free for reuse
 */
static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		if (bl->buf_ring == ibf->mem) {
			ibf->inuse = 0;
			return;
		}
	}

	/* can't happen... */
	WARN_ON_ONCE(1);
}
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_buf_ring) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			/*
			 * io_kbuf_list_free() will free the page(s) at
			 * ->release() time.
			 */
			io_kbuf_mark_free(ctx, bl);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_buf_ring = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}
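
/*
 * Drop a reference to a buffer list; the last reference removes any
 * remaining buffers and frees the list itself via RCU.
 */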
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}
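
/*
 * Tear down all buffer state for a ring at exit time: drop every buffer
 * group and free the cached legacy buffer entries.
 */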
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}
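
/*
 * Prep and issue handlers for IORING_OP_REMOVE_BUFFERS: validate the SQE,
 * then drop up to ->nbufs classic provided buffers from a group.
 */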
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_buf_ring)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
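
/*
 * Prep handler for IORING_OP_PROVIDE_BUFFERS: validate addresses, lengths,
 * counts and the starting buffer ID, guarding against arithmetic overflow.
 */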
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
#define IO_BUFFER_ALLOC_BATCH 64

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */
	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}
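
/*
 * Populate a buffer group list with ->nbufs entries taken from the context
 * cache, assigning consecutive buffer IDs starting at ->bid.
 */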
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
				       list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_buf_ring) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
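
/*
 * Pin the pages of a user-allocated buffer ring (IORING_REGISTER_PBUF_RING
 * without IOU_PBUF_RING_MMAP) so the kernel can address the ring directly.
 */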
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br;
	struct page **pages;
	int i, nr_pages;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Apparently some 32-bit boxes (ARM) will return highmem pages,
	 * which then need to be mapped. We could support that, but it'd
	 * complicate the code and slow down the common cases quite a bit.
	 * So just error out, returning -EINVAL just like we did on kernels
	 * that didn't support mapped buffer rings.
	 */
	for (i = 0; i < nr_pages; i++)
		if (PageHighMem(pages[i]))
			goto error_unpin;

	br = page_address(pages[0]);
#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1))
		goto error_unpin;
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_buf_ring = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	for (i = 0; i < nr_pages; i++)
		unpin_user_page(pages[i]);
	kvfree(pages);
	return -EINVAL;
}
/*
 * See if we have a suitable region that we can reuse, rather than allocate
 * both a new io_buf_free and mem region again. We leave it on the list as
 * even a reused entry will need freeing at ring release.
 */
static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
						    size_t ring_size)
{
	struct io_buf_free *ibf, *best = NULL;
	size_t best_dist;

	hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
		size_t dist;

		if (ibf->inuse || ibf->size < ring_size)
			continue;
		dist = ibf->size - ring_size;
		if (!best || dist < best_dist) {
			best = ibf;
			if (!dist)
				break;
			best_dist = dist;
		}
	}

	return best;
}
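
/*
 * Allocate (or reuse) kernel memory for an IOU_PBUF_RING_MMAP buffer ring
 * that the application will map via mmap() rather than providing itself.
 */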
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	struct io_buf_free *ibf;
	size_t ring_size;
	void *ptr;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	/* Reuse existing entry, if we can */
	ibf = io_lookup_buf_free_entry(ctx, ring_size);
	if (!ibf) {
		ptr = io_mem_alloc(ring_size);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		/* Allocate and store deferred free entry */
		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
		if (!ibf) {
			io_mem_free(ptr);
			return -ENOMEM;
		}
		ibf->mem = ptr;
		ibf->size = ring_size;
		hlist_add_head(&ibf->list, &ctx->io_buf_list);
	}
	ibf->inuse = 1;
	bl->buf_ring = ibf->mem;
	bl->is_buf_ring = 1;
	bl->is_mmap = 1;
	return 0;
}
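
/*
 * Handle IORING_REGISTER_PBUF_RING: validate the registration arguments and
 * set up either a user-provided (pinned) or kernel-allocated (mmap'ed) ring.
 */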
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}
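
/*
 * Handle IORING_UNREGISTER_PBUF_RING: tear down a previously registered
 * buffer ring for the given buffer group ID.
 */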
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}
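
/*
 * Handle IORING_REGISTER_PBUF_STATUS: report the current head of a
 * registered buffer ring back to the application.
 */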
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the
	 * end. This may then safely free the buffer_list (and drop the pages)
	 * at that point, vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->is_mmap)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}
/*
 * Called at or after ->release(), free the mmap'ed buffers that we used
 * for memory mapped provided buffer rings.
 */
void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
{
	struct io_buf_free *ibf;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
		hlist_del(&ibf->list);
		io_mem_free(ibf->mem);
		kfree(ibf);
	}
}