// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IO_RSRC_REF_BATCH	100

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

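/*
 * References on the active rsrc node are taken in batches of
 * IO_RSRC_REF_BATCH and cached in ctx->rsrc_cached_refs, so the percpu ref
 * is not touched for every request. Drop returns unused cached refs to the
 * node; refill tops the cache back up.
 */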
void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	if (ctx->rsrc_cached_refs) {
		io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
		ctx->rsrc_cached_refs = 0;
	}
}

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

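/*
 * Pinned buffer memory is charged against RLIMIT_MEMLOCK through
 * user->locked_vm (the cmpxchg loop above) and mirrored into
 * mm_account->pinned_vm by the two helpers below; unaccounting undoes both.
 */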
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

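/*
 * Sanity check a single registered buffer iovec: a NULL base is only valid
 * for a zero-length (sparse) entry, the length is capped at 1G, and
 * base + length must not wrap the address space.
 */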
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
	percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);

		if (prsrc->tag) {
			if (ctx->flags & IORING_SETUP_IOPOLL) {
				mutex_lock(&ctx->uring_lock);
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
				mutex_unlock(&ctx->uring_lock);
			} else {
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0, true);
			}
		}

		rsrc_data->do_put(ctx, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(ref_node);
	if (atomic_dec_and_test(&rsrc_data->refs))
		complete(&rsrc_data->done);
}

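/*
 * Delayed work handler: drain the lockless list of retired rsrc nodes and
 * put every resource that was queued on them.
 */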
void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct io_rsrc_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct io_rsrc_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
	unsigned long flags;
	bool first_add = false;
	unsigned long delay = HZ;

	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
	node->done = true;

	/* if we are mid-quiesce then do not delay */
	if (node->rsrc_data->quiesce)
		delay = 0;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;
		list_del(&node->node);
		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
	}
	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);

	if (first_add)
		mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
	struct io_rsrc_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

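/*
 * Retire the current rsrc node: if @data_to_kill is given, park the node on
 * ->rsrc_ref_list so it is recycled in order once its percpu ref hits zero,
 * then promote the pre-allocated backup node to be the new active node.
 */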
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	WARN_ON_ONCE(!ctx->rsrc_backup_node);
	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

	io_rsrc_refs_drop(ctx);

	if (data_to_kill) {
		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;

		rsrc_node->rsrc_data = data_to_kill;
		spin_lock_irq(&ctx->rsrc_ref_lock);
		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
		spin_unlock_irq(&ctx->rsrc_ref_lock);

		atomic_inc(&data_to_kill->refs);
		percpu_ref_kill(&rsrc_node->refs);
		ctx->rsrc_node = NULL;
	}

	if (!ctx->rsrc_node) {
		ctx->rsrc_node = ctx->rsrc_backup_node;
		ctx->rsrc_backup_node = NULL;
	}
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}

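/*
 * Wait for all outstanding references to @data to drop. This may release
 * ->uring_lock while sleeping, so callers must first hide the table from
 * new requests (see io_sqe_files_unregister()/io_sqe_buffers_unregister()).
 */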
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	data->quiesce = true;
	do {
		ret = io_rsrc_node_switch_start(ctx);
		if (ret)
			break;
		io_rsrc_node_switch(ctx, data);

		/* kill initial ref, already quiesced if zero */
		if (atomic_dec_and_test(&data->refs))
			break;
		mutex_unlock(&ctx->uring_lock);
		flush_delayed_work(&ctx->rsrc_put_work);
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (atomic_read(&data->refs) > 0) {
				/*
				 * it has been revived by another thread while
				 * we were unlocked
				 */
				mutex_unlock(&ctx->uring_lock);
			} else {
				break;
			}
		}

		atomic_inc(&data->refs);
		/* wait for all works potentially completing data->done */
		flush_delayed_work(&ctx->rsrc_put_work);
		reinit_completion(&data->done);

		ret = io_run_task_work_sig(ctx);
		mutex_lock(&ctx->uring_lock);
	} while (ret >= 0);
	data->quiesce = false;

	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

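/*
 * The tag table is allocated in PAGE_SIZE chunks instead of one large
 * allocation, so each piece stays order-0 no matter how many entries are
 * registered.
 */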
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = -ENOMEM;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}

	atomic_set(&data->refs, 1);
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

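/*
 * Handle a registered-file table update: for each requested slot, queue
 * removal of any existing file against the current rsrc node and optionally
 * install the new fd (-1 leaves the slot empty, IORING_REGISTER_FILES_SKIP
 * keeps the old entry).
 */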
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

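/*
 * Defer freeing of a replaced resource: remember it (and its tag) on the
 * current rsrc node so it is only put once every request that may still be
 * using it has released its node reference.
 */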
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &node->rsrc_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;

	fput(file);
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	io_rsrc_node_switch(ctx, NULL);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is just a registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we don't
 * match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

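/*
 * Pin the user pages backing [ubuf, ubuf + len). Only anonymous, shmem or
 * hugetlb-backed mappings are accepted; other file-backed VMAs are rejected
 * and any pages that were pinned along the way are released.
 */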
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

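/*
 * Register one fixed buffer: pin its pages, account them against the
 * memlock limit and build the bvec array later used by io_import_fixed().
 * A NULL iovec installs ctx->dummy_ubuf to mark the slot as sparse.
 */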
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

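/*
 * IORING_REGISTER_BUFFERS: allocate the tag table and user_bufs array, then
 * register each iovec (or a sparse dummy entry when no iovec array is
 * given).
 */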
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	else
		io_rsrc_node_switch(ctx, NULL);
	return ret;
}

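/*
 * Resolve a fixed-buffer read/write into a bvec iterator. The requested
 * range must fall entirely inside the registered buffer; any offset into
 * the buffer is applied by indexing straight into the bvec array rather
 * than walking it segment by segment.
 */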
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec, just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}