// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}
/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}
/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;

	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
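/*
 * A minimal sketch of a getrange callback as range_check() expects it:
 * fill in *range (start, end_incl, offset) for the region containing
 * addr and return true, or return false if addr lies outside guest
 * memory.  A single flat region is shown; GUEST_SIZE and guest_base
 * are purely illustrative, not part of this file.
 *
 *	static bool example_getrange(struct vringh *vrh, u64 addr,
 *				     struct vringh_range *range)
 *	{
 *		if (addr >= GUEST_SIZE)
 *			return false;
 *		range->start = 0;
 *		range->end_incl = GUEST_SIZE - 1;
 *		range->offset = (u64)(unsigned long)guest_base;
 *		return true;
 *	}
 *
 * range->offset is what __vringh_iov() adds to a descriptor address to
 * obtain a pointer it can hand to the copy callbacks.
 */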
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}
/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}
static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}
static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(void *dst, const void *src, size_t len))
{
	int err, count = 0, indirect_count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(&desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (up_next == -1)
			count++;
		else
			indirect_count++;

		if (count > vrh->vring.num || indirect_count > desc_max) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->used)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
				indirect_count = 0;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}
static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(&used_ring->ring[off], used, part);
		if (!err)
			err = putused(&used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(&used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update.  This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}
static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}
/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(void *src, void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}
/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
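/*
 * A minimal bring-up sketch (the names uaddr_desc/uaddr_avail/uaddr_used
 * and the feature set are illustrative, not from this file):
 *
 *	struct vringh vrh;
 *	int err;
 *
 *	err = vringh_init_user(&vrh, 1ULL << VIRTIO_F_VERSION_1,
 *			       256, true,
 *			       (struct vring_desc __user *)uaddr_desc,
 *			       (struct vring_avail __user *)uaddr_avail,
 *			       (struct vring_used __user *)uaddr_used);
 *
 * Note that only num is validated here; the ring pointers must be
 * checked by the caller, e.g. via the getrange callback passed to
 * vringh_getdesc_user().
 */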
/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work. */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
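/*
 * A minimal calling sketch, assuming the hypothetical example_getrange()
 * shown after range_check().  The iovs may be krealloc'd by
 * __vringh_iov(), hence the cleanup calls:
 *
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, example_getrange,
 *				  &head);
 *	if (err == 1) {
 *		... vringh_iov_pull_user() / vringh_iov_push_user() ...
 *		vringh_complete_user(&vrh, head, bytes_written);
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */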
/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer((struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer((struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
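/*
 * Data-flow sketch: pull the guest's readable bytes, push the reply into
 * its writable buffers (req/resp and resp_len are illustrative).  Both
 * calls advance the iov and return the number of bytes actually
 * transferred, which may be less than requested if the guest supplied
 * shorter buffers:
 *
 *	ssize_t in  = vringh_iov_pull_user(&riov, &req, sizeof(req));
 *	ssize_t out = vringh_iov_push_user(&wiov, &resp, resp_len);
 */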
/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);
/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);
/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);
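/*
 * Sketch of the usual race-free sleep pattern built on the enable/
 * disable pair; handle_one() and sleep_until_kicked() are hypothetical.
 * If vringh_notify_enable_user() returns false, buffers slipped in
 * while we were enabling, so we keep processing instead of sleeping:
 *
 *	for (;;) {
 *		while (vringh_getdesc_user(&vrh, &riov, &wiov,
 *					   example_getrange, &head) == 1)
 *			handle_one();
 *		if (!vringh_notify_enable_user(&vrh))
 *			continue;
 *		sleep_until_kicked();
 *		vringh_notify_disable_user(&vrh);
 *	}
 */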
/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);
/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
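/*
 * Completion and notification pair up like this (sketch; how the
 * notification actually reaches the guest, e.g. signalling an eventfd,
 * is up to the caller, and notify_guest() is hypothetical):
 *
 *	vringh_complete_user(&vrh, head, bytes_written);
 *	err = vringh_need_notify_user(&vrh);
 *	if (err > 0)
 *		notify_guest();
 *
 * A negative err here reports an accessor failure, not "no notify".
 */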
/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(void *src, void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(void *dst, void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}
/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);
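/*
 * A minimal sketch: lay out a ring over contiguous memory with
 * vring_init() and hand the kernel-side pointers straight to
 * vringh_init_kern() ("queue" and the sizes are illustrative):
 *
 *	struct vring vring;
 *	struct vringh vrh;
 *	int err;
 *
 *	vring_init(&vring, 256, queue, PAGE_SIZE);
 *	err = vringh_init_kern(&vrh, 1ULL << VIRTIO_F_VERSION_1, 256,
 *			       true, vring.desc, vring.avail, vring.used);
 */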
/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
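/*
 * Service-loop sketch for a kernel ring; process() is hypothetical.
 * The kiovs may be enlarged with @gfp allocations, so clean them up
 * when done:
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1)
 *		process(&vrh, &riov, &wiov, head);
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */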
/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);
/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);
/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);
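/*
 * As on the user side, completion pairs with a notification check
 * (sketch; raise_interrupt() is hypothetical, and how the interrupt is
 * actually delivered is device-specific):
 *
 *	vringh_complete_kern(&vrh, head, bytes_written);
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		raise_interrupt();
 */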
/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);
/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);
/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

MODULE_LICENSE("GPL");