// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 next;			/* The next desc state in a list. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra_packed {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra_packed *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
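/*
 * Background note: with VIRTIO_RING_F_INDIRECT_DESC negotiated, a request
 * made of several scatterlist elements consumes only one slot in the ring
 * proper; the slot points at a separately allocated table of descriptors.
 * That is why the check above goes indirect as soon as total_sg > 1: one
 * extra allocation and mapping buys much lower ring occupancy under load.
 */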
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
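/*
 * Illustrative use (not part of this file): a driver that builds large
 * scatter/gather requests can cap its segment size with the helper above,
 * along the lines of
 *
 *	blk_queue_max_segment_size(q, virtio_max_dma_size(vdev));
 *
 * so that no single element exceeds what the DMA layer can map.
 */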
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}
static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
/*
 * Split ring specific functions - *_split().
 */
static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);
		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}
static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}
static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}
static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index, unsigned int num, unsigned int vring_align,
	struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num,
	bool context, bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *), const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}
	if (!num)
		return NULL;
	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue, dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;
	return vq;
}
/*
 * Packed ring specific functions - *_packed().
 */
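/*
 * Unlike the split ring, the packed ring has a single descriptor ring and
 * no separate avail/used rings.  The driver and the device each maintain a
 * wrap counter; ownership of a descriptor is encoded in its
 * VRING_PACKED_DESC_F_AVAIL and VRING_PACKED_DESC_F_USED flag bits,
 * interpreted relative to those counters.
 */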
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra_packed *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_state[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_state[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i < head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_desc_packed(vq, &desc[i]);
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
	new = vq->packed.next_avail_idx;
	vq->num_added = 0;

	snapshot.u32 = *(u32 *)vq->packed.vring.device;
	flags = le16_to_cpu(snapshot.flags);

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
		goto out;
	}

	off_wrap = le16_to_cpu(snapshot.off_wrap);

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	if (wrap_counter != vq->packed.avail_wrap_counter)
		event_idx -= vq->packed.vring.num;

	needs_kick = vring_need_event(event_idx, new, old);
out:
	END_USE(vq);
	return needs_kick;
}
static void detach_buf_packed(struct vring_virtqueue *vq,
			      unsigned int id, void **ctx)
{
	struct vring_desc_state_packed *state = NULL;
	struct vring_packed_desc *desc;
	unsigned int i, curr;

	state = &vq->packed.desc_state[id];

	/* Clear data ptr. */
	state->data = NULL;

	vq->packed.desc_state[state->last].next = vq->free_head;
	vq->free_head = id;
	vq->vq.num_free += state->num;

	if (unlikely(vq->use_dma_api)) {
		curr = id;
		for (i = 0; i < state->num; i++) {
			vring_unmap_state_packed(vq,
				&vq->packed.desc_extra[curr]);
			curr = vq->packed.desc_state[curr].next;
		}
	}

	if (vq->indirect) {
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		desc = state->indir_desc;
		if (!desc)
			return;

		if (vq->use_dma_api) {
			len = vq->packed.desc_extra[id].len;
			for (i = 0; i < len / sizeof(struct vring_packed_desc);
					i++)
				vring_unmap_desc_packed(vq, &desc[i]);
		}
		kfree(desc);
		state->indir_desc = NULL;
	} else if (ctx) {
		*ctx = state->indir_desc;
	}
}
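/*
 * Per the packed ring layout described above, a descriptor has been used
 * when its AVAIL and USED flag bits are equal to each other and to the
 * device's current used wrap counter; that is exactly what the helper
 * below checks.
 */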
static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == used && used == used_wrap_counter;
}

static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
	return is_used_desc_packed(vq, vq->last_used_idx,
			vq->packed.used_wrap_counter);
}
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
					  unsigned int *len,
					  void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used, id;
	void *ret;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_packed(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used elements after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = vq->last_used_idx;
	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	vq->last_used_idx += vq->packed.desc_state[id].num;
	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
		vq->last_used_idx -= vq->packed.vring.num;
		vq->packed.used_wrap_counter ^= 1;
	}

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx |
					(vq->packed.used_wrap_counter <<
					 VRING_PACKED_EVENT_F_WRAP_CTR)));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}
}
static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx |
				(vq->packed.used_wrap_counter <<
				 VRING_PACKED_EVENT_F_WRAP_CTR));
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	END_USE(vq);
	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
			VRING_PACKED_EVENT_F_WRAP_CTR);
}
static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	bool wrap_counter;
	u16 used_idx;

	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

	return is_used_desc_packed(vq, used_idx, wrap_counter);
}
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 used_idx, wrap_counter;
	u16 bufs;

	START_USE(vq);

	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		/* TODO: tune this threshold */
		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
		wrap_counter = vq->packed.used_wrap_counter;

		used_idx = vq->last_used_idx + bufs;
		if (used_idx >= vq->packed.vring.num) {
			used_idx -= vq->packed.vring.num;
			wrap_counter ^= 1;
		}

		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
		vq->packed.event_flags_shadow = vq->event ?
				VRING_PACKED_EVENT_FLAG_DESC :
				VRING_PACKED_EVENT_FLAG_ENABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	/*
	 * We need to update event suppression structure first
	 * before re-checking for more used buffers.
	 */
	virtio_mb(vq->weak_barriers);

	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	END_USE(vq);
	return NULL;
}
static struct virtqueue *vring_create_virtqueue_packed(
	unsigned int index, unsigned int num, unsigned int vring_align,
	struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num,
	bool context, bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *), const char *name)
{
	struct vring_virtqueue *vq;
	struct vring_packed_desc *ring;
	struct vring_packed_desc_event *driver, *device;
	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
	size_t ring_size_in_bytes, event_size_in_bytes;
	unsigned int i;

	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
				 &ring_dma_addr,
				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!ring)
		goto err_ring;

	event_size_in_bytes = sizeof(struct vring_packed_desc_event);

	driver = vring_alloc_queue(vdev, event_size_in_bytes,
				   &driver_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!driver)
		goto err_driver;

	device = vring_alloc_queue(vdev, event_size_in_bytes,
				   &device_event_dma_addr,
				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
	if (!device)
		goto err_device;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		goto err_vq;

	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->we_own_ring = true;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->packed_ring = true;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->packed.ring_dma_addr = ring_dma_addr;
	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
	vq->packed.device_event_dma_addr = device_event_dma_addr;

	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
	vq->packed.event_size_in_bytes = event_size_in_bytes;

	vq->packed.vring.num = num;
	vq->packed.vring.desc = ring;
	vq->packed.vring.driver = driver;
	vq->packed.vring.device = device;

	vq->packed.next_avail_idx = 0;
	vq->packed.avail_wrap_counter = 1;
	vq->packed.used_wrap_counter = 1;
	vq->packed.event_flags_shadow = 0;
	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

	vq->packed.desc_state = kmalloc_array(num,
			sizeof(struct vring_desc_state_packed),
			GFP_KERNEL);
	if (!vq->packed.desc_state)
		goto err_desc_state;

	memset(vq->packed.desc_state, 0,
		num * sizeof(struct vring_desc_state_packed));

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->packed.desc_state[i].next = i + 1;

	vq->packed.desc_extra = kmalloc_array(num,
			sizeof(struct vring_desc_extra_packed),
			GFP_KERNEL);
	if (!vq->packed.desc_extra)
		goto err_desc_extra;

	memset(vq->packed.desc_extra, 0,
		num * sizeof(struct vring_desc_extra_packed));

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vq->packed.vring.driver->flags =
			cpu_to_le16(vq->packed.event_flags_shadow);
	}

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;

err_desc_extra:
	kfree(vq->packed.desc_state);
err_desc_state:
	kfree(vq);
err_vq:
	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
	return NULL;
}
/*
 * Generic functions and exported symbols.
 */
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp) :
				 virtqueue_add_split(_vq, sgs, total_sg,
					out_sgs, in_sgs, data, ctx, gfp);
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
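/*
 * Illustrative example (the request structure is hypothetical): queueing one
 * driver-readable header followed by one device-writable status byte, in the
 * style of virtio block/scsi drivers:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 */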
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
				 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->packed_ring)
		virtqueue_disable_cb_packed(_vq);
	else
		virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
				 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
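/*
 * Typical (illustrative) re-enable pattern for drivers that poll, e.g. from
 * a NAPI-style completion handler:
 *
 *	unsigned last = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, last)) {
 *		virtqueue_disable_cb(vq);
 *		// more buffers arrived in the window; keep processing
 *	}
 */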
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
				 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
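/*
 * The delayed variant relies on the 3/4 thresholds computed in the
 * *_delayed_split()/*_delayed_packed() helpers above; a completion-heavy
 * driver (virtio-net style TX reclaim, for example) can use it to take
 * roughly one interrupt per batch of used buffers instead of one per buffer.
 */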
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
				 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->packed_ring = false;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
		vq->weak_barriers = false;

	vq->split.queue_dma_addr = 0;
	vq->split.queue_size_in_bytes = 0;

	vq->split.vring = vring;
	vq->split.avail_flags_shadow = 0;
	vq->split.avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
					vq->split.avail_flags_shadow);
	}

	vq->split.desc_state = kmalloc_array(vring.num,
			sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!vq->split.desc_state) {
		kfree(vq);
		return NULL;
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->split.desc_state, 0, vring.num *
			sizeof(struct vring_desc_state_split));

	spin_lock(&vdev->vqs_list_lock);
	list_add_tail(&vq->vq.list, &vdev->vqs);
	spin_unlock(&vdev->vqs_list_lock);
	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
	unsigned int index, unsigned int num, unsigned int vring_align,
	struct virtio_device *vdev, bool weak_barriers, bool may_reduce_num,
	bool context, bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *), const char *name)
{
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return vring_create_virtqueue_packed(index, num, vring_align,
				vdev, weak_barriers, may_reduce_num,
				context, notify, callback, name);

	return vring_create_virtqueue_split(index, num, vring_align,
			vdev, weak_barriers, may_reduce_num,
			context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
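/*
 * Transports normally call vring_create_virtqueue() with the ring size and
 * alignment they negotiated with the device; notify() is the transport's
 * own doorbell (a register write, for instance), and callback() is what
 * vring_interrupt() invokes when the device returns buffers.
 */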
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;

	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
		return NULL;

	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		if (vq->packed_ring) {
			vring_free_queue(vq->vq.vdev,
					 vq->packed.ring_size_in_bytes,
					 vq->packed.vring.desc,
					 vq->packed.ring_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.driver,
					 vq->packed.driver_event_dma_addr);

			vring_free_queue(vq->vq.vdev,
					 vq->packed.event_size_in_bytes,
					 vq->packed.vring.device,
					 vq->packed.device_event_dma_addr);

			kfree(vq->packed.desc_state);
			kfree(vq->packed.desc_extra);
		} else {
			vring_free_queue(vq->vq.vdev,
					 vq->split.queue_size_in_bytes,
					 vq->split.vring.desc,
					 vq->split.queue_dma_addr);
		}
	}
	if (!vq->packed_ring)
		kfree(vq->split.desc_state);
	spin_lock(&vq->vq.vdev->vqs_list_lock);
	list_del(&_vq->list);
	spin_unlock(&vq->vq.vdev->vqs_list_lock);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_ACCESS_PLATFORM:
			break;
		case VIRTIO_F_RING_PACKED:
			break;
		case VIRTIO_F_ORDER_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	spin_lock(&dev->vqs_list_lock);
	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);

		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
		WRITE_ONCE(vq->broken, true);
	}
	spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.ring_dma_addr;

	return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.driver_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	if (vq->packed_ring)
		return vq->packed.device_event_dma_addr;

	return vq->split.queue_dma_addr +
		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");