/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node node;
	vhost_work_fn_t fn;
	unsigned long flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct vhost_work work;
	__poll_t mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
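/*
 * Example (a minimal sketch, not part of this API): hooking a kick
 * eventfd up to a handler. my_handle_kick and kick_file are
 * illustrative names:
 *
 *	vhost_poll_init(&vq->poll, my_handle_kick, EPOLLIN, vq->dev);
 *	err = vhost_poll_start(&vq->poll, kick_file);
 *	...
 *	vhost_poll_stop(&vq->poll);
 *	vhost_poll_flush(&vq->poll);
 */
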
struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

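	/*
	 * A sketch of how a backend can unpack these fields when
	 * VIRTIO_F_RING_PACKED is negotiated (illustrative, not an
	 * API of this file):
	 *
	 *	u16 idx   = vq->last_avail_idx & 0x7fff;
	 *	bool wrap = vq->last_avail_idx >> 15;
	 */
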
	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg));
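/*
 * Example (a sketch of a backend's open() path; the MY_* constants and
 * my_handle_kick are illustrative, not defined here):
 *
 *	vqs[0] = &n->vq;
 *	n->vq.handle_kick = my_handle_kick;
 *	vhost_dev_init(&n->dev, vqs, 1, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 */
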
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

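/*
 * Typical kick-handler shape (a minimal sketch built from the
 * declarations above; locking and error handling trimmed). When the
 * ring drains, vhost_enable_notify() re-enables guest notifications
 * and returns true if new buffers raced in, so the loop rechecks:
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		... consume out descriptors, fill in descriptors ...
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */
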
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

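/*
 * Example (sketch): reporting an IOTLB miss to userspace. The v2
 * message field names are assumed from the uapi vhost_types.h, not
 * defined in this file:
 *
 *	node = vhost_new_msg(vq, VHOST_IOTLB_MSG_V2);
 *	if (!node)
 *		return -ENOMEM;
 *	node->msg_v2.type = VHOST_IOTLB_MSG_V2;
 *	node->msg_v2.iotlb.iova = iova;
 *	node->msg_v2.iotlb.type = VHOST_IOTLB_MISS;
 *	vhost_enqueue_msg(dev, &dev->read_list, node);
 */
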
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                        \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);    \
		if ((vq)->error_ctx)                     \
			eventfd_signal((vq)->error_ctx, 1); \
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq            Virtqueue.
 * @private_data  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq            Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}

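/*
 * Example (sketch): attaching backend private state (here a socket,
 * purely illustrative) while honouring the locking rule above:
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, sock);
 *	mutex_unlock(&vq->mutex);
 *
 * and symmetrically in the handler, under the same mutex:
 *
 *	sock = vhost_vq_get_backend(vq);
 *	if (!sock)
 *		return;
 */
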
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
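
/*
 * Example (sketch): reading the available index from guest memory with
 * the ring's negotiated endianness; this mirrors the access pattern
 * used by drivers/vhost/vhost.c:
 *
 *	__virtio16 idx;
 *
 *	if (get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */

#endif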