// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

static const struct vfio_device_ops vfio_ccw_dev_ops;
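
/*
 * Reset the device by driving the FSM through a close/open cycle:
 * CLOSE quiesces the subchannel and releases any in-flight
 * channel-program resources, OPEN makes it operational again.  A
 * device that is still NOT_OPER afterwards is unusable.
 */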
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
	/*
	 * If the FSM state is seen as Not Operational after closing
	 * and re-opening the mdev, return an error.
	 */
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	return 0;
}

static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	/* Drivers MUST unpin pages in response to an invalidation. */
	if (!cp_iova_pinned(&private->cp, iova, length))
		return;

	vfio_ccw_mdev_reset(private);
}
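
/*
 * Initialize the per-mdev state once the vfio_device core has
 * allocated it: locking, work items, the guest channel-program
 * buffer, and the four regions (I/O, async command, schib, CRW)
 * later exposed to userspace.  Any failure unwinds the allocations
 * made so far.
 */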
static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	mutex_init(&private->io_mutex);
	private->state = VFIO_CCW_STATE_STANDBY;
	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free_private;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free_cp;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free_io;

	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
						  GFP_KERNEL | GFP_DMA);
	if (!private->schib_region)
		goto out_free_cmd;

	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->crw_region)
		goto out_free_schib;

	return 0;

out_free_schib:
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
	kfree(private->cp.guest_cp);
out_free_private:
	mutex_destroy(&private->io_mutex);
	return -ENOMEM;
}
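
/*
 * Bind an mdev to the driver: allocate the vfio_ccw_private (which
 * runs vfio_ccw_mdev_init_dev() via the ->init callback) and register
 * it as an emulated-IOMMU vfio device.  The parent keeps a pointer to
 * the private data for the lifetime of the binding.
 */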
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
	struct subchannel *sch = to_subchannel(mdev->dev.parent);
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private;
	int ret;

	private = vfio_alloc_device(vfio_ccw_private, vdev, &mdev->dev,
				    &vfio_ccw_dev_ops);
	if (IS_ERR(private))
		return PTR_ERR(private);

	dev_set_drvdata(&parent->dev, private);

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);

	ret = vfio_register_emulated_iommu_dev(&private->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, private);
	return 0;

err_put_vdev:
	dev_set_drvdata(&parent->dev, NULL);
	vfio_put_device(&private->vdev);
	return ret;
}
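
/*
 * ->release counterpart of vfio_ccw_mdev_init_dev(), run once the last
 * reference to the vfio_device is gone: free any queued CRWs and the
 * buffers and regions allocated at init time.
 */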
static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	struct vfio_ccw_crw *crw, *temp;

	list_for_each_entry_safe(crw, temp, &private->crw, next) {
		list_del(&crw->next);
		kfree(crw);
	}

	kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
	mutex_destroy(&private->io_mutex);
}
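
/*
 * Unbind the mdev from the driver.  vfio_unregister_group_dev() does
 * not return until userspace has closed the device, so the final
 * vfio_put_device() below is safe.
 */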
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct subchannel *sch = to_subchannel(mdev->dev.parent);
	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);

	vfio_unregister_group_dev(&private->vdev);

	dev_set_drvdata(&parent->dev, NULL);
	vfio_put_device(&private->vdev);
}
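
/*
 * Userspace opened the device: register the optional device regions
 * (async command, schib, CRW) and move the FSM to the open state.  If
 * the device ends up NOT_OPER, fail the open and roll the regions
 * back.
 */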
static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	int ret;

	/* Device cannot simply be opened again from this state */
	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -EINVAL;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		return ret;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		ret = -EINVAL;
		goto out_unregister;
	}

	return ret;

out_unregister:
	vfio_ccw_unregister_dev_regions(private);
	return ret;
}

static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_unregister_dev_regions(private);
}
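
/*
 * Read the I/O region (the config region at index
 * VFIO_CCW_CONFIG_REGION_INDEX).  A typical userspace sequence is
 * roughly (sketch only; region_offset comes from
 * VFIO_DEVICE_GET_REGION_INFO):
 *
 *	pwrite(device_fd, &io_region, sizeof(io_region), region_offset);
 *	// wait for the VFIO_CCW_IO_IRQ_INDEX eventfd to fire
 *	pread(device_fd, &io_region, sizeof(io_region), region_offset);
 *
 * i.e. submit an ORB by writing the region, then read back irb_area
 * and ret_code once the interrupt arrives.
 */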
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_read(struct vfio_device *vdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}
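
/*
 * Write the I/O region, which kicks off the I/O request via the FSM.
 * mutex_trylock() is used so a submission racing with one that still
 * holds io_mutex fails fast with -EAGAIN instead of blocking.  The FSM
 * leaves its verdict in region->ret_code; a zero ret_code means the
 * request was accepted and 'count' is returned.
 */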
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_write(struct vfio_device *vdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}

static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
					 struct vfio_device_info *info)
{
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}
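
/*
 * The static config region is described inline; dynamically registered
 * regions (async command, schib, CRW) are described via a capability
 * chain of vfio_region_info_cap_type entries appended behind the
 * vfio_region_info header, per the VFIO region-info ABI.  If the
 * caller's buffer is too small, only the required argsz is reported
 * back and cap_offset stays zero.
 */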
static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private,
					 struct vfio_region_info *info,
					 unsigned long arg)
{
	int i;

	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);
	}
	}
	return 0;
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	switch (info->index) {
	case VFIO_CCW_IO_IRQ_INDEX:
	case VFIO_CCW_CRW_IRQ_INDEX:
	case VFIO_CCW_REQ_IRQ_INDEX:
		info->count = 1;
		info->flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
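
/*
 * Wire up (or trigger) one of the three eventfd-backed IRQs.  Per the
 * VFIO_DEVICE_SET_IRQS ABI, DATA_NONE and DATA_BOOL loopback-trigger
 * an already-registered eventfd, while DATA_EVENTFD installs a new
 * context (fd >= 0) or tears the existing one down (fd == -1).  A
 * userspace sketch (fields only, not a complete call):
 *
 *	irq_set.index = VFIO_CCW_IO_IRQ_INDEX;
 *	irq_set.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			VFIO_IRQ_SET_ACTION_TRIGGER;
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
 */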
static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	case VFIO_CCW_REQ_IRQ_INDEX:
		ctx = &private->req_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
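
/*
 * Append a device-specific region to the flat region array, grown with
 * krealloc().  Region indices handed to userspace stay stable because
 * regions are only (un)registered on open/close, never while the
 * device is in use.
 */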
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}

void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
	int i;

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);
	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}

static ssize_t vfio_ccw_mdev_ioctl(struct vfio_device *vdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(private, &info);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(private, &info, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(private, hdr.flags, hdr.index,
					      data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(private);
	default:
		return -ENOTTY;
	}
}

/* Request removal of the device */
static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
{
	struct vfio_ccw_private *private =
		container_of(vdev, struct vfio_ccw_private, vdev);
	struct device *dev = vdev->dev;

	if (private->req_trigger) {
		if (!(count % 10))
			dev_notice_ratelimited(dev,
					       "Relaying device request to user (#%u)\n",
					       count);

		eventfd_signal(private->req_trigger);
	} else if (count == 0) {
		dev_notice(dev,
			   "No device request channel registered, blocked until released by user\n");
	}
}

static const struct vfio_device_ops vfio_ccw_dev_ops = {
	.init = vfio_ccw_mdev_init_dev,
	.release = vfio_ccw_mdev_release_dev,
	.open_device = vfio_ccw_mdev_open_device,
	.close_device = vfio_ccw_mdev_close_device,
	.read = vfio_ccw_mdev_read,
	.write = vfio_ccw_mdev_write,
	.ioctl = vfio_ccw_mdev_ioctl,
	.request = vfio_ccw_mdev_request,
	.dma_unmap = vfio_ccw_dma_unmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};

struct mdev_driver vfio_ccw_mdev_driver = {
	.device_api = VFIO_DEVICE_API_CCW_STRING,
	.max_instances = 1,
	.driver = {
		.name = "vfio_ccw_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	.probe = vfio_ccw_mdev_probe,
	.remove = vfio_ccw_mdev_remove,
};