// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx)
{
	u32 ioas_id;

	return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
}

int vfio_df_iommufd_bind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;
	struct iommufd_ctx *ictx = df->iommufd;

	lockdep_assert_held(&vdev->dev_set->lock);

	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
}

int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	/* compat noiommu does not need to do ioas attach */
	if (vfio_device_is_noiommu(vdev))
		return 0;

	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		return ret;

	/* The legacy path has no way to return the selected pt_id */
	return vdev->ops->attach_ioas(vdev, &ioas_id);
}

void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev))
		return;

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_ictx(vdev->iommufd_device);
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);

static int vfio_iommufd_device_id(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_id(vdev->iommufd_device);
	return -EINVAL;
}

/*
 * Return devid for a device.
 *  valid ID for the device that is owned by the ictx
 *  -ENOENT = device is owned but there is no ID
 *  -ENODEV or other error = device is not owned
 *
 * (A hedged caller sketch follows this function.)
 */
int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	struct iommu_group *group;
	int devid;

	if (vfio_iommufd_device_ictx(vdev) == ictx)
		return vfio_iommufd_device_id(vdev);

	group = iommu_group_get(vdev->dev);
	if (!group)
		return -ENODEV;

	if (iommufd_ctx_has_group(ictx, group))
		devid = -ENOENT; /* owned by the ictx, but no iommufd_device */
	else
		devid = -ENODEV;

	iommu_group_put(group);

	return devid;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);

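/*
 * A minimal caller sketch, not part of this file: the tri-state return
 * maps onto "has an ID", "owned but no ID", and "not owned". The
 * report_*() helpers are hypothetical, made up for illustration:
 *
 *	int id = vfio_iommufd_get_dev_id(vdev, ictx);
 *
 *	if (id >= 0)
 *		report_devid(id);
 *	else if (id == -ENOENT)
 *		report_owned_without_id();
 *	else
 *		report_not_owned();
 */
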
/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev(). Drivers
 * using this ops set should call vfio_register_group_dev().
 */

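/*
 * A minimal sketch of that wiring, modeled on a vfio-pci style driver.
 * "my_physical_ops" and the elided members are hypothetical; the
 * .bind_iommufd/.unbind_iommufd/.attach_ioas/.detach_ioas members are
 * the struct vfio_device_ops hooks these helpers are exported to fill:
 *
 *	static const struct vfio_device_ops my_physical_ops = {
 *		...
 *		.bind_iommufd	= vfio_iommufd_physical_bind,
 *		.unbind_iommufd	= vfio_iommufd_physical_unbind,
 *		.attach_ioas	= vfio_iommufd_physical_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_physical_detach_ioas,
 *	};
 */
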
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	/* Detach from any attached IOAS before dropping the binding */
	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	if (vdev->iommufd_attached)
		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
	else
		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);

void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
		return;

	iommufd_device_detach(vdev->iommufd_device);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);

/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
 * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
 * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
 */

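/*
 * A minimal sketch of an emulated-path ops set. The driver context,
 * "my_emulated_ops", and my_dma_unmap() are hypothetical; the member
 * names are the struct vfio_device_ops hooks. Per the note above,
 * .dma_unmap is only needed when the driver actually calls
 * vfio_pin_pages()/vfio_dma_rw():
 *
 *	static const struct vfio_device_ops my_emulated_ops = {
 *		...
 *		.bind_iommufd	= vfio_iommufd_emulated_bind,
 *		.unbind_iommufd	= vfio_iommufd_emulated_unbind,
 *		.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_emulated_detach_ioas,
 *		.dma_unmap	= my_dma_unmap,
 *	};
 */
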
static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	if (vdev->ops->dma_unmap)
		vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached)
		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
	else
		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);

void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_access) ||
	    !vdev->iommufd_attached)
		return;

	iommufd_access_detach(vdev->iommufd_access);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);