// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 */
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/slab.h>
13 #include <linux/vdpa.h>
14 #include <uapi/linux/vdpa.h>
15 #include <net/genetlink.h>
16 #include <linux/mod_devicetable.h>
/* List of registered vdpa management devices; protected by vdpa_dev_mutex. */
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
static DEFINE_MUTEX(vdpa_dev_mutex);
/* Allocator of unique device indices used to build the default "vdpa%u" names. */
static DEFINE_IDA(vdpa_index_ida);

/* Forward declaration: the genetlink family is defined near the end of this file. */
static struct genl_family vdpa_nl_family;
25 static int vdpa_dev_probe(struct device *d)
27 struct vdpa_device *vdev = dev_to_vdpa(d);
28 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
31 if (drv && drv->probe)
32 ret = drv->probe(vdev);
37 static void vdpa_dev_remove(struct device *d)
39 struct vdpa_device *vdev = dev_to_vdpa(d);
40 struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
42 if (drv && drv->remove)
46 static struct bus_type vdpa_bus = {
48 .probe = vdpa_dev_probe,
49 .remove = vdpa_dev_remove,
52 static void vdpa_release_dev(struct device *d)
54 struct vdpa_device *vdev = dev_to_vdpa(d);
55 const struct vdpa_config_ops *ops = vdev->config;
60 ida_simple_remove(&vdpa_index_ida, vdev->index);
65 * __vdpa_alloc_device - allocate and initilaize a vDPA device
66 * This allows driver to some prepartion after device is
67 * initialized but before registered.
68 * @parent: the parent device
69 * @config: the bus operations that is supported by this device
70 * @size: size of the parent structure that contains private data
71 * @name: name of the vdpa device; optional.
72 * @use_va: indicate whether virtual address must be used by this device
74 * Driver should use vdpa_alloc_device() wrapper macro instead of
75 * using this directly.
77 * Return: Returns an error when parent/config/dma_dev is not set or fail to get
80 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
81 const struct vdpa_config_ops *config,
82 size_t size, const char *name,
85 struct vdpa_device *vdev;
91 if (!!config->dma_map != !!config->dma_unmap)
94 /* It should only work for the device that use on-chip IOMMU */
95 if (use_va && !(config->dma_map || config->set_map))
99 vdev = kzalloc(size, GFP_KERNEL);
103 err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
107 vdev->dev.bus = &vdpa_bus;
108 vdev->dev.parent = parent;
109 vdev->dev.release = vdpa_release_dev;
111 vdev->config = config;
112 vdev->features_valid = false;
113 vdev->use_va = use_va;
116 err = dev_set_name(&vdev->dev, "%s", name);
118 err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
122 device_initialize(&vdev->dev);
127 ida_simple_remove(&vdpa_index_ida, vdev->index);
133 EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
135 static int vdpa_name_match(struct device *dev, const void *data)
137 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
139 return (strcmp(dev_name(&vdev->dev), data) == 0);
142 static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
148 lockdep_assert_held(&vdpa_dev_mutex);
149 dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
154 return device_add(&vdev->dev);
158 * _vdpa_register_device - register a vDPA device with vdpa lock held
159 * Caller must have a succeed call of vdpa_alloc_device() before.
160 * Caller must invoke this routine in the management device dev_add()
161 * callback after setting up valid mgmtdev for this vdpa device.
162 * @vdev: the vdpa device to be registered to vDPA bus
163 * @nvqs: number of virtqueues supported by this device
165 * Return: Returns an error when fail to add device to vDPA bus
167 int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
172 return __vdpa_register_device(vdev, nvqs);
174 EXPORT_SYMBOL_GPL(_vdpa_register_device);
177 * vdpa_register_device - register a vDPA device
178 * Callers must have a succeed call of vdpa_alloc_device() before.
179 * @vdev: the vdpa device to be registered to vDPA bus
180 * @nvqs: number of virtqueues supported by this device
182 * Return: Returns an error when fail to add to vDPA bus
184 int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
188 mutex_lock(&vdpa_dev_mutex);
189 err = __vdpa_register_device(vdev, nvqs);
190 mutex_unlock(&vdpa_dev_mutex);
193 EXPORT_SYMBOL_GPL(vdpa_register_device);
196 * _vdpa_unregister_device - unregister a vDPA device
197 * Caller must invoke this routine as part of management device dev_del()
199 * @vdev: the vdpa device to be unregisted from vDPA bus
201 void _vdpa_unregister_device(struct vdpa_device *vdev)
203 lockdep_assert_held(&vdpa_dev_mutex);
204 WARN_ON(!vdev->mdev);
205 device_unregister(&vdev->dev);
207 EXPORT_SYMBOL_GPL(_vdpa_unregister_device);
210 * vdpa_unregister_device - unregister a vDPA device
211 * @vdev: the vdpa device to be unregisted from vDPA bus
213 void vdpa_unregister_device(struct vdpa_device *vdev)
215 mutex_lock(&vdpa_dev_mutex);
216 device_unregister(&vdev->dev);
217 mutex_unlock(&vdpa_dev_mutex);
219 EXPORT_SYMBOL_GPL(vdpa_unregister_device);
222 * __vdpa_register_driver - register a vDPA device driver
223 * @drv: the vdpa device driver to be registered
224 * @owner: module owner of the driver
226 * Return: Returns an err when fail to do the registration
228 int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
230 drv->driver.bus = &vdpa_bus;
231 drv->driver.owner = owner;
233 return driver_register(&drv->driver);
235 EXPORT_SYMBOL_GPL(__vdpa_register_driver);
238 * vdpa_unregister_driver - unregister a vDPA device driver
239 * @drv: the vdpa device driver to be unregistered
241 void vdpa_unregister_driver(struct vdpa_driver *drv)
243 driver_unregister(&drv->driver);
245 EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
248 * vdpa_mgmtdev_register - register a vdpa management device
250 * @mdev: Pointer to vdpa management device
251 * vdpa_mgmtdev_register() register a vdpa management device which supports
252 * vdpa device management.
253 * Return: Returns 0 on success or failure when required callback ops are not
256 int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
258 if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
261 INIT_LIST_HEAD(&mdev->list);
262 mutex_lock(&vdpa_dev_mutex);
263 list_add_tail(&mdev->list, &mdev_head);
264 mutex_unlock(&vdpa_dev_mutex);
267 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
269 static int vdpa_match_remove(struct device *dev, void *data)
271 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
272 struct vdpa_mgmt_dev *mdev = vdev->mdev;
275 mdev->ops->dev_del(mdev, vdev);
279 void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
281 mutex_lock(&vdpa_dev_mutex);
283 list_del(&mdev->list);
285 /* Filter out all the entries belong to this management device and delete it. */
286 bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);
288 mutex_unlock(&vdpa_dev_mutex);
290 EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
292 static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
293 const char *busname, const char *devname)
295 /* Bus name is optional for simulated management device, so ignore the
296 * device with bus if bus attribute is provided.
298 if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
301 if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
304 if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
305 (strcmp(dev_name(mdev->device), devname) == 0))
311 static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
313 struct vdpa_mgmt_dev *mdev;
314 const char *busname = NULL;
317 if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
318 return ERR_PTR(-EINVAL);
319 devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
320 if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
321 busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);
323 list_for_each_entry(mdev, &mdev_head, list) {
324 if (mgmtdev_handle_match(mdev, busname, devname))
327 return ERR_PTR(-ENODEV);
330 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
332 if (mdev->device->bus &&
333 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
335 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
340 static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
341 u32 portid, u32 seq, int flags)
343 u64 supported_classes = 0;
348 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
351 err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
355 while (mdev->id_table[i].device) {
356 if (mdev->id_table[i].device <= 63)
357 supported_classes |= BIT_ULL(mdev->id_table[i].device);
361 if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
362 supported_classes, VDPA_ATTR_UNSPEC)) {
367 genlmsg_end(msg, hdr);
371 genlmsg_cancel(msg, hdr);
375 static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
377 struct vdpa_mgmt_dev *mdev;
381 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
385 mutex_lock(&vdpa_dev_mutex);
386 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
388 mutex_unlock(&vdpa_dev_mutex);
389 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
394 err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
395 mutex_unlock(&vdpa_dev_mutex);
398 err = genlmsg_reply(msg, info);
407 vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
409 struct vdpa_mgmt_dev *mdev;
410 int start = cb->args[0];
414 mutex_lock(&vdpa_dev_mutex);
415 list_for_each_entry(mdev, &mdev_head, list) {
420 err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
421 cb->nlh->nlmsg_seq, NLM_F_MULTI);
427 mutex_unlock(&vdpa_dev_mutex);
432 static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
434 struct vdpa_mgmt_dev *mdev;
438 if (!info->attrs[VDPA_ATTR_DEV_NAME])
441 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
443 mutex_lock(&vdpa_dev_mutex);
444 mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
446 NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
451 err = mdev->ops->dev_add(mdev, name);
453 mutex_unlock(&vdpa_dev_mutex);
457 static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
459 struct vdpa_mgmt_dev *mdev;
460 struct vdpa_device *vdev;
465 if (!info->attrs[VDPA_ATTR_DEV_NAME])
467 name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
469 mutex_lock(&vdpa_dev_mutex);
470 dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
472 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
476 vdev = container_of(dev, struct vdpa_device, dev);
478 NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
483 mdev->ops->dev_del(mdev, vdev);
487 mutex_unlock(&vdpa_dev_mutex);
492 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
493 int flags, struct netlink_ext_ack *extack)
501 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
505 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
509 device_id = vdev->config->get_device_id(vdev);
510 vendor_id = vdev->config->get_vendor_id(vdev);
511 max_vq_size = vdev->config->get_vq_num_max(vdev);
514 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
516 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
518 if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
520 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
522 if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
525 genlmsg_end(msg, hdr);
529 genlmsg_cancel(msg, hdr);
533 static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
535 struct vdpa_device *vdev;
541 if (!info->attrs[VDPA_ATTR_DEV_NAME])
543 devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
544 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
548 mutex_lock(&vdpa_dev_mutex);
549 dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
551 NL_SET_ERR_MSG_MOD(info->extack, "device not found");
555 vdev = container_of(dev, struct vdpa_device, dev);
560 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
564 err = genlmsg_reply(msg, info);
566 mutex_unlock(&vdpa_dev_mutex);
572 mutex_unlock(&vdpa_dev_mutex);
/* State threaded through bus_for_each_dev() during a device dump. */
struct vdpa_dev_dump_info {
	struct sk_buff *msg;		/* message being filled */
	struct netlink_callback *cb;	/* dump context (portid, seq, extack) */
	int start_idx;			/* resume point from cb->args[0] */
	int idx;			/* devices visited so far */
};
584 static int vdpa_dev_dump(struct device *dev, void *data)
586 struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
587 struct vdpa_dev_dump_info *info = data;
592 if (info->idx < info->start_idx) {
596 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
597 info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
605 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
607 struct vdpa_dev_dump_info info;
611 info.start_idx = cb->args[0];
614 mutex_lock(&vdpa_dev_mutex);
615 bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
616 mutex_unlock(&vdpa_dev_mutex);
617 cb->args[0] = info.idx;
621 static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
622 [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
623 [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
624 [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
627 static const struct genl_ops vdpa_nl_ops[] = {
629 .cmd = VDPA_CMD_MGMTDEV_GET,
630 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
631 .doit = vdpa_nl_cmd_mgmtdev_get_doit,
632 .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
635 .cmd = VDPA_CMD_DEV_NEW,
636 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
637 .doit = vdpa_nl_cmd_dev_add_set_doit,
638 .flags = GENL_ADMIN_PERM,
641 .cmd = VDPA_CMD_DEV_DEL,
642 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
643 .doit = vdpa_nl_cmd_dev_del_set_doit,
644 .flags = GENL_ADMIN_PERM,
647 .cmd = VDPA_CMD_DEV_GET,
648 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
649 .doit = vdpa_nl_cmd_dev_get_doit,
650 .dumpit = vdpa_nl_cmd_dev_get_dumpit,
654 static struct genl_family vdpa_nl_family __ro_after_init = {
655 .name = VDPA_GENL_NAME,
656 .version = VDPA_GENL_VERSION,
657 .maxattr = VDPA_ATTR_MAX,
658 .policy = vdpa_nl_policy,
660 .module = THIS_MODULE,
662 .n_ops = ARRAY_SIZE(vdpa_nl_ops),
665 static int vdpa_init(void)
669 err = bus_register(&vdpa_bus);
672 err = genl_register_family(&vdpa_nl_family);
678 bus_unregister(&vdpa_bus);
682 static void __exit vdpa_exit(void)
684 genl_unregister_family(&vdpa_nl_family);
685 bus_unregister(&vdpa_bus);
686 ida_destroy(&vdpa_index_ida);
/* Register early (core_initcall) so vdpa drivers can probe at device_initcall. */
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");