1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
5 #include <rdma/uverbs_ioctl.h>
6 #include <rdma/rdma_user_ioctl.h>
7 #include <linux/bitops.h>
/*
 * Default handler for write/write_ex command slots that have no real
 * implementation; uapi_finalize() points every unused dispatch slot at
 * this via uapi->notsupp_method.
 * NOTE(review): the body is elided in this capture (presumably returns
 * an "operation not supported" errno — confirm against the full file).
 * The leading integers on lines in this file are original line numbers
 * from the capture, not code.
 */
11 static int ib_uverbs_notsupp(struct uverbs_attr_bundle *attrs)
/*
 * Allocate a zeroed element of @alloc_size and insert it into the uapi
 * radix tree at @key.  Returns the new element or an ERR_PTR
 * (-EOVERFLOW for an invalid key, -ENOMEM on allocation failure).
 * NOTE(review): interior lines (locals, the radix_tree_insert failure
 * cleanup and final return) are elided in this capture.
 */
16 static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size)
/* UVERBS_API_KEY_ERR is the sentinel the key constructors produce for
 * out-of-range ids; refuse to insert it. */
21 if (key == UVERBS_API_KEY_ERR)
22 return ERR_PTR(-EOVERFLOW);
24 elm = kzalloc(alloc_size, GFP_KERNEL);
26 return ERR_PTR(-ENOMEM);
/* rc presumably propagated as ERR_PTR on insert failure — elided. */
27 rc = radix_tree_insert(&uapi->radix, key, elm);
/*
 * Add an element at @key, or return the one already present.  On
 * -EEXIST from uapi_add_elm() the existing element is looked up instead
 * of failing; *exists presumably distinguishes the two outcomes — the
 * assignments are elided in this capture, confirm against the full file.
 */
36 static void *uapi_add_get_elm(struct uverbs_api *uapi, u32 key,
37 size_t alloc_size, bool *exists)
41 elm = uapi_add_elm(uapi, key, alloc_size);
/* Any error other than -EEXIST is propagated to the caller. */
47 if (elm != ERR_PTR(-EEXIST))
50 elm = radix_tree_lookup(&uapi->radix, key);
/* Lookup failing right after -EEXIST indicates tree corruption. */
52 return ERR_PTR(-EINVAL);
/*
 * Register one write() / write_ex() command from @def under @obj_key.
 * Builds the method key, creates (or re-uses) the write-method element,
 * marks it disabled when the device's command-mask bit is clear, copies
 * the request/response metadata, and returns the chosen key through
 * *cur_method_key.  NOTE(review): the if/else lines selecting between
 * the is_ex and plain paths are elided in this capture.
 */
57 static int uapi_create_write(struct uverbs_api *uapi,
58 struct ib_device *ibdev,
59 const struct uapi_definition *def,
63 struct uverbs_api_write_method *method_elm;
64 u32 method_key = obj_key;
/* Extended and legacy write commands live in distinct key spaces. */
68 method_key |= uapi_key_write_ex_method(def->write.command_num);
70 method_key |= uapi_key_write_method(def->write.command_num);
72 method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm),
74 if (IS_ERR(method_elm))
75 return PTR_ERR(method_elm);
/* A pre-existing element must agree on ex/non-ex classification. */
77 if (WARN_ON(exists && (def->write.is_ex != method_elm->is_ex)))
80 method_elm->is_ex = def->write.is_ex;
81 method_elm->handler = def->func_write;
/* Disable the method when the driver's mask lacks this command bit;
 * uverbs_ex_cmd_mask for is_ex, uverbs_cmd_mask otherwise. */
83 method_elm->disabled = !(ibdev->uverbs_ex_cmd_mask &
84 BIT_ULL(def->write.command_num));
86 method_elm->disabled = !(ibdev->uverbs_cmd_mask &
87 BIT_ULL(def->write.command_num));
/* Req/resp layout metadata only applies to non-ex handlers. */
89 if (!def->write.is_ex && def->func_write) {
90 method_elm->has_udata = def->write.has_udata;
91 method_elm->has_resp = def->write.has_resp;
92 method_elm->req_size = def->write.req_size;
93 method_elm->resp_size = def->write.resp_size;
96 *cur_method_key = method_key;
/*
 * Merge one ioctl-method definition into the uapi radix tree under
 * @obj_key: install the handler, track whether the method belongs to a
 * driver (affects disassociation handling), and create one attribute
 * slot per attribute in the method spec.
 * NOTE(review): interior lines (attr-slot allocation arguments, loop
 * braces) are elided in this capture.
 */
100 static int uapi_merge_method(struct uverbs_api *uapi,
101 struct uverbs_api_object *obj_elm, u32 obj_key,
102 const struct uverbs_method_def *method,
105 u32 method_key = obj_key | uapi_key_ioctl_method(method->id);
106 struct uverbs_api_ioctl_method *method_elm;
113 method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm),
115 if (IS_ERR(method_elm))
116 return PTR_ERR(method_elm);
/* Re-merging an existing method may only add attributes, not replace
 * the handler. */
119 * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE
121 if (WARN_ON(method->handler))
124 WARN_ON(!method->handler);
125 rcu_assign_pointer(method_elm->handler, method->handler);
/* The generic destroy handler has no driver dependency, so it does not
 * taint the method as driver-owned. */
126 if (method->handler != uverbs_destroy_def_handler)
127 method_elm->driver_method = is_driver;
130 for (i = 0; i != method->num_attrs; i++) {
131 const struct uverbs_attr_def *attr = (*method->attrs)[i];
132 struct uverbs_api_attr *attr_slot;
138 * ENUM_IN contains the 'ids' pointer to the driver's .rodata,
139 * so if it is specified by a driver then it always makes this
140 * into a driver method.
142 if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
143 method_elm->driver_method |= is_driver;
146 * Like other uobject based things we only support a single
147 * uobject being NEW'd or DESTROY'd
149 if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
150 u8 access = attr->attr.u2.objs_arr.access;
152 if (WARN_ON(access == UVERBS_ACCESS_NEW ||
153 access == UVERBS_ACCESS_DESTROY))
/* Plain uapi_add_elm(): a duplicate attr key is an error here. */
158 uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
160 /* Attributes are not allowed to be modified by drivers */
161 if (IS_ERR(attr_slot))
162 return PTR_ERR(attr_slot);
164 attr_slot->spec = attr->attr;
/*
 * Merge an object definition and all of its methods into the uapi tree.
 * Records the object's type_attrs/type_class once (WARN on duplicate),
 * restricts drivers to idr/fd object classes, then merges each method.
 * NOTE(review): several interior lines (error returns, loop braces) are
 * elided in this capture.
 */
170 static int uapi_merge_obj_tree(struct uverbs_api *uapi,
171 const struct uverbs_object_def *obj,
174 struct uverbs_api_object *obj_elm;
180 obj_key = uapi_key_obj(obj->id);
181 obj_elm = uapi_add_get_elm(uapi, obj_key, sizeof(*obj_elm), &exists);
183 return PTR_ERR(obj_elm);
185 if (obj->type_attrs) {
/* Only one definition may supply the type_attrs for an object id. */
186 if (WARN_ON(obj_elm->type_attrs))
189 obj_elm->id = obj->id;
190 obj_elm->type_attrs = obj->type_attrs;
191 obj_elm->type_class = obj->type_attrs->type_class;
193 * Today drivers are only permitted to use idr_class and
194 * fd_class types. We can revoke the IDR types during
195 * disassociation, and the FD types require the driver to use
196 * struct file_operations.owner to prevent the driver module
197 * code from unloading while the file is open. This provides
198 * enough safety that uverbs_uobject_fd_release() will
199 * continue to work. Drivers using FD are responsible to
200 * handle disassociation of the device on their own.
202 if (WARN_ON(is_driver &&
203 obj->type_attrs->type_class != &uverbs_idr_class &&
204 obj->type_attrs->type_class != &uverbs_fd_class))
211 for (i = 0; i != obj->num_methods; i++) {
212 const struct uverbs_method_def *method = (*obj->methods)[i];
217 rc = uapi_merge_method(uapi, obj_elm, obj_key, method,
/*
 * Mark the element selected by @def->scope as disabled: the whole
 * object, an ioctl method, or a write/write_ex method.  Uses
 * uapi_add_get_elm() so a not-yet-merged element is created in the
 * disabled state; uapi_finalize_disable() later prunes disabled
 * entries.  NOTE(review): early returns between the branches are
 * elided in this capture.
 */
226 static int uapi_disable_elm(struct uverbs_api *uapi,
227 const struct uapi_definition *def,
233 if (def->scope == UAPI_SCOPE_OBJECT) {
234 struct uverbs_api_object *obj_elm;
236 obj_elm = uapi_add_get_elm(
237 uapi, obj_key, sizeof(*obj_elm), &exists);
239 return PTR_ERR(obj_elm);
240 obj_elm->disabled = 1;
244 if (def->scope == UAPI_SCOPE_METHOD &&
245 uapi_key_is_ioctl_method(method_key)) {
246 struct uverbs_api_ioctl_method *method_elm;
248 method_elm = uapi_add_get_elm(uapi, method_key,
249 sizeof(*method_elm), &exists);
250 if (IS_ERR(method_elm))
251 return PTR_ERR(method_elm);
252 method_elm->disabled = 1;
/* Write and write_ex methods share the same element type. */
256 if (def->scope == UAPI_SCOPE_METHOD &&
257 (uapi_key_is_write_method(method_key) ||
258 uapi_key_is_write_ex_method(method_key))) {
259 struct uverbs_api_write_method *write_elm;
261 write_elm = uapi_add_get_elm(uapi, method_key,
262 sizeof(*write_elm), &exists);
263 if (IS_ERR(write_elm))
264 return PTR_ERR(write_elm);
265 write_elm->disabled = 1;
/*
 * Walk a uapi_definition list and merge each entry into @uapi,
 * dispatching on def->kind: nested chains, chained object trees,
 * device-support predicates (which disable the current object/method
 * when unsupported), object starts, and write methods.
 * cur_obj_key/cur_method_key track the most recent object/method so
 * support predicates know what to disable.
 * NOTE(review): the list-walk loop header, several case labels and
 * break/return lines are elided in this capture.
 */
273 static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev,
274 const struct uapi_definition *def_list,
277 const struct uapi_definition *def = def_list;
278 u32 cur_obj_key = UVERBS_API_KEY_ERR;
279 u32 cur_method_key = UVERBS_API_KEY_ERR;
287 switch ((enum uapi_definition_kind)def->kind) {
/* Recurse into a chained definition list. */
289 rc = uapi_merge_def(uapi, ibdev, def->chain, is_driver);
294 case UAPI_DEF_CHAIN_OBJ_TREE:
/* The chained tree must describe the object id it was attached to. */
295 if (WARN_ON(def->object_start.object_id !=
296 def->chain_obj_tree->id))
299 cur_obj_key = uapi_key_obj(def->object_start.object_id);
300 rc = uapi_merge_obj_tree(uapi, def->chain_obj_tree,
/* Disable the current element when the driver lacks the required
 * ib_device_ops function pointer (tested at needs_fn_offset). */
309 case UAPI_DEF_IS_SUPPORTED_DEV_FN: {
311 (void *)(&ibdev->ops) + def->needs_fn_offset;
315 rc = uapi_disable_elm(
316 uapi, def, cur_obj_key, cur_method_key);
322 case UAPI_DEF_IS_SUPPORTED_FUNC:
323 if (def->func_is_supported(ibdev))
325 rc = uapi_disable_elm(
326 uapi, def, cur_obj_key, cur_method_key);
331 case UAPI_DEF_OBJECT_START: {
332 struct uverbs_api_object *obj_elm;
334 cur_obj_key = uapi_key_obj(def->object_start.object_id);
335 obj_elm = uapi_add_get_elm(uapi, cur_obj_key,
336 sizeof(*obj_elm), &exists);
338 return PTR_ERR(obj_elm);
343 rc = uapi_create_write(
344 uapi, ibdev, def, cur_obj_key, &cur_method_key);
/*
 * Post-merge fixups for one ioctl method: scan its attribute slots to
 * build the mandatory-attr bitmap, record has_udata, locate the single
 * permitted NEW/DESTROY uobject attr (destroy_bkey), compute the attr
 * key-bitmap length and size the attr bundle.
 * NOTE(review): the return type, some braces/continues and error
 * returns are elided in this capture.
 */
355 uapi_finalize_ioctl_method(struct uverbs_api *uapi,
356 struct uverbs_api_ioctl_method *method_elm,
359 struct radix_tree_iter iter;
360 unsigned int num_attrs = 0;
361 unsigned int max_bkey = 0;
362 bool single_uobj = false;
/* BKEY_LEN acts as "no destroy attribute" sentinel. */
365 method_elm->destroy_bkey = UVERBS_API_ATTR_BKEY_LEN;
366 radix_tree_for_each_slot (slot, &uapi->radix, &iter,
367 uapi_key_attrs_start(method_key)) {
368 struct uverbs_api_attr *elm =
369 rcu_dereference_protected(*slot, true);
370 u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
371 u32 attr_bkey = uapi_bkey_attr(attr_key);
372 u8 type = elm->spec.type;
/* Stop once iteration leaves this method's attr key range. */
374 if (uapi_key_attr_to_ioctl_method(iter.index) !=
375 uapi_key_attr_to_ioctl_method(method_key))
378 if (elm->spec.mandatory)
379 __set_bit(attr_bkey, method_elm->attr_mandatory);
381 if (elm->spec.is_udata)
382 method_elm->has_udata = true;
384 if (type == UVERBS_ATTR_TYPE_IDR ||
385 type == UVERBS_ATTR_TYPE_FD) {
386 u8 access = elm->spec.u.obj.access;
389 * Verbs specs may only have one NEW/DESTROY, we don't
390 * have the infrastructure to abort multiple NEW's or
391 * cope with multiple DESTROY failure.
393 if (access == UVERBS_ACCESS_NEW ||
394 access == UVERBS_ACCESS_DESTROY) {
395 if (WARN_ON(single_uobj))
/* NEW/DESTROY handles must be mandatory attributes. */
399 if (WARN_ON(!elm->spec.mandatory))
403 if (access == UVERBS_ACCESS_DESTROY)
404 method_elm->destroy_bkey = attr_bkey;
407 max_bkey = max(max_bkey, attr_bkey);
411 method_elm->key_bitmap_len = max_bkey + 1;
412 WARN_ON(method_elm->key_bitmap_len > UVERBS_API_ATTR_BKEY_LEN);
414 uapi_compute_bundle_size(method_elm, num_attrs);
/*
 * Final pass over the merged tree: finalize every ioctl method, then
 * build the flat write/write_ex dispatch arrays.  A single allocation
 * holds both arrays (write_ex_methods aliases the tail), every slot is
 * pre-filled with the notsupp fallback, and a second scan installs the
 * real write-method elements.  NOTE(review): the kmalloc_array failure
 * return and some braces are elided in this capture.
 */
418 static int uapi_finalize(struct uverbs_api *uapi)
420 const struct uverbs_api_write_method **data;
421 unsigned long max_write_ex = 0;
422 unsigned long max_write = 0;
423 struct radix_tree_iter iter;
428 radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
429 struct uverbs_api_ioctl_method *method_elm =
430 rcu_dereference_protected(*slot, true);
432 if (uapi_key_is_ioctl_method(iter.index)) {
433 rc = uapi_finalize_ioctl_method(uapi, method_elm,
/* Track the highest command number of each flavor to size the
 * dispatch arrays. */
439 if (uapi_key_is_write_method(iter.index))
440 max_write = max(max_write,
441 iter.index & UVERBS_API_ATTR_KEY_MASK);
442 if (uapi_key_is_write_ex_method(iter.index))
445 iter.index & UVERBS_API_ATTR_KEY_MASK);
448 uapi->notsupp_method.handler = ib_uverbs_notsupp;
449 uapi->num_write = max_write + 1;
450 uapi->num_write_ex = max_write_ex + 1;
451 data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
452 sizeof(*uapi->write_methods), GFP_KERNEL);
/* Default every slot so unknown command numbers fail cleanly. */
456 for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
457 data[i] = &uapi->notsupp_method;
458 uapi->write_methods = data;
459 uapi->write_ex_methods = data + uapi->num_write;
461 radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
462 if (uapi_key_is_write_method(iter.index))
463 uapi->write_methods[iter.index &
464 UVERBS_API_ATTR_KEY_MASK] =
465 rcu_dereference_protected(*slot, true);
466 if (uapi_key_is_write_ex_method(iter.index))
467 uapi->write_ex_methods[iter.index &
468 UVERBS_API_ATTR_KEY_MASK] =
469 rcu_dereference_protected(*slot, true);
/*
 * Free and delete every radix-tree entry whose key falls in
 * [start, last] (inclusive).  Iteration is safe against deletion via
 * radix_tree_iter_delete().
 */
475 static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last)
477 struct radix_tree_iter iter;
480 radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) {
481 if (iter.index > last)
483 kfree(rcu_dereference_protected(*slot, true));
484 radix_tree_iter_delete(&uapi->radix, &iter, slot);
/*
 * Remove an object and everything nested under it: ORing in the full
 * method and attr key masks makes 'last' the highest key that can
 * belong to this object.
 */
488 static void uapi_remove_object(struct uverbs_api *uapi, u32 obj_key)
490 uapi_remove_range(uapi, obj_key,
491 obj_key | UVERBS_API_METHOD_KEY_MASK |
492 UVERBS_API_ATTR_KEY_MASK);
/*
 * Remove a method and all of its attribute entries (the attr key mask
 * spans every attr key under this method).
 */
495 static void uapi_remove_method(struct uverbs_api *uapi, u32 method_key)
497 uapi_remove_range(uapi, method_key,
498 method_key | UVERBS_API_ATTR_KEY_MASK);
/*
 * Return the object type id an attribute spec refers to, or
 * UVERBS_API_KEY_ERR when the attribute does not reference an object
 * (only IDR, FD and IDRS_ARRAY attrs carry one).
 */
502 static u32 uapi_get_obj_id(struct uverbs_attr_spec *spec)
504 if (spec->type == UVERBS_ATTR_TYPE_IDR ||
505 spec->type == UVERBS_ATTR_TYPE_FD)
506 return spec->u.obj.obj_type;
507 if (spec->type == UVERBS_ATTR_TYPE_IDRS_ARRAY)
508 return spec->u2.objs_arr.obj_type;
509 return UVERBS_API_KEY_ERR;
/*
 * Debug check: a key must classify as exactly one kind (object, ioctl
 * method, write method, write_ex method, or attr); WARN otherwise.
 * NOTE(review): the count++ lines under each predicate are elided in
 * this capture.
 */
512 static void uapi_key_okay(u32 key)
514 unsigned int count = 0;
516 if (uapi_key_is_object(key))
518 if (uapi_key_is_ioctl_method(key))
520 if (uapi_key_is_write_method(key))
522 if (uapi_key_is_write_ex_method(key))
524 if (uapi_key_is_attr(key))
526 WARN(count != 1, "Bad count %d key=%x", count, key);
/*
 * Prune every disabled element from the tree, cascading: removing an
 * object removes its methods/attrs; a mandatory attr referencing a
 * missing or disabled object disables its whole method.  Deleting
 * entries invalidates the iteration, so the scan restarts from
 * starting_key (and, per the "check all the attrs again" note, appears
 * to use scan_again for a full re-pass — the loop lines driving that
 * are elided in this capture; confirm against the full file).
 */
529 static void uapi_finalize_disable(struct uverbs_api *uapi)
531 struct radix_tree_iter iter;
532 u32 starting_key = 0;
533 bool scan_again = false;
537 radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) {
538 uapi_key_okay(iter.index);
540 if (uapi_key_is_object(iter.index)) {
541 struct uverbs_api_object *obj_elm =
542 rcu_dereference_protected(*slot, true);
544 if (obj_elm->disabled) {
545 /* Have to check all the attrs again */
547 starting_key = iter.index;
548 uapi_remove_object(uapi, iter.index);
554 if (uapi_key_is_ioctl_method(iter.index)) {
555 struct uverbs_api_ioctl_method *method_elm =
556 rcu_dereference_protected(*slot, true);
558 if (method_elm->disabled) {
559 starting_key = iter.index;
560 uapi_remove_method(uapi, iter.index);
566 if (uapi_key_is_write_method(iter.index) ||
567 uapi_key_is_write_ex_method(iter.index)) {
568 struct uverbs_api_write_method *method_elm =
569 rcu_dereference_protected(*slot, true);
/* Write methods have no nested entries; delete in place. */
571 if (method_elm->disabled) {
573 radix_tree_iter_delete(&uapi->radix, &iter, slot);
578 if (uapi_key_is_attr(iter.index)) {
579 struct uverbs_api_attr *attr_elm =
580 rcu_dereference_protected(*slot, true);
581 const struct uverbs_api_object *tmp_obj;
585 * If the method has a mandatory object handle
586 * attribute which relies on an object which is not
587 * present then the entire method is uncallable.
589 if (!attr_elm->spec.mandatory)
591 obj_key = uapi_get_obj_id(&attr_elm->spec);
592 if (obj_key == UVERBS_API_KEY_ERR)
594 tmp_obj = uapi_get_object(uapi, obj_key);
595 if (IS_ERR(tmp_obj)) {
/* -ENOMSG presumably means "object absent" here — the elided branch
 * decides whether that removes the method; confirm upstream. */
596 if (PTR_ERR(tmp_obj) == -ENOMSG)
599 if (!tmp_obj->disabled)
603 starting_key = iter.index;
/* Remove the attr's enclosing method (object|method key bits). */
606 iter.index & (UVERBS_API_OBJ_KEY_MASK |
607 UVERBS_API_METHOD_KEY_MASK));
/*
 * Tear down a uverbs_api: free every radix entry over the full key
 * space, then the write dispatch arrays (one allocation; see
 * uapi_finalize()).  NOTE(review): NULL-check of uapi and the final
 * kfree(uapi) are elided in this capture.
 */
621 void uverbs_destroy_api(struct uverbs_api *uapi)
626 uapi_remove_range(uapi, 0, U32_MAX);
627 kfree(uapi->write_methods);
/*
 * The core (driver-independent) uverbs definition chains merged into
 * every device's uapi tree by uverbs_alloc_api() before the driver's
 * own definitions.  NOTE(review): the array terminator and closing
 * brace are elided in this capture.
 */
631 static const struct uapi_definition uverbs_core_api[] = {
632 UAPI_DEF_CHAIN(uverbs_def_obj_async_fd),
633 UAPI_DEF_CHAIN(uverbs_def_obj_counters),
634 UAPI_DEF_CHAIN(uverbs_def_obj_cq),
635 UAPI_DEF_CHAIN(uverbs_def_obj_device),
636 UAPI_DEF_CHAIN(uverbs_def_obj_dm),
637 UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
638 UAPI_DEF_CHAIN(uverbs_def_obj_intf),
639 UAPI_DEF_CHAIN(uverbs_def_obj_mr),
640 UAPI_DEF_CHAIN(uverbs_def_obj_qp),
641 UAPI_DEF_CHAIN(uverbs_def_obj_srq),
642 UAPI_DEF_CHAIN(uverbs_def_obj_wq),
643 UAPI_DEF_CHAIN(uverbs_def_write_intf),
/*
 * Build the parsing tree for one ib_device: allocate the uverbs_api,
 * merge the core definitions then the driver's (driver_def, merged
 * with is_driver=true), prune unsupported elements and finalize.  On
 * failure the partially built tree is destroyed and (per the elided
 * tail) an ERR_PTR is returned.  NOTE(review): goto-cleanup labels and
 * the success/error returns are elided in this capture.
 */
647 struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev)
649 struct uverbs_api *uapi;
652 uapi = kzalloc(sizeof(*uapi), GFP_KERNEL);
654 return ERR_PTR(-ENOMEM);
656 INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL);
657 uapi->driver_id = ibdev->ops.driver_id;
659 rc = uapi_merge_def(uapi, ibdev, uverbs_core_api, false);
662 rc = uapi_merge_def(uapi, ibdev, ibdev->driver_def, true);
666 uapi_finalize_disable(uapi);
667 rc = uapi_finalize(uapi);
/* A merge/finalize failure indicates a malformed definition tree. */
675 "Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n",
678 uverbs_destroy_api(uapi);
683 * The pre version is done before destroying the HW objects, it only blocks
684 * off method access. All methods that require the ib_dev or the module data
685 * must test one of these assignments prior to continuing.
687 void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev)
689 struct uverbs_api *uapi = uverbs_dev->uapi;
690 struct radix_tree_iter iter;
/* Clearing ib_dev first stops new method calls from resolving the
 * device. */
693 rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
695 radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
696 if (uapi_key_is_ioctl_method(iter.index)) {
697 struct uverbs_api_ioctl_method *method_elm =
698 rcu_dereference_protected(*slot, true);
/* Only driver-owned handlers are NULLed; core handlers stay usable. */
700 if (method_elm->driver_method)
701 rcu_assign_pointer(method_elm->handler, NULL);
/* Wait out in-flight readers before callers tear down HW objects. */
705 synchronize_srcu(&uverbs_dev->disassociate_srcu);
709 * Called when a driver disassociates from the ib_uverbs_device. The
710 * assumption is that the driver module will unload after. Replace everything
711 * related to the driver with NULL as a safety measure.
713 void uverbs_disassociate_api(struct uverbs_api *uapi)
715 struct radix_tree_iter iter;
718 radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
719 if (uapi_key_is_object(iter.index)) {
720 struct uverbs_api_object *object_elm =
721 rcu_dereference_protected(*slot, true);
724 * Some type_attrs are in the driver module. We don't
725 * bother to keep track of which since there should be
726 * no use of this after disassociate.
728 object_elm->type_attrs = NULL;
729 } else if (uapi_key_is_attr(iter.index)) {
730 struct uverbs_api_attr *elm =
731 rcu_dereference_protected(*slot, true);
733 if (elm->spec.type == UVERBS_ATTR_TYPE_ENUM_IN)
734 elm->spec.u2.enum_def.ids = NULL;