2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
55 #include "core_priv.h"
56 #include <trace/events/rdma_core.h>
58 static int ib_resolve_eth_dmac(struct ib_device *device,
59 struct rdma_ah_attr *ah_attr);
61 static const char * const ib_events[] = {
62 [IB_EVENT_CQ_ERR] = "CQ error",
63 [IB_EVENT_QP_FATAL] = "QP fatal error",
64 [IB_EVENT_QP_REQ_ERR] = "QP request error",
65 [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
66 [IB_EVENT_COMM_EST] = "communication established",
67 [IB_EVENT_SQ_DRAINED] = "send queue drained",
68 [IB_EVENT_PATH_MIG] = "path migration successful",
69 [IB_EVENT_PATH_MIG_ERR] = "path migration error",
70 [IB_EVENT_DEVICE_FATAL] = "device fatal error",
71 [IB_EVENT_PORT_ACTIVE] = "port active",
72 [IB_EVENT_PORT_ERR] = "port error",
73 [IB_EVENT_LID_CHANGE] = "LID change",
74 [IB_EVENT_PKEY_CHANGE] = "P_key change",
75 [IB_EVENT_SM_CHANGE] = "SM change",
76 [IB_EVENT_SRQ_ERR] = "SRQ error",
77 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
78 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
79 [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
80 [IB_EVENT_GID_CHANGE] = "GID changed",
83 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
87 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
88 ib_events[index] : "unrecognized event";
90 EXPORT_SYMBOL(ib_event_msg);
92 static const char * const wc_statuses[] = {
93 [IB_WC_SUCCESS] = "success",
94 [IB_WC_LOC_LEN_ERR] = "local length error",
95 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
96 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
97 [IB_WC_LOC_PROT_ERR] = "local protection error",
98 [IB_WC_WR_FLUSH_ERR] = "WR flushed",
99 [IB_WC_MW_BIND_ERR] = "memory bind operation error",
100 [IB_WC_BAD_RESP_ERR] = "bad response error",
101 [IB_WC_LOC_ACCESS_ERR] = "local access error",
102 [IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
103 [IB_WC_REM_ACCESS_ERR] = "remote access error",
104 [IB_WC_REM_OP_ERR] = "remote operation error",
105 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
106 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
107 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
108 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
109 [IB_WC_REM_ABORT_ERR] = "operation aborted",
110 [IB_WC_INV_EECN_ERR] = "invalid EE context number",
111 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
112 [IB_WC_FATAL_ERR] = "fatal error",
113 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
114 [IB_WC_GENERAL_ERR] = "general error",
117 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
119 size_t index = status;
121 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
122 wc_statuses[index] : "unrecognized status";
124 EXPORT_SYMBOL(ib_wc_status_msg);
126 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
129 case IB_RATE_2_5_GBPS: return 1;
130 case IB_RATE_5_GBPS: return 2;
131 case IB_RATE_10_GBPS: return 4;
132 case IB_RATE_20_GBPS: return 8;
133 case IB_RATE_30_GBPS: return 12;
134 case IB_RATE_40_GBPS: return 16;
135 case IB_RATE_60_GBPS: return 24;
136 case IB_RATE_80_GBPS: return 32;
137 case IB_RATE_120_GBPS: return 48;
138 case IB_RATE_14_GBPS: return 6;
139 case IB_RATE_56_GBPS: return 22;
140 case IB_RATE_112_GBPS: return 45;
141 case IB_RATE_168_GBPS: return 67;
142 case IB_RATE_25_GBPS: return 10;
143 case IB_RATE_100_GBPS: return 40;
144 case IB_RATE_200_GBPS: return 80;
145 case IB_RATE_300_GBPS: return 120;
146 case IB_RATE_28_GBPS: return 11;
147 case IB_RATE_50_GBPS: return 20;
148 case IB_RATE_400_GBPS: return 160;
149 case IB_RATE_600_GBPS: return 240;
150 case IB_RATE_800_GBPS: return 320;
154 EXPORT_SYMBOL(ib_rate_to_mult);
156 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
159 case 1: return IB_RATE_2_5_GBPS;
160 case 2: return IB_RATE_5_GBPS;
161 case 4: return IB_RATE_10_GBPS;
162 case 8: return IB_RATE_20_GBPS;
163 case 12: return IB_RATE_30_GBPS;
164 case 16: return IB_RATE_40_GBPS;
165 case 24: return IB_RATE_60_GBPS;
166 case 32: return IB_RATE_80_GBPS;
167 case 48: return IB_RATE_120_GBPS;
168 case 6: return IB_RATE_14_GBPS;
169 case 22: return IB_RATE_56_GBPS;
170 case 45: return IB_RATE_112_GBPS;
171 case 67: return IB_RATE_168_GBPS;
172 case 10: return IB_RATE_25_GBPS;
173 case 40: return IB_RATE_100_GBPS;
174 case 80: return IB_RATE_200_GBPS;
175 case 120: return IB_RATE_300_GBPS;
176 case 11: return IB_RATE_28_GBPS;
177 case 20: return IB_RATE_50_GBPS;
178 case 160: return IB_RATE_400_GBPS;
179 case 240: return IB_RATE_600_GBPS;
180 case 320: return IB_RATE_800_GBPS;
181 default: return IB_RATE_PORT_CURRENT;
184 EXPORT_SYMBOL(mult_to_ib_rate);
186 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
189 case IB_RATE_2_5_GBPS: return 2500;
190 case IB_RATE_5_GBPS: return 5000;
191 case IB_RATE_10_GBPS: return 10000;
192 case IB_RATE_20_GBPS: return 20000;
193 case IB_RATE_30_GBPS: return 30000;
194 case IB_RATE_40_GBPS: return 40000;
195 case IB_RATE_60_GBPS: return 60000;
196 case IB_RATE_80_GBPS: return 80000;
197 case IB_RATE_120_GBPS: return 120000;
198 case IB_RATE_14_GBPS: return 14062;
199 case IB_RATE_56_GBPS: return 56250;
200 case IB_RATE_112_GBPS: return 112500;
201 case IB_RATE_168_GBPS: return 168750;
202 case IB_RATE_25_GBPS: return 25781;
203 case IB_RATE_100_GBPS: return 103125;
204 case IB_RATE_200_GBPS: return 206250;
205 case IB_RATE_300_GBPS: return 309375;
206 case IB_RATE_28_GBPS: return 28125;
207 case IB_RATE_50_GBPS: return 53125;
208 case IB_RATE_400_GBPS: return 425000;
209 case IB_RATE_600_GBPS: return 637500;
210 case IB_RATE_800_GBPS: return 850000;
214 EXPORT_SYMBOL(ib_rate_to_mbps);
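/*
 * Editor's illustrative sketch (hypothetical helper, not part of the original
 * file): the three converters above are mutually consistent. For example,
 * four 2.5 Gb/s lanes map to IB_RATE_10_GBPS, which reports 10000 Mb/s.
 */
static inline void example_rate_conversions(void)
{
	enum ib_rate rate = mult_to_ib_rate(4);	/* IB_RATE_10_GBPS */
	int mult = ib_rate_to_mult(rate);	/* 4 */
	int mbps = ib_rate_to_mbps(rate);	/* 10000 */

	pr_debug("rate=%d mult=%d mbps=%d\n", rate, mult, mbps);
}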
216 __attribute_const__ enum rdma_transport_type
217 rdma_node_get_transport(unsigned int node_type)
220 if (node_type == RDMA_NODE_USNIC)
221 return RDMA_TRANSPORT_USNIC;
222 if (node_type == RDMA_NODE_USNIC_UDP)
223 return RDMA_TRANSPORT_USNIC_UDP;
224 if (node_type == RDMA_NODE_RNIC)
225 return RDMA_TRANSPORT_IWARP;
226 if (node_type == RDMA_NODE_UNSPECIFIED)
227 return RDMA_TRANSPORT_UNSPECIFIED;
229 return RDMA_TRANSPORT_IB;
231 EXPORT_SYMBOL(rdma_node_get_transport);
233 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
236 enum rdma_transport_type lt;
237 if (device->ops.get_link_layer)
238 return device->ops.get_link_layer(device, port_num);
240 lt = rdma_node_get_transport(device->node_type);
241 if (lt == RDMA_TRANSPORT_IB)
242 return IB_LINK_LAYER_INFINIBAND;
244 return IB_LINK_LAYER_ETHERNET;
246 EXPORT_SYMBOL(rdma_port_get_link_layer);
248 /* Protection domains */
251 * __ib_alloc_pd - Allocates an unused protection domain.
252 * @device: The device on which to allocate the protection domain.
253 * @flags: protection domain flags
254 * @caller: caller's build-time module name
256 * A protection domain object provides an association between QPs, shared
257 * receive queues, address handles, memory regions, and memory windows.
259 * Every PD has a local_dma_lkey which can be used as the lkey value for local
262 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
266 int mr_access_flags = 0;
269 pd = rdma_zalloc_drv_obj(device, ib_pd);
271 return ERR_PTR(-ENOMEM);
276 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
277 rdma_restrack_set_name(&pd->res, caller);
279 ret = device->ops.alloc_pd(pd, NULL);
281 rdma_restrack_put(&pd->res);
285 rdma_restrack_add(&pd->res);
287 if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
288 pd->local_dma_lkey = device->local_dma_lkey;
290 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
292 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
293 pr_warn("%s: enabling unsafe global rkey\n", caller);
294 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
297 if (mr_access_flags) {
300 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
306 mr->device = pd->device;
308 mr->type = IB_MR_TYPE_DMA;
310 mr->need_inval = false;
312 pd->__internal_mr = mr;
314 if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
315 pd->local_dma_lkey = pd->__internal_mr->lkey;
317 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
318 pd->unsafe_global_rkey = pd->__internal_mr->rkey;
323 EXPORT_SYMBOL(__ib_alloc_pd);
326 * ib_dealloc_pd_user - Deallocates a protection domain.
327 * @pd: The protection domain to deallocate.
328 * @udata: Valid user data or NULL for kernel object
330 * It is an error to call this function while any resources in the pd still
331 * exist. The caller is responsible for destroying them synchronously and
332 * guaranteeing that no new allocations will happen.
334 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
338 if (pd->__internal_mr) {
339 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
341 pd->__internal_mr = NULL;
344 ret = pd->device->ops.dealloc_pd(pd, udata);
348 rdma_restrack_del(&pd->res);
352 EXPORT_SYMBOL(ib_dealloc_pd_user);
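/*
 * Editor's illustrative sketch (hypothetical caller, not part of the original
 * file): typical kernel ULP usage of the PD helpers above through the
 * ib_alloc_pd()/ib_dealloc_pd() wrappers declared in <rdma/ib_verbs.h>.
 */
static inline int example_pd_lifecycle(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... use pd->local_dma_lkey in local SGEs, create QPs/MRs ... */

	/* All PD resources must be destroyed before this call. */
	ib_dealloc_pd(pd);
	return 0;
}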
354 /* Address handles */
357 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
358 * @dest: Pointer to destination ah_attr. Contents of the destination
359 * pointer are assumed to be invalid and attributes are overwritten.
360 * @src: Pointer to source ah_attr.
362 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
363 const struct rdma_ah_attr *src)
366 if (dest->grh.sgid_attr)
367 rdma_hold_gid_attr(dest->grh.sgid_attr);
369 EXPORT_SYMBOL(rdma_copy_ah_attr);
372 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
373 * @old: Pointer to existing ah_attr which needs to be replaced.
374 * old is assumed to be valid or zero'd
375 * @new: Pointer to the new ah_attr.
377 * rdma_replace_ah_attr() first releases any reference held by the old ah_attr
378 * if it is valid; after that it copies the new attribute and takes a
379 * reference on the copied sgid_attr.
381 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
382 const struct rdma_ah_attr *new)
384 rdma_destroy_ah_attr(old);
386 if (old->grh.sgid_attr)
387 rdma_hold_gid_attr(old->grh.sgid_attr);
389 EXPORT_SYMBOL(rdma_replace_ah_attr);
392 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
393 * @dest: Pointer to destination ah_attr to copy to.
394 * dest is assumed to be valid or zero'd
395 * @src: Pointer to the new ah_attr.
397 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
398 * if it is valid. This also transfers ownership of internal references from
399 * src to dest, making src invalid in the process. No new reference to the src ah_attr is taken.
402 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
404 rdma_destroy_ah_attr(dest);
406 src->grh.sgid_attr = NULL;
408 EXPORT_SYMBOL(rdma_move_ah_attr);
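/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * ownership rules of the helpers above. After rdma_copy_ah_attr() both copies
 * hold a GID table reference; after rdma_move_ah_attr() only the destination
 * still owns a reference.
 */
static inline void example_ah_attr_ownership(struct rdma_ah_attr *src)
{
	struct rdma_ah_attr copy = {};
	struct rdma_ah_attr moved = {};

	rdma_copy_ah_attr(&copy, src);	/* src and copy each hold a reference */
	rdma_destroy_ah_attr(&copy);

	rdma_move_ah_attr(&moved, src);	/* src is now invalid */
	rdma_destroy_ah_attr(&moved);
}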
411 * Validate that the rdma_ah_attr is valid for the device before passing it
414 static int rdma_check_ah_attr(struct ib_device *device,
415 struct rdma_ah_attr *ah_attr)
417 if (!rdma_is_port_valid(device, ah_attr->port_num))
420 if ((rdma_is_grh_required(device, ah_attr->port_num) ||
421 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
422 !(ah_attr->ah_flags & IB_AH_GRH))
425 if (ah_attr->grh.sgid_attr) {
427 * Make sure the passed sgid_attr is consistent with the
430 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
431 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
438 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
439 * On success the caller is responsible to call rdma_unfill_sgid_attr().
441 static int rdma_fill_sgid_attr(struct ib_device *device,
442 struct rdma_ah_attr *ah_attr,
443 const struct ib_gid_attr **old_sgid_attr)
445 const struct ib_gid_attr *sgid_attr;
446 struct ib_global_route *grh;
449 *old_sgid_attr = ah_attr->grh.sgid_attr;
451 ret = rdma_check_ah_attr(device, ah_attr);
455 if (!(ah_attr->ah_flags & IB_AH_GRH))
458 grh = rdma_ah_retrieve_grh(ah_attr);
463 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
464 if (IS_ERR(sgid_attr))
465 return PTR_ERR(sgid_attr);
467 /* Move ownership of the kref into the ah_attr */
468 grh->sgid_attr = sgid_attr;
472 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
473 const struct ib_gid_attr *old_sgid_attr)
476 * Fill didn't change anything, the caller retains ownership of
479 if (ah_attr->grh.sgid_attr == old_sgid_attr)
483 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
484 * doesn't see any change in the rdma_ah_attr. If we get here
485 * old_sgid_attr is NULL.
487 rdma_destroy_ah_attr(ah_attr);
490 static const struct ib_gid_attr *
491 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
492 const struct ib_gid_attr *old_attr)
495 rdma_put_gid_attr(old_attr);
496 if (ah_attr->ah_flags & IB_AH_GRH) {
497 rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
498 return ah_attr->grh.sgid_attr;
503 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
504 struct rdma_ah_attr *ah_attr,
506 struct ib_udata *udata,
507 struct net_device *xmit_slave)
509 struct rdma_ah_init_attr init_attr = {};
510 struct ib_device *device = pd->device;
514 might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
516 if (!udata && !device->ops.create_ah)
517 return ERR_PTR(-EOPNOTSUPP);
519 ah = rdma_zalloc_drv_obj_gfp(
521 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
523 return ERR_PTR(-ENOMEM);
527 ah->type = ah_attr->type;
528 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
529 init_attr.ah_attr = ah_attr;
530 init_attr.flags = flags;
531 init_attr.xmit_slave = xmit_slave;
534 ret = device->ops.create_user_ah(ah, &init_attr, udata);
536 ret = device->ops.create_ah(ah, &init_attr, NULL);
539 rdma_put_gid_attr(ah->sgid_attr);
544 atomic_inc(&pd->usecnt);
549 * rdma_create_ah - Creates an address handle for the
550 * given address vector.
551 * @pd: The protection domain associated with the address handle.
552 * @ah_attr: The attributes of the address vector.
553 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
555 * It returns a newly allocated ib_ah on success, or an ERR_PTR-encoded error on failure.
556 * The address handle is used to reference a local or global destination
557 * in all UD QP post sends.
559 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
562 const struct ib_gid_attr *old_sgid_attr;
563 struct net_device *slave;
567 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
570 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
571 (flags & RDMA_CREATE_AH_SLEEPABLE) ?
572 GFP_KERNEL : GFP_ATOMIC);
574 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
575 return (void *)slave;
577 ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
578 rdma_lag_put_ah_roce_slave(slave);
579 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
582 EXPORT_SYMBOL(rdma_create_ah);
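/*
 * Editor's illustrative sketch (hypothetical caller, not part of the original
 * file): building a minimal IB ah_attr and creating/destroying an AH for UD
 * sends. The DLID and SL values are placeholders.
 */
static inline int example_create_ud_ah(struct ib_pd *pd, u32 port_num, u32 dlid)
{
	struct rdma_ah_attr ah_attr = {};
	struct ib_ah *ah;

	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
	rdma_ah_set_port_num(&ah_attr, port_num);
	rdma_ah_set_dlid(&ah_attr, dlid);
	rdma_ah_set_sl(&ah_attr, 0);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ... reference the AH from UD send work requests ... */

	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	return 0;
}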
585 * rdma_create_user_ah - Creates an address handle for the
586 * given address vector.
587 * It resolves the destination MAC address for an ah_attr of RoCE type.
588 * @pd: The protection domain associated with the address handle.
589 * @ah_attr: The attributes of the address vector.
590 * @udata: pointer to user's input/output buffer information needed by
593 * It returns a newly allocated ib_ah on success, or an ERR_PTR-encoded error on failure.
594 * The address handle is used to reference a local or global destination
595 * in all UD QP post sends.
597 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
598 struct rdma_ah_attr *ah_attr,
599 struct ib_udata *udata)
601 const struct ib_gid_attr *old_sgid_attr;
605 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
609 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
610 err = ib_resolve_eth_dmac(pd->device, ah_attr);
617 ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
621 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
624 EXPORT_SYMBOL(rdma_create_user_ah);
626 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
628 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
629 struct iphdr ip4h_checked;
630 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
632 /* If it's IPv6, the version must be 6, otherwise, the first
633 * 20 bytes (before the IPv4 header) are garbled.
635 if (ip6h->version != 6)
636 return (ip4h->version == 4) ? 4 : 0;
637 /* version may be 6 or 4 because the first 20 bytes could be garbled */
639 /* RoCE v2 requires no options, thus header length
646 * We can't write on scattered buffers so we need to copy to
649 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
650 ip4h_checked.check = 0;
651 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
652 /* if IPv4 header checksum is OK, believe it */
653 if (ip4h->check == ip4h_checked.check)
657 EXPORT_SYMBOL(ib_get_rdma_header_version);
659 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
661 const struct ib_grh *grh)
665 if (rdma_protocol_ib(device, port_num))
666 return RDMA_NETWORK_IB;
668 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
670 if (grh_version == 4)
671 return RDMA_NETWORK_IPV4;
673 if (grh->next_hdr == IPPROTO_UDP)
674 return RDMA_NETWORK_IPV6;
676 return RDMA_NETWORK_ROCE_V1;
679 struct find_gid_index_context {
681 enum ib_gid_type gid_type;
684 static bool find_gid_index(const union ib_gid *gid,
685 const struct ib_gid_attr *gid_attr,
688 struct find_gid_index_context *ctx = context;
689 u16 vlan_id = 0xffff;
692 if (ctx->gid_type != gid_attr->gid_type)
695 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
699 return ctx->vlan_id == vlan_id;
702 static const struct ib_gid_attr *
703 get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
704 u16 vlan_id, const union ib_gid *sgid,
705 enum ib_gid_type gid_type)
707 struct find_gid_index_context context = {.vlan_id = vlan_id,
708 .gid_type = gid_type};
710 return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
714 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
715 enum rdma_network_type net_type,
716 union ib_gid *sgid, union ib_gid *dgid)
718 struct sockaddr_in src_in;
719 struct sockaddr_in dst_in;
720 __be32 src_saddr, dst_saddr;
725 if (net_type == RDMA_NETWORK_IPV4) {
726 memcpy(&src_in.sin_addr.s_addr,
727 &hdr->roce4grh.saddr, 4);
728 memcpy(&dst_in.sin_addr.s_addr,
729 &hdr->roce4grh.daddr, 4);
730 src_saddr = src_in.sin_addr.s_addr;
731 dst_saddr = dst_in.sin_addr.s_addr;
732 ipv6_addr_set_v4mapped(src_saddr,
733 (struct in6_addr *)sgid);
734 ipv6_addr_set_v4mapped(dst_saddr,
735 (struct in6_addr *)dgid);
737 } else if (net_type == RDMA_NETWORK_IPV6 ||
738 net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
739 *dgid = hdr->ibgrh.dgid;
740 *sgid = hdr->ibgrh.sgid;
746 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
748 /* Resolve destination mac address and hop limit for unicast destination
749 * GID entry, considering the source GID entry as well.
750 * ah_attribute must have valid port_num, sgid_index.
752 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
753 struct rdma_ah_attr *ah_attr)
755 struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
756 const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
757 int hop_limit = 0xff;
760 /* If destination is link local and source GID is RoCEv1,
761 * IP stack is not used.
763 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
764 sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
765 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
770 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
772 sgid_attr, &hop_limit);
774 grh->hop_limit = hop_limit;
779 * This function initializes address handle attributes from the incoming packet.
780 * The incoming packet's dgid holds the GID of the receiving node (where this
781 * code runs) and its sgid holds the GID of the sender.
783 * When resolving the destination MAC address, the received dgid is used as
784 * the sgid and the received sgid is used as the dgid, since the sgid holds
785 * the destination GID to respond to.
787 * On success the caller is responsible to call rdma_destroy_ah_attr on the
790 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
791 const struct ib_wc *wc, const struct ib_grh *grh,
792 struct rdma_ah_attr *ah_attr)
796 enum rdma_network_type net_type = RDMA_NETWORK_IB;
797 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
798 const struct ib_gid_attr *sgid_attr;
805 memset(ah_attr, 0, sizeof *ah_attr);
806 ah_attr->type = rdma_ah_find_type(device, port_num);
807 if (rdma_cap_eth_ah(device, port_num)) {
808 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
809 net_type = wc->network_hdr_type;
811 net_type = ib_get_net_type_by_grh(device, port_num, grh);
812 gid_type = ib_network_to_gid_type(net_type);
814 ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
819 rdma_ah_set_sl(ah_attr, wc->sl);
820 rdma_ah_set_port_num(ah_attr, port_num);
822 if (rdma_protocol_roce(device, port_num)) {
823 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
824 wc->vlan_id : 0xffff;
826 if (!(wc->wc_flags & IB_WC_GRH))
829 sgid_attr = get_sgid_attr_from_eth(device, port_num,
832 if (IS_ERR(sgid_attr))
833 return PTR_ERR(sgid_attr);
835 flow_class = be32_to_cpu(grh->version_tclass_flow);
836 rdma_move_grh_sgid_attr(ah_attr,
838 flow_class & 0xFFFFF,
840 (flow_class >> 20) & 0xFF,
843 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
845 rdma_destroy_ah_attr(ah_attr);
849 rdma_ah_set_dlid(ah_attr, wc->slid);
850 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
852 if ((wc->wc_flags & IB_WC_GRH) == 0)
855 if (dgid.global.interface_id !=
856 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
857 sgid_attr = rdma_find_gid_by_port(
858 device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
860 sgid_attr = rdma_get_gid_attr(device, port_num, 0);
862 if (IS_ERR(sgid_attr))
863 return PTR_ERR(sgid_attr);
864 flow_class = be32_to_cpu(grh->version_tclass_flow);
865 rdma_move_grh_sgid_attr(ah_attr,
867 flow_class & 0xFFFFF,
869 (flow_class >> 20) & 0xFF,
875 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
878 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
881 * @attr: Pointer to AH attribute structure
882 * @dgid: Destination GID
883 * @flow_label: Flow label
884 * @hop_limit: Hop limit
885 * @traffic_class: traffic class
886 * @sgid_attr: Pointer to SGID attribute
888 * This takes ownership of the sgid_attr reference. The caller must ensure
889 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
890 * calling this function.
892 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
893 u32 flow_label, u8 hop_limit, u8 traffic_class,
894 const struct ib_gid_attr *sgid_attr)
896 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
898 attr->grh.sgid_attr = sgid_attr;
900 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
903 * rdma_destroy_ah_attr - Release reference to SGID attribute of
905 * @ah_attr: Pointer to ah attribute
907 * Release reference to the SGID attribute of the ah attribute if it is
908 * non NULL. It is safe to call this multiple times, and safe to call it on
909 * a zero initialized ah_attr.
911 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
913 if (ah_attr->grh.sgid_attr) {
914 rdma_put_gid_attr(ah_attr->grh.sgid_attr);
915 ah_attr->grh.sgid_attr = NULL;
918 EXPORT_SYMBOL(rdma_destroy_ah_attr);
920 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
921 const struct ib_grh *grh, u32 port_num)
923 struct rdma_ah_attr ah_attr;
927 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
931 ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
933 rdma_destroy_ah_attr(&ah_attr);
936 EXPORT_SYMBOL(ib_create_ah_from_wc);
938 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
940 const struct ib_gid_attr *old_sgid_attr;
943 if (ah->type != ah_attr->type)
946 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
950 ret = ah->device->ops.modify_ah ?
951 ah->device->ops.modify_ah(ah, ah_attr) :
954 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
955 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
958 EXPORT_SYMBOL(rdma_modify_ah);
960 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
962 ah_attr->grh.sgid_attr = NULL;
964 return ah->device->ops.query_ah ?
965 ah->device->ops.query_ah(ah, ah_attr) :
968 EXPORT_SYMBOL(rdma_query_ah);
970 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
972 const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
976 might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
980 ret = ah->device->ops.destroy_ah(ah, flags);
984 atomic_dec(&pd->usecnt);
986 rdma_put_gid_attr(sgid_attr);
991 EXPORT_SYMBOL(rdma_destroy_ah_user);
993 /* Shared receive queues */
996 * ib_create_srq_user - Creates a SRQ associated with the specified protection
998 * @pd: The protection domain associated with the SRQ.
999 * @srq_init_attr: A list of initial attributes required to create the
1000 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1001 * the actual capabilities of the created SRQ.
1002 * @uobject: uobject pointer if this is not a kernel SRQ
1003 * @udata: udata pointer if this is not a kernel SRQ
1005 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1006 * requested size of the SRQ, and are set to the actual values allocated
1007 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1008 * will always be at least as large as the requested values.
1010 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1011 struct ib_srq_init_attr *srq_init_attr,
1012 struct ib_usrq_object *uobject,
1013 struct ib_udata *udata)
1018 srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1020 return ERR_PTR(-ENOMEM);
1022 srq->device = pd->device;
1024 srq->event_handler = srq_init_attr->event_handler;
1025 srq->srq_context = srq_init_attr->srq_context;
1026 srq->srq_type = srq_init_attr->srq_type;
1027 srq->uobject = uobject;
1029 if (ib_srq_has_cq(srq->srq_type)) {
1030 srq->ext.cq = srq_init_attr->ext.cq;
1031 atomic_inc(&srq->ext.cq->usecnt);
1033 if (srq->srq_type == IB_SRQT_XRC) {
1034 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1035 if (srq->ext.xrc.xrcd)
1036 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1038 atomic_inc(&pd->usecnt);
1040 rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
1041 rdma_restrack_parent_name(&srq->res, &pd->res);
1043 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1045 rdma_restrack_put(&srq->res);
1046 atomic_dec(&pd->usecnt);
1047 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1048 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1049 if (ib_srq_has_cq(srq->srq_type))
1050 atomic_dec(&srq->ext.cq->usecnt);
1052 return ERR_PTR(ret);
1055 rdma_restrack_add(&srq->res);
1059 EXPORT_SYMBOL(ib_create_srq_user);
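/*
 * Editor's illustrative sketch (hypothetical kernel caller, not part of the
 * original file): creating a basic SRQ through the ib_create_srq() wrapper,
 * which invokes ib_create_srq_user() with a NULL uobject and udata.
 */
static inline struct ib_srq *example_create_basic_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr  = 128,
			.max_sge = 1,
		},
		.srq_type = IB_SRQT_BASIC,
	};

	return ib_create_srq(pd, &init_attr);
}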
1061 int ib_modify_srq(struct ib_srq *srq,
1062 struct ib_srq_attr *srq_attr,
1063 enum ib_srq_attr_mask srq_attr_mask)
1065 return srq->device->ops.modify_srq ?
1066 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1067 NULL) : -EOPNOTSUPP;
1069 EXPORT_SYMBOL(ib_modify_srq);
1071 int ib_query_srq(struct ib_srq *srq,
1072 struct ib_srq_attr *srq_attr)
1074 return srq->device->ops.query_srq ?
1075 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1077 EXPORT_SYMBOL(ib_query_srq);
1079 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1083 if (atomic_read(&srq->usecnt))
1086 ret = srq->device->ops.destroy_srq(srq, udata);
1090 atomic_dec(&srq->pd->usecnt);
1091 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1092 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1093 if (ib_srq_has_cq(srq->srq_type))
1094 atomic_dec(&srq->ext.cq->usecnt);
1095 rdma_restrack_del(&srq->res);
1100 EXPORT_SYMBOL(ib_destroy_srq_user);
1104 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1106 struct ib_qp *qp = context;
1107 unsigned long flags;
1109 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1110 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1111 if (event->element.qp->event_handler)
1112 event->element.qp->event_handler(event, event->element.qp->qp_context);
1113 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1116 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1117 void (*event_handler)(struct ib_event *, void *),
1121 unsigned long flags;
1124 qp = kzalloc(sizeof *qp, GFP_KERNEL);
1126 return ERR_PTR(-ENOMEM);
1128 qp->real_qp = real_qp;
1129 err = ib_open_shared_qp_security(qp, real_qp->device);
1132 return ERR_PTR(err);
1135 qp->real_qp = real_qp;
1136 atomic_inc(&real_qp->usecnt);
1137 qp->device = real_qp->device;
1138 qp->event_handler = event_handler;
1139 qp->qp_context = qp_context;
1140 qp->qp_num = real_qp->qp_num;
1141 qp->qp_type = real_qp->qp_type;
1143 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1144 list_add(&qp->open_list, &real_qp->open_list);
1145 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1150 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1151 struct ib_qp_open_attr *qp_open_attr)
1153 struct ib_qp *qp, *real_qp;
1155 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1156 return ERR_PTR(-EINVAL);
1158 down_read(&xrcd->tgt_qps_rwsem);
1159 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1161 up_read(&xrcd->tgt_qps_rwsem);
1162 return ERR_PTR(-EINVAL);
1164 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1165 qp_open_attr->qp_context);
1166 up_read(&xrcd->tgt_qps_rwsem);
1169 EXPORT_SYMBOL(ib_open_qp);
1171 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1172 struct ib_qp_init_attr *qp_init_attr)
1174 struct ib_qp *real_qp = qp;
1177 qp->event_handler = __ib_shared_qp_event_handler;
1178 qp->qp_context = qp;
1180 qp->send_cq = qp->recv_cq = NULL;
1182 qp->xrcd = qp_init_attr->xrcd;
1183 atomic_inc(&qp_init_attr->xrcd->usecnt);
1184 INIT_LIST_HEAD(&qp->open_list);
1186 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1187 qp_init_attr->qp_context);
1191 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1192 real_qp, GFP_KERNEL));
1195 return ERR_PTR(err);
1200 static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
1201 struct ib_qp_init_attr *attr,
1202 struct ib_udata *udata,
1203 struct ib_uqp_object *uobj, const char *caller)
1205 struct ib_udata dummy = {};
1209 if (!dev->ops.create_qp)
1210 return ERR_PTR(-EOPNOTSUPP);
1212 qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1214 return ERR_PTR(-ENOMEM);
1221 qp->qp_type = attr->qp_type;
1222 qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1223 qp->srq = attr->srq;
1224 qp->event_handler = attr->event_handler;
1225 qp->port = attr->port_num;
1226 qp->qp_context = attr->qp_context;
1228 spin_lock_init(&qp->mr_lock);
1229 INIT_LIST_HEAD(&qp->rdma_mrs);
1230 INIT_LIST_HEAD(&qp->sig_mrs);
1232 qp->send_cq = attr->send_cq;
1233 qp->recv_cq = attr->recv_cq;
1235 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1236 WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
1237 rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1238 ret = dev->ops.create_qp(qp, attr, udata);
1243 * TODO: The mlx4 internally overwrites send_cq and recv_cq.
1244 * Unfortunately, it is not an easy task to fix that driver.
1246 qp->send_cq = attr->send_cq;
1247 qp->recv_cq = attr->recv_cq;
1249 ret = ib_create_qp_security(qp, dev);
1253 rdma_restrack_add(&qp->res);
1257 qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1259 rdma_restrack_put(&qp->res);
1261 return ERR_PTR(ret);
1266 * ib_create_qp_user - Creates a QP associated with the specified protection
1269 * @pd: The protection domain associated with the QP.
1270 * @attr: A list of initial attributes required to create the
1271 * QP. If QP creation succeeds, then the attributes are updated to
1272 * the actual capabilities of the created QP.
1274 * @uobj: uverbs object
1275 * @caller: caller's build-time module name
1277 struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
1278 struct ib_qp_init_attr *attr,
1279 struct ib_udata *udata,
1280 struct ib_uqp_object *uobj, const char *caller)
1282 struct ib_qp *qp, *xrc_qp;
1284 if (attr->qp_type == IB_QPT_XRC_TGT)
1285 qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1287 qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1288 if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1291 xrc_qp = create_xrc_qp_user(qp, attr);
1292 if (IS_ERR(xrc_qp)) {
1297 xrc_qp->uobject = uobj;
1300 EXPORT_SYMBOL(ib_create_qp_user);
1302 void ib_qp_usecnt_inc(struct ib_qp *qp)
1305 atomic_inc(&qp->pd->usecnt);
1307 atomic_inc(&qp->send_cq->usecnt);
1309 atomic_inc(&qp->recv_cq->usecnt);
1311 atomic_inc(&qp->srq->usecnt);
1312 if (qp->rwq_ind_tbl)
1313 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1315 EXPORT_SYMBOL(ib_qp_usecnt_inc);
1317 void ib_qp_usecnt_dec(struct ib_qp *qp)
1319 if (qp->rwq_ind_tbl)
1320 atomic_dec(&qp->rwq_ind_tbl->usecnt);
1322 atomic_dec(&qp->srq->usecnt);
1324 atomic_dec(&qp->recv_cq->usecnt);
1326 atomic_dec(&qp->send_cq->usecnt);
1328 atomic_dec(&qp->pd->usecnt);
1330 EXPORT_SYMBOL(ib_qp_usecnt_dec);
1332 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
1333 struct ib_qp_init_attr *qp_init_attr,
1336 struct ib_device *device = pd->device;
1341 * If the caller is using the RDMA API, calculate the resources
1342 * needed for the RDMA READ/WRITE operations.
1344 * Note that these callers need to pass in a port number.
1346 if (qp_init_attr->cap.max_rdma_ctxs)
1347 rdma_rw_init_qp(device, qp_init_attr);
1349 qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1353 ib_qp_usecnt_inc(qp);
1355 if (qp_init_attr->cap.max_rdma_ctxs) {
1356 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1362 * Note: all hw drivers guarantee that max_send_sge is lower than
1363 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1364 * max_send_sge <= max_sge_rd.
1366 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1367 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1368 device->attrs.max_sge_rd);
1369 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1370 qp->integrity_en = true;
1376 return ERR_PTR(ret);
1379 EXPORT_SYMBOL(ib_create_qp_kernel);
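/*
 * Editor's illustrative sketch (hypothetical ULP code, not part of the
 * original file): creating an RC QP with the ib_create_qp() wrapper around
 * ib_create_qp_kernel(). The queue depths are arbitrary placeholders.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap         = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}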
1381 static const struct {
1383 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
1384 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
1385 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1387 [IB_QPS_RESET] = { .valid = 1 },
1391 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1394 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
1395 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1397 IB_QP_ACCESS_FLAGS),
1398 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1400 IB_QP_ACCESS_FLAGS),
1401 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1403 IB_QP_ACCESS_FLAGS),
1404 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1406 IB_QP_ACCESS_FLAGS),
1407 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1409 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1415 [IB_QPS_RESET] = { .valid = 1 },
1416 [IB_QPS_ERR] = { .valid = 1 },
1420 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1423 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1425 IB_QP_ACCESS_FLAGS),
1426 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1428 IB_QP_ACCESS_FLAGS),
1429 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1431 IB_QP_ACCESS_FLAGS),
1432 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1434 IB_QP_ACCESS_FLAGS),
1435 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1437 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1444 [IB_QPT_UC] = (IB_QP_AV |
1448 [IB_QPT_RC] = (IB_QP_AV |
1452 IB_QP_MAX_DEST_RD_ATOMIC |
1453 IB_QP_MIN_RNR_TIMER),
1454 [IB_QPT_XRC_INI] = (IB_QP_AV |
1458 [IB_QPT_XRC_TGT] = (IB_QP_AV |
1462 IB_QP_MAX_DEST_RD_ATOMIC |
1463 IB_QP_MIN_RNR_TIMER),
1466 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1468 [IB_QPT_UC] = (IB_QP_ALT_PATH |
1469 IB_QP_ACCESS_FLAGS |
1471 [IB_QPT_RC] = (IB_QP_ALT_PATH |
1472 IB_QP_ACCESS_FLAGS |
1474 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
1475 IB_QP_ACCESS_FLAGS |
1477 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
1478 IB_QP_ACCESS_FLAGS |
1480 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1482 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1488 [IB_QPS_RESET] = { .valid = 1 },
1489 [IB_QPS_ERR] = { .valid = 1 },
1493 [IB_QPT_UD] = IB_QP_SQ_PSN,
1494 [IB_QPT_UC] = IB_QP_SQ_PSN,
1495 [IB_QPT_RC] = (IB_QP_TIMEOUT |
1499 IB_QP_MAX_QP_RD_ATOMIC),
1500 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
1504 IB_QP_MAX_QP_RD_ATOMIC),
1505 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
1507 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1508 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1511 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1513 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1515 IB_QP_ACCESS_FLAGS |
1516 IB_QP_PATH_MIG_STATE),
1517 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1519 IB_QP_ACCESS_FLAGS |
1520 IB_QP_MIN_RNR_TIMER |
1521 IB_QP_PATH_MIG_STATE),
1522 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1524 IB_QP_ACCESS_FLAGS |
1525 IB_QP_PATH_MIG_STATE),
1526 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1528 IB_QP_ACCESS_FLAGS |
1529 IB_QP_MIN_RNR_TIMER |
1530 IB_QP_PATH_MIG_STATE),
1531 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1533 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1535 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1540 [IB_QPS_RESET] = { .valid = 1 },
1541 [IB_QPS_ERR] = { .valid = 1 },
1545 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1547 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1548 IB_QP_ACCESS_FLAGS |
1550 IB_QP_PATH_MIG_STATE),
1551 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1552 IB_QP_ACCESS_FLAGS |
1554 IB_QP_PATH_MIG_STATE |
1555 IB_QP_MIN_RNR_TIMER),
1556 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1557 IB_QP_ACCESS_FLAGS |
1559 IB_QP_PATH_MIG_STATE),
1560 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1561 IB_QP_ACCESS_FLAGS |
1563 IB_QP_PATH_MIG_STATE |
1564 IB_QP_MIN_RNR_TIMER),
1565 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1567 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1569 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1575 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1576 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1577 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1578 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1579 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1580 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1581 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1586 [IB_QPS_RESET] = { .valid = 1 },
1587 [IB_QPS_ERR] = { .valid = 1 },
1591 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1593 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1595 IB_QP_ACCESS_FLAGS |
1596 IB_QP_PATH_MIG_STATE),
1597 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1599 IB_QP_ACCESS_FLAGS |
1600 IB_QP_MIN_RNR_TIMER |
1601 IB_QP_PATH_MIG_STATE),
1602 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1604 IB_QP_ACCESS_FLAGS |
1605 IB_QP_PATH_MIG_STATE),
1606 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1608 IB_QP_ACCESS_FLAGS |
1609 IB_QP_MIN_RNR_TIMER |
1610 IB_QP_PATH_MIG_STATE),
1611 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1613 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1620 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1622 [IB_QPT_UC] = (IB_QP_AV |
1624 IB_QP_ACCESS_FLAGS |
1626 IB_QP_PATH_MIG_STATE),
1627 [IB_QPT_RC] = (IB_QP_PORT |
1632 IB_QP_MAX_QP_RD_ATOMIC |
1633 IB_QP_MAX_DEST_RD_ATOMIC |
1635 IB_QP_ACCESS_FLAGS |
1637 IB_QP_MIN_RNR_TIMER |
1638 IB_QP_PATH_MIG_STATE),
1639 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1644 IB_QP_MAX_QP_RD_ATOMIC |
1646 IB_QP_ACCESS_FLAGS |
1648 IB_QP_PATH_MIG_STATE),
1649 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1652 IB_QP_MAX_DEST_RD_ATOMIC |
1654 IB_QP_ACCESS_FLAGS |
1656 IB_QP_MIN_RNR_TIMER |
1657 IB_QP_PATH_MIG_STATE),
1658 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1660 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1666 [IB_QPS_RESET] = { .valid = 1 },
1667 [IB_QPS_ERR] = { .valid = 1 },
1671 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1673 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1674 IB_QP_ACCESS_FLAGS),
1675 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1677 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1683 [IB_QPS_RESET] = { .valid = 1 },
1684 [IB_QPS_ERR] = { .valid = 1 }
1688 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1689 enum ib_qp_type type, enum ib_qp_attr_mask mask)
1691 enum ib_qp_attr_mask req_param, opt_param;
1693 if (mask & IB_QP_CUR_STATE &&
1694 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1695 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1698 if (!qp_state_table[cur_state][next_state].valid)
1701 req_param = qp_state_table[cur_state][next_state].req_param[type];
1702 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1704 if ((mask & req_param) != req_param)
1707 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1712 EXPORT_SYMBOL(ib_modify_qp_is_ok);
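/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * checking an attribute mask against the state table above before issuing an
 * RC INIT->RTR transition. The mask shown is assumed to match the required
 * parameter set for that transition.
 */
static inline bool example_check_rc_init_to_rtr(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				    IB_QP_DEST_QPN | IB_QP_RQ_PSN |
				    IB_QP_MAX_DEST_RD_ATOMIC |
				    IB_QP_MIN_RNR_TIMER;

	return ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC, mask);
}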
1715 * ib_resolve_eth_dmac - Resolve destination mac address
1716 * @device: Device to consider
1717 * @ah_attr: address handle attribute which describes the
1718 * source and destination parameters
1719 * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop limit.
1720 * It returns 0 on success or an appropriate error code, and initializes the
1721 * necessary ah_attr fields when the call is successful.
1723 static int ib_resolve_eth_dmac(struct ib_device *device,
1724 struct rdma_ah_attr *ah_attr)
1728 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1729 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1732 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1733 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1735 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1736 (char *)ah_attr->roce.dmac);
1739 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1744 static bool is_qp_type_connected(const struct ib_qp *qp)
1746 return (qp->qp_type == IB_QPT_UC ||
1747 qp->qp_type == IB_QPT_RC ||
1748 qp->qp_type == IB_QPT_XRC_INI ||
1749 qp->qp_type == IB_QPT_XRC_TGT);
1753 * IB core internal function to perform QP attributes modification.
1755 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1756 int attr_mask, struct ib_udata *udata)
1758 u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1759 const struct ib_gid_attr *old_sgid_attr_av;
1760 const struct ib_gid_attr *old_sgid_attr_alt_av;
1763 attr->xmit_slave = NULL;
1764 if (attr_mask & IB_QP_AV) {
1765 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1770 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1771 is_qp_type_connected(qp)) {
1772 struct net_device *slave;
1775 * If the user provided the qp_attr then we have to
1776 * resolve it. Kernel users have to provide already
1777 * resolved rdma_ah_attr's.
1780 ret = ib_resolve_eth_dmac(qp->device,
1785 slave = rdma_lag_get_ah_roce_slave(qp->device,
1788 if (IS_ERR(slave)) {
1789 ret = PTR_ERR(slave);
1792 attr->xmit_slave = slave;
1795 if (attr_mask & IB_QP_ALT_PATH) {
1797 * FIXME: This does not track the migration state, so if the
1798 * user loads a new alternate path after the HW has migrated
1799 * from primary->alternate we will keep the wrong
1800 * references. This is OK for IB because the reference
1801 * counting does not serve any functional purpose.
1803 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1804 &old_sgid_attr_alt_av);
1809 * Today the core code can only handle alternate paths and APM
1810 * for IB. Ban them in roce mode.
1812 if (!(rdma_protocol_ib(qp->device,
1813 attr->alt_ah_attr.port_num) &&
1814 rdma_protocol_ib(qp->device, port))) {
1820 if (rdma_ib_or_roce(qp->device, port)) {
1821 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1822 dev_warn(&qp->device->dev,
1823 "%s rq_psn overflow, masking to 24 bits\n",
1825 attr->rq_psn &= 0xffffff;
1828 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1829 dev_warn(&qp->device->dev,
1830 " %s sq_psn overflow, masking to 24 bits\n",
1832 attr->sq_psn &= 0xffffff;
1837 * Bind this qp to a counter automatically based on the rdma counter
1838 * rules. This is only done in the RST2INIT transition when a port is specified
1840 if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1841 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1842 rdma_counter_bind_qp_auto(qp, attr->port_num);
1844 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1848 if (attr_mask & IB_QP_PORT)
1849 qp->port = attr->port_num;
1850 if (attr_mask & IB_QP_AV)
1852 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1853 if (attr_mask & IB_QP_ALT_PATH)
1854 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1855 &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1858 if (attr_mask & IB_QP_ALT_PATH)
1859 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1861 if (attr_mask & IB_QP_AV) {
1862 rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1863 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1869 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1870 * @ib_qp: The QP to modify.
1871 * @attr: On input, specifies the QP attributes to modify. On output,
1872 * the current values of selected QP attributes are returned.
1873 * @attr_mask: A bit-mask used to specify which attributes of the QP
1874 * are being modified.
1875 * @udata: pointer to the user's input/output buffer information
1877 * It returns 0 on success and an appropriate error code on error.
1879 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1880 int attr_mask, struct ib_udata *udata)
1882 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1884 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1886 static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
1887 u16 *speed, u8 *width)
1890 if (netdev_speed <= SPEED_1000) {
1891 *width = IB_WIDTH_1X;
1892 *speed = IB_SPEED_SDR;
1893 } else if (netdev_speed <= SPEED_10000) {
1894 *width = IB_WIDTH_1X;
1895 *speed = IB_SPEED_FDR10;
1896 } else if (netdev_speed <= SPEED_20000) {
1897 *width = IB_WIDTH_4X;
1898 *speed = IB_SPEED_DDR;
1899 } else if (netdev_speed <= SPEED_25000) {
1900 *width = IB_WIDTH_1X;
1901 *speed = IB_SPEED_EDR;
1902 } else if (netdev_speed <= SPEED_40000) {
1903 *width = IB_WIDTH_4X;
1904 *speed = IB_SPEED_FDR10;
1905 } else if (netdev_speed <= SPEED_50000) {
1906 *width = IB_WIDTH_2X;
1907 *speed = IB_SPEED_EDR;
1908 } else if (netdev_speed <= SPEED_100000) {
1909 *width = IB_WIDTH_4X;
1910 *speed = IB_SPEED_EDR;
1911 } else if (netdev_speed <= SPEED_200000) {
1912 *width = IB_WIDTH_4X;
1913 *speed = IB_SPEED_HDR;
1915 *width = IB_WIDTH_4X;
1916 *speed = IB_SPEED_NDR;
1924 *width = IB_WIDTH_1X;
1927 *width = IB_WIDTH_2X;
1930 *width = IB_WIDTH_4X;
1933 *width = IB_WIDTH_8X;
1936 *width = IB_WIDTH_12X;
1939 *width = IB_WIDTH_1X;
1942 switch (netdev_speed / lanes) {
1944 *speed = IB_SPEED_SDR;
1947 *speed = IB_SPEED_DDR;
1950 *speed = IB_SPEED_FDR10;
1953 *speed = IB_SPEED_FDR;
1956 *speed = IB_SPEED_EDR;
1959 *speed = IB_SPEED_HDR;
1962 *speed = IB_SPEED_NDR;
1965 *speed = IB_SPEED_SDR;
1969 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
1973 struct net_device *netdev;
1974 struct ethtool_link_ksettings lksettings = {};
1976 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1979 netdev = ib_device_get_netdev(dev, port_num);
1984 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1989 if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1990 netdev_speed = lksettings.base.speed;
1992 netdev_speed = SPEED_1000;
1994 pr_warn("%s speed is unknown, defaulting to %u\n",
1995 netdev->name, netdev_speed);
1998 ib_get_width_and_speed(netdev_speed, lksettings.lanes,
2003 EXPORT_SYMBOL(ib_get_eth_speed);
2005 int ib_modify_qp(struct ib_qp *qp,
2006 struct ib_qp_attr *qp_attr,
2009 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
2011 EXPORT_SYMBOL(ib_modify_qp);
2013 int ib_query_qp(struct ib_qp *qp,
2014 struct ib_qp_attr *qp_attr,
2016 struct ib_qp_init_attr *qp_init_attr)
2018 qp_attr->ah_attr.grh.sgid_attr = NULL;
2019 qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
2021 return qp->device->ops.query_qp ?
2022 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
2023 qp_init_attr) : -EOPNOTSUPP;
2025 EXPORT_SYMBOL(ib_query_qp);
2027 int ib_close_qp(struct ib_qp *qp)
2029 struct ib_qp *real_qp;
2030 unsigned long flags;
2032 real_qp = qp->real_qp;
2036 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
2037 list_del(&qp->open_list);
2038 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
2040 atomic_dec(&real_qp->usecnt);
2042 ib_close_shared_qp_security(qp->qp_sec);
2047 EXPORT_SYMBOL(ib_close_qp);
2049 static int __ib_destroy_shared_qp(struct ib_qp *qp)
2051 struct ib_xrcd *xrcd;
2052 struct ib_qp *real_qp;
2055 real_qp = qp->real_qp;
2056 xrcd = real_qp->xrcd;
2057 down_write(&xrcd->tgt_qps_rwsem);
2059 if (atomic_read(&real_qp->usecnt) == 0)
2060 xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
2063 up_write(&xrcd->tgt_qps_rwsem);
2066 ret = ib_destroy_qp(real_qp);
2068 atomic_dec(&xrcd->usecnt);
2074 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2076 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2077 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2078 struct ib_qp_security *sec;
2081 WARN_ON_ONCE(qp->mrs_used > 0);
2083 if (atomic_read(&qp->usecnt))
2086 if (qp->real_qp != qp)
2087 return __ib_destroy_shared_qp(qp);
2091 ib_destroy_qp_security_begin(sec);
2094 rdma_rw_cleanup_mrs(qp);
2096 rdma_counter_unbind_qp(qp, true);
2097 ret = qp->device->ops.destroy_qp(qp, udata);
2100 ib_destroy_qp_security_abort(sec);
2104 if (alt_path_sgid_attr)
2105 rdma_put_gid_attr(alt_path_sgid_attr);
2107 rdma_put_gid_attr(av_sgid_attr);
2109 ib_qp_usecnt_dec(qp);
2111 ib_destroy_qp_security_end(sec);
2113 rdma_restrack_del(&qp->res);
2117 EXPORT_SYMBOL(ib_destroy_qp_user);
2119 /* Completion queues */
2121 struct ib_cq *__ib_create_cq(struct ib_device *device,
2122 ib_comp_handler comp_handler,
2123 void (*event_handler)(struct ib_event *, void *),
2125 const struct ib_cq_init_attr *cq_attr,
2131 cq = rdma_zalloc_drv_obj(device, ib_cq);
2133 return ERR_PTR(-ENOMEM);
2135 cq->device = device;
2137 cq->comp_handler = comp_handler;
2138 cq->event_handler = event_handler;
2139 cq->cq_context = cq_context;
2140 atomic_set(&cq->usecnt, 0);
2142 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2143 rdma_restrack_set_name(&cq->res, caller);
2145 ret = device->ops.create_cq(cq, cq_attr, NULL);
2147 rdma_restrack_put(&cq->res);
2149 return ERR_PTR(ret);
2152 rdma_restrack_add(&cq->res);
2155 EXPORT_SYMBOL(__ib_create_cq);
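/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * creating a CQ via the ib_create_cq() wrapper around __ib_create_cq(). Most
 * kernel ULPs use the higher-level ib_alloc_cq() API instead.
 */
static inline struct ib_cq *example_create_cq(struct ib_device *device,
					      ib_comp_handler handler,
					      void *ctx)
{
	struct ib_cq_init_attr attr = { .cqe = 256 };

	return ib_create_cq(device, handler, NULL, ctx, &attr);
}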
2157 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2162 return cq->device->ops.modify_cq ?
2163 cq->device->ops.modify_cq(cq, cq_count,
2164 cq_period) : -EOPNOTSUPP;
2166 EXPORT_SYMBOL(rdma_set_cq_moderation);
2168 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2172 if (WARN_ON_ONCE(cq->shared))
2175 if (atomic_read(&cq->usecnt))
2178 ret = cq->device->ops.destroy_cq(cq, udata);
2182 rdma_restrack_del(&cq->res);
2186 EXPORT_SYMBOL(ib_destroy_cq_user);
2188 int ib_resize_cq(struct ib_cq *cq, int cqe)
2193 return cq->device->ops.resize_cq ?
2194 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2196 EXPORT_SYMBOL(ib_resize_cq);
2198 /* Memory regions */
2200 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2201 u64 virt_addr, int access_flags)
2205 if (access_flags & IB_ACCESS_ON_DEMAND) {
2206 if (!(pd->device->attrs.kernel_cap_flags &
2207 IBK_ON_DEMAND_PAGING)) {
2208 pr_debug("ODP support not available\n");
2209 return ERR_PTR(-EINVAL);
2213 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2214 access_flags, NULL);
2219 mr->device = pd->device;
2220 mr->type = IB_MR_TYPE_USER;
2223 atomic_inc(&pd->usecnt);
2224 mr->iova = virt_addr;
2225 mr->length = length;
2227 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2228 rdma_restrack_parent_name(&mr->res, &pd->res);
2229 rdma_restrack_add(&mr->res);
2233 EXPORT_SYMBOL(ib_reg_user_mr);
2235 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2236 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2238 if (!pd->device->ops.advise_mr)
2244 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2247 EXPORT_SYMBOL(ib_advise_mr);
2249 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2251 struct ib_pd *pd = mr->pd;
2252 struct ib_dm *dm = mr->dm;
2253 struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2257 rdma_restrack_del(&mr->res);
2258 ret = mr->device->ops.dereg_mr(mr, udata);
2260 atomic_dec(&pd->usecnt);
2262 atomic_dec(&dm->usecnt);
2268 EXPORT_SYMBOL(ib_dereg_mr_user);
2271 * ib_alloc_mr() - Allocates a memory region
2272 * @pd: protection domain associated with the region
2273 * @mr_type: memory region type
2274 * @max_num_sg: maximum sg entries available for registration.
2277 * Memory registration page/sg lists must not exceed max_num_sg.
2278 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2279 * max_num_sg * used_page_size.
2282 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2287 if (!pd->device->ops.alloc_mr) {
2288 mr = ERR_PTR(-EOPNOTSUPP);
2292 if (mr_type == IB_MR_TYPE_INTEGRITY) {
2294 mr = ERR_PTR(-EINVAL);
2298 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2302 mr->device = pd->device;
2306 atomic_inc(&pd->usecnt);
2307 mr->need_inval = false;
2309 mr->sig_attrs = NULL;
2311 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2312 rdma_restrack_parent_name(&mr->res, &pd->res);
2313 rdma_restrack_add(&mr->res);
2315 trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2318 EXPORT_SYMBOL(ib_alloc_mr);
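/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * allocating a fast-registration MR, mapping a scatterlist into it, and
 * releasing it again. Error handling is trimmed for brevity.
 */
static inline int example_fast_reg_mr(struct ib_pd *pd, struct scatterlist *sg,
				      int sg_nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n != sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* ... post an IB_WR_REG_MR work request before using mr->rkey ... */

	ib_dereg_mr(mr);
	return 0;
}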
2321 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2322 * @pd: protection domain associated with the region
2323 * @max_num_data_sg: maximum data sg entries available for registration
2324 * @max_num_meta_sg: maximum metadata sg entries available for
2328 * Memory registration page/sg lists must not exceed max_num_sg,
2329 * and the integrity page/sg lists must not exceed max_num_meta_sg.
2332 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2333 u32 max_num_data_sg,
2334 u32 max_num_meta_sg)
2337 struct ib_sig_attrs *sig_attrs;
2339 if (!pd->device->ops.alloc_mr_integrity ||
2340 !pd->device->ops.map_mr_sg_pi) {
2341 mr = ERR_PTR(-EOPNOTSUPP);
2345 if (!max_num_meta_sg) {
2346 mr = ERR_PTR(-EINVAL);
2350 sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2352 mr = ERR_PTR(-ENOMEM);
2356 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2363 mr->device = pd->device;
2367 atomic_inc(&pd->usecnt);
2368 mr->need_inval = false;
2369 mr->type = IB_MR_TYPE_INTEGRITY;
2370 mr->sig_attrs = sig_attrs;
2372 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2373 rdma_restrack_parent_name(&mr->res, &pd->res);
2374 rdma_restrack_add(&mr->res);
2376 trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2379 EXPORT_SYMBOL(ib_alloc_mr_integrity);
/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	unsigned int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	rdma_for_each_port(qp->device, port)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.attach_mcast)
		return -EOPNOTSUPP;
	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.detach_mcast)
		return -EOPNOTSUPP;
	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
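
/*
 * Illustrative sketch, not part of the core verbs API: a hypothetical UD QP
 * joining and later leaving a multicast group. The MGID and MLID are assumed
 * to have been obtained beforehand (e.g. from the SA); the function name and
 * parameters are invented for this example.
 */
static int __maybe_unused example_mcast_membership(struct ib_qp *ud_qp,
						   union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive datagrams addressed to the multicast group ... */

	return ib_detach_mcast(ud_qp, mgid, mlid);
}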
/**
 * ib_alloc_xrcd_user - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @inode: inode to connect XRCD
 * @udata: Valid user data or NULL for kernel object
 */
struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
				   struct inode *inode, struct ib_udata *udata)
{
	struct ib_xrcd *xrcd;
	int ret;

	if (!device->ops.alloc_xrcd)
		return ERR_PTR(-EOPNOTSUPP);

	xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	xrcd->device = device;
	xrcd->inode = inode;
	atomic_set(&xrcd->usecnt, 0);
	init_rwsem(&xrcd->tgt_qps_rwsem);
	xa_init(&xrcd->tgt_qps);

	ret = device->ops.alloc_xrcd(xrcd, udata);
	if (ret)
		goto err;
	return xrcd;
err:
	kfree(xrcd);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_xrcd_user);
/**
 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 */
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	WARN_ON(!xa_empty(&xrcd->tgt_qps));
	ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
	if (ret)
		return ret;
	kfree(xrcd);
	return ret;
}
EXPORT_SYMBOL(ib_dealloc_xrcd_user);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_attr->max_wr and wq_attr->max_sge determine
 * the requested size of the WQ and are updated to the actual values
 * allocated on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->ops.create_wq)
		return ERR_PTR(-EOPNOTSUPP);

	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);
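
/*
 * Illustrative sketch, not part of the core verbs API: creating a receive
 * work queue, e.g. for use in an RSS indirection table. The CQ is assumed
 * to have been allocated by the (hypothetical) caller; the queue sizes are
 * placeholders only.
 */
static struct ib_wq * __maybe_unused example_create_rq(struct ib_pd *pd,
							struct ib_cq *cq)
{
	struct ib_wq_init_attr wq_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};

	return ib_create_wq(pd, &wq_attr);
}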
/**
 * ib_destroy_wq_user - Destroys the specified user WQ.
 * @wq: The WQ to destroy.
 * @udata: Valid user data
 */
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
{
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;
	int ret;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	ret = wq->device->ops.destroy_wq(wq, udata);
	if (ret)
		return ret;

	atomic_dec(&pd->usecnt);
	atomic_dec(&cq->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_destroy_wq_user);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	if (!mr->device->ops.check_mr_status)
		return -EOPNOTSUPP;

	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
}
EXPORT_SYMBOL(ib_check_mr_status);
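
/*
 * Illustrative sketch, not part of the core verbs API: how a hypothetical
 * ULP could check an integrity MR for signature errors once the transfer
 * has completed. The function name is invented; IB_MR_CHECK_SIG_STATUS and
 * struct ib_mr_status come from rdma/ib_verbs.h.
 */
static int __maybe_unused example_check_sig(struct ib_mr *sig_mr)
{
	struct ib_mr_status status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &status);
	if (ret)
		return ret;

	if (status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_warn("signature error type %d at offset %llu\n",
			status.sig_err.err_type,
			(unsigned long long)status.sig_err.sig_err_offset);
	return 0;
}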
int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
			 int state)
{
	if (!device->ops.set_vf_link_state)
		return -EOPNOTSUPP;

	return device->ops.set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
		     struct ifla_vf_info *info)
{
	if (!device->ops.get_vf_config)
		return -EOPNOTSUPP;

	return device->ops.get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->ops.get_vf_stats)
		return -EOPNOTSUPP;

	return device->ops.get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
		   int type)
{
	if (!device->ops.set_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid)
{
	if (!device->ops.get_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
}
EXPORT_SYMBOL(ib_get_vf_guid);
/**
 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
 *     information) and set an appropriate memory region for registration.
 * @mr:             memory region
 * @data_sg:        dma mapped scatterlist for data
 * @data_sg_nents:  number of entries in data_sg
 * @data_sg_offset: offset in bytes into data_sg
 * @meta_sg:        dma mapped scatterlist for metadata
 * @meta_sg_nents:  number of entries in meta_sg
 * @meta_sg_offset: offset in bytes into meta_sg
 * @page_size:      page vector desired page size
 *
 * Constraints:
 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
 *
 * Return: 0 on success.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
					    data_sg_offset, meta_sg,
					    meta_sg_nents, meta_sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg_pi);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it as the page vector of the memory region.
 * @mr:        memory region
 * @sg:        dma mapped scatterlist
 * @sg_nents:  number of entries in sg
 * @sg_offset: offset in bytes into sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the total byte length of sg_nents exceeds max_num_sg * page_size,
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints hold and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
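
/*
 * Illustrative sketch, not part of the core verbs API: mapping a dma mapped
 * scatterlist into an MR allocated with ib_alloc_mr() and posting the
 * fast-registration work request. The QP, MR and scatterlist are assumed to
 * have been set up by the (hypothetical) caller; rkey management and error
 * handling are reduced to the minimum.
 */
static int __maybe_unused example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
					   struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	/* Build the MR page vector from the SG prefix using PAGE_SIZE blocks. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}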
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:          memory region
 * @sgl:         dma mapped scatterlist
 * @sg_nents:    number of entries in sgl
 * @sg_offset_p: ==== =======================================================
 *               IN   start offset in bytes into sgl
 *               OUT  offset in bytes for element n of the sg of the first
 *                    byte that has not been processed where n is the return
 *                    value of this function.
 *               ==== =======================================================
 * @set_page:    driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of the given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
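
/*
 * Illustrative sketch, not part of the core verbs API: how a hypothetical
 * driver could implement its ->map_mr_sg() callback on top of
 * ib_sg_to_pages(). "struct example_mr" and its page_list/npages fields are
 * invented for this sketch; real drivers keep this state in their own MR
 * structure.
 */
struct example_mr {
	struct ib_mr ibmr;
	u64 *page_list;
	int npages;
	int max_pages;
};

/* Called once per page by ib_sg_to_pages() to record the page address. */
static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages == emr->max_pages)
		return -ENOMEM;
	emr->page_list[emr->npages++] = addr;
	return 0;
}

static int __maybe_unused example_map_mr_sg(struct ib_mr *ibmr,
					    struct scatterlist *sg, int sg_nents,
					    unsigned int *sg_offset)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}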
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}
/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_post_send(qp, &swr.wr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}
/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_post_recv(qp, &rwr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_sq)
		qp->device->ops.drain_sq(qp);
	else
		__ib_drain_sq(qp);
	trace_cq_drain_complete(qp->send_cq);
}
EXPORT_SYMBOL(ib_drain_sq);
/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that. Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_rq)
		qp->device->ops.drain_rq(qp);
	else
		__ib_drain_rq(qp);
	trace_cq_drain_complete(qp->recv_cq);
}
EXPORT_SYMBOL(ib_drain_rq);
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
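
/*
 * Illustrative sketch, not part of the core verbs API: tearing down a QP
 * whose CQs were allocated with ib_alloc_cq(), as the comments above
 * require. Draining first ensures no completion for this QP is reaped after
 * ib_destroy_qp() returns. The helper name is invented for this example.
 */
static void __maybe_unused example_teardown_qp(struct ib_qp *qp)
{
	/* Flush both queues and wait for the drain completions. */
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}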
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *))
{
	struct rdma_netdev_alloc_params params;
	struct net_device *netdev;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return ERR_PTR(-EOPNOTSUPP);

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return ERR_PTR(rc);

	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
				  setup, params.txqs, params.rxqs);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	return netdev;
}
EXPORT_SYMBOL(rdma_alloc_netdev);
int rdma_init_netdev(struct ib_device *device, u32 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev)
{
	struct rdma_netdev_alloc_params params;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return -EOPNOTSUPP;

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return rc;

	return params.initialize_rdma_netdev(device, port_num,
					     netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist, unsigned int nents,
			     unsigned long pgsz)
{
	memset(biter, 0, sizeof(struct ib_block_iter));
	biter->__sg = sglist;
	biter->__sg_nents = nents;

	/* Driver provides best block size to use */
	biter->__pg_bit = __fls(pgsz);
}
EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
	unsigned int block_offset;
	unsigned int sg_delta;

	if (!biter->__sg_nents || !biter->__sg)
		return false;

	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
	sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;

	if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
		biter->__sg_advance += sg_delta;
	} else {
		biter->__sg_advance = 0;
		biter->__sg = sg_next(biter->__sg);
		biter->__sg_nents--;
	}

	return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
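
/*
 * Illustrative sketch, not part of the core verbs API: walking a dma mapped
 * scatterlist in aligned blocks with the block iterator, the way a
 * hypothetical driver would build its page list. PAGE_SIZE stands in for
 * the driver's best supported block size; the function name is invented.
 */
static void __maybe_unused example_walk_blocks(struct scatterlist *sgl,
					       unsigned int nents)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sgl, &biter, nents, PAGE_SIZE)
		pr_debug("block at dma address 0x%llx\n",
			 (unsigned long long)rdma_block_iter_dma_address(&biter));
}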
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for the drivers.
 * @descs: array of static descriptors
 * @num_counters: number of elements in array
 * @lifespan: milliseconds between updates
 */
struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const struct rdma_stat_desc *descs, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
	if (!stats)
		return NULL;

	stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
				     sizeof(*stats->is_disabled), GFP_KERNEL);
	if (!stats->is_disabled)
		goto err;

	stats->descs = descs;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);
	mutex_init(&stats->lock);

	return stats;

err:
	kfree(stats);
	return NULL;
}
EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
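
/*
 * Illustrative sketch, not part of the core verbs API: a hypothetical
 * driver's counter table and its ->alloc_hw_device_stats() callback built
 * on rdma_alloc_hw_stats_struct(). The descriptor and function names are
 * invented for this example.
 */
static const struct rdma_stat_desc example_stat_descs[] = {
	[0] = { .name = "example_tx_packets" },
	[1] = { .name = "example_rx_packets" },
};

static struct rdma_hw_stats * __maybe_unused
example_alloc_hw_device_stats(struct ib_device *ibdev)
{
	/* Refresh counters at most once per default lifespan (10 ms). */
	return rdma_alloc_hw_stats_struct(example_stat_descs,
					  ARRAY_SIZE(example_stat_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}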
/**
 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
 * @stats: statistics to release
 */
void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
{
	if (!stats)
		return;

	kfree(stats->is_disabled);
	kfree(stats);
}
EXPORT_SYMBOL(rdma_free_hw_stats_struct);