/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

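/* Generic ID-bitmap helpers used throughout this file.
 *
 * A qed_bmap is a named bitmap of max_count bits; qed_rdma_bmap_alloc_id()
 * hands out the lowest free index and qed_bmap_release_id() returns it.
 * Callers are expected to serialize access with p_rdma_info->lock, e.g.
 * (illustrative sketch, not a specific call site in this file):
 *
 *	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 *	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &id);
 *	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 */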
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}

int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

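/* Allocate all per-function RDMA bookkeeping: the qed_rdma_info struct,
 * device/port parameter structs, and one qed_bmap per resource type
 * (PDs, DPIs, CQs, CQ toggle bits, MR tids, QP cids). The goto labels
 * at the bottom unwind these allocations in exact reverse order, so any
 * new allocation added here should gain a matching label.
 */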
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_info *p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	/* Allocate a struct with current pf rdma info */
	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return rc;

	p_hwfn->p_rdma_info = p_rdma_info;
	p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		goto free_rdma_info;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded by
	 * twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
				 p_rdma_info->num_qps * 2, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * The maximum number of CQs is bounded by twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 p_rdma_info->num_qps * 2, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_cid_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);
free_rdma_info:
	kfree(p_rdma_info);

	return rc;
}

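/* Free a bitmap and, when @check is set, report any IDs still in use.
 * The leak dump prints the map 512 bits (eight u64 words) per line,
 * which is why last_line/last_item below are computed in units of
 * 64 * 8 bits.
 */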
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	int last_line = bmap->max_count / (64 * 8);
	int last_item = last_line * 8 +
	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
	u64 *pmap = (u64 *)bmap->bitmap;
	int line, item, offset;
	u8 str_last_line[200] = { 0 };

	if (!weight || !check)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8)
		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line,
				  pmap[item],
				  pmap[item + 1],
				  pmap[item + 2],
				  pmap[item + 3],
				  pmap[item + 4],
				  pmap[item + 5],
				  pmap[item + 6], pmap[item + 7]);

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (bitmap_weight((unsigned long *)&pmap[item],
			   bmap->max_count - item * 64))) {
		offset = sprintf(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++)
			offset += sprintf(str_last_line + offset,
					  "0x%016llx ", pmap[item]);
		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
	}

end:
	kfree(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);

	kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	/* The ILT free consults p_rdma_info->proto, so it must run before
	 * qed_rdma_resc_free() releases the p_rdma_info struct.
	 */
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}

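/* Derive a 64-bit node GUID from the port MAC address using the usual
 * MAC-to-EUI-64 expansion: flip the universal/local bit in the first
 * octet and splice 0xff, 0xfe between the OUI and the NIC-specific
 * bytes. E.g. MAC 00:0c:29:aa:bb:cc yields the GUID bytes
 * 02 0c 29 ff fe aa bb cc.
 */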
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = cdev->chip_rev;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
	    QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}

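/* Post the RDMA_RAMROD_FUNC_INIT ramrod that brings up the FW side of
 * this PF: it carries one rdma_cnq_params entry per requested CNQ
 * (status-block id, PBL base address, queue-zone number), plus the CQ
 * ring mode. The SPQ request blocks until completion
 * (QED_SPQ_MODE_EBLOCK).
 */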
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	else
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

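/* TID (task id) allocation: grab a free index from tid_map under the
 * rdma lock, then make sure the ILT has a backing page for that task
 * via qed_cxt_dynamic_ilt_alloc(). Each MR consumes exactly one TID.
 */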
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* The first DPI is reserved for the Kernel */
	__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	spin_lock_init(&p_hwfn->p_rdma_info->lock);

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
				     dpi_start_offset +
				     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* Link may have changed */
	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
	    QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

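/* Write the protocol driver's CNQ producer value into the USTORM
 * common-queue slot of the matching queue zone, through the BAR0 GTT
 * window. The caller passes a queue-zone offset, which this driver
 * assumes equals the CNQ index (see qed_rdma_start_fw()).
 */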
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;
	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

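/* CQ icids carry a toggle bit that is flipped on every create or
 * resize of the CQ behind a given icid (see the toggle_bits bitmap in
 * qed_rdma_alloc()). The helper below flips the per-icid bit and
 * returns its new value, which is then written into the create-CQ
 * ramrod.
 */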
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* The function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value.
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;
	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

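/* Pack a 6-byte MAC into the three little-endian 16-bit words the FW
 * expects, big-endian within each word. Worked example:
 * 00:11:22:33:44:55 becomes p_fw_mac[] = { 0x0011, 0x2233, 0x4455 }
 * (each word then stored via cpu_to_le16).
 */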
void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc) {
		kfree(qp);
		return NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);

		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

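/* Build and post the RDMA_RAMROD_REGISTER_MR ramrod. The ramrod flags
 * encode the access rights, PBL layout and page-size logs (stored
 * biased by 12, i.e. relative to 4KB pages); va/pbl_base carry the
 * mapped region. The SPQ completion returns an fw_return_code that
 * must be RDMA_RETURN_OK for the registration to count.
 */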
static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case QED_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
	bool result;

	/* if rdma info has not been allocated, naturally there are no qps */
	if (!p_hwfn->p_rdma_info)
		return false;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	if (!p_hwfn->p_rdma_info->cid_map.bitmap)
		result = false;
	else
		result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	return result;
}

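/* DPM enable (see DORQ_REG_PF_DPM_ENABLE) is asserted only while
 * neither p_hwfn->dcbx_no_edpm nor p_hwfn->db_bar_no_edpm has vetoed
 * it. qed_rdma_dpm_bar() below is the doorbell-BAR veto path: it
 * latches db_bar_no_edpm and re-runs the computation.
 */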
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

static int qed_rdma_start(void *rdma_cxt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);
	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
				       u8 *old_mac_address,
				       u8 *new_mac_address)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
		return -EINVAL;
	}

	if (old_mac_address)
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address);

	qed_ptt_release(p_hwfn, p_ptt);

	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add MAC filter\n");

	return rc;
}

static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_port = &qed_rdma_query_port,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
	.rdma_modify_qp = &qed_rdma_modify_qp,
	.rdma_query_qp = &qed_rdma_query_qp,
	.rdma_destroy_qp = &qed_rdma_destroy_qp,
	.rdma_alloc_tid = &qed_rdma_alloc_tid,
	.rdma_free_tid = &qed_rdma_free_tid,
	.rdma_register_tid = &qed_rdma_register_tid,
	.rdma_deregister_tid = &qed_rdma_deregister_tid,
	.ll2_acquire_connection = &qed_ll2_acquire_connection,
	.ll2_establish_connection = &qed_ll2_establish_connection,
	.ll2_terminate_connection = &qed_ll2_terminate_connection,
	.ll2_release_connection = &qed_ll2_release_connection,
	.ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
	.ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
	.ll2_get_stats = &qed_ll2_get_stats,
	.iwarp_connect = &qed_iwarp_connect,
	.iwarp_create_listen = &qed_iwarp_create_listen,
	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
	.iwarp_accept = &qed_iwarp_accept,
	.iwarp_reject = &qed_iwarp_reject,
	.iwarp_send_rtr = &qed_iwarp_send_rtr,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
	return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);