/* QLogic qed NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/qed/qed_roce_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_roce.h"
#include "qed_sp.h"

void qed_async_roce_event(struct qed_hwfn *p_hwfn,
			  struct event_ring_entry *p_eqe)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
					     p_eqe->opcode, &p_eqe->data);
}

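/* Simple bitmap-based ID allocator. qed_rdma_bmap_alloc() sizes the bitmap,
 * qed_rdma_bmap_alloc_id() hands out the lowest free bit and
 * qed_bmap_release_id() returns it; callers serialize through
 * p_rdma_info->lock.
 */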
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			       struct qed_bmap *bmap, u32 max_count)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap) {
		DP_NOTICE(p_hwfn,
			  "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
		return -ENOMEM;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
		   bmap->bitmap);
	return 0;
}

static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
				  struct qed_bmap *bmap, u32 *id_num)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);

	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);

	if (*id_num >= bmap->max_count) {
		DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
			  bmap->max_count);
		return -EINVAL;
	}

	__set_bit(*id_num, bmap->bitmap);

	return 0;
}

static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
				struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x\n", id_num);
	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
		return;
	}
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

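/* Allocate the per-PF qed_rdma_info structure and the bitmaps that track
 * PDs, DPIs, CQs, CQ toggle bits, TIDs and QP CIDs. Each QP consumes two
 * consecutive CIDs (responder and requester), hence num_qps = num_cons / 2.
 */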
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_info *p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	/* Allocate a struct with current pf rdma info */
	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
			  rc);
		return rc;
	}

	p_hwfn->p_rdma_info = p_rdma_info;
	p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	p_rdma_info->num_qps = num_cons / 2;

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
			  rc);
		goto free_rdma_info;
	}

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port) {
		DP_NOTICE(p_hwfn,
			  "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
			  rc);
		goto free_rdma_dev;
	}

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bounded to
	 * twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * The maximum number of CQs is bounded to twice the number of QPs.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 p_rdma_info->num_qps * 2);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);
free_rdma_info:
	kfree(p_rdma_info);

	return rc;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	kfree(p_rdma_info->cid_map.bitmap);
	kfree(p_rdma_info->tid_map.bitmap);
	kfree(p_rdma_info->toggle_bits.bitmap);
	kfree(p_rdma_info->cq_map.bitmap);
	kfree(p_rdma_info->dpi_map.bitmap);
	kfree(p_rdma_info->pd_map.bitmap);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);

	kfree(p_rdma_info);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_resc_free(p_hwfn);
}

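/* Derive an EUI-64 GUID from the port MAC address: flip the universal/local
 * bit of the first octet and insert 0xff, 0xfe in the middle, as in the
 * standard MAC-to-EUI-64 mapping.
 */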
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = 0;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT, but we don't want to configure the FW
	 * above its abilities.
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs use the same icids that QPs use, hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mw = 0;
	dev->max_fmr = QED_RDMA_MAX_FMR;
	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pci_read_config_dword(cdev->pdev,
			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
			      &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* We delay writing to this reg until first cid is allocated. See
	 * qed_cxt_dynamic_ilt_alloc function for more details
	 */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}

static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;

	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
		p_cnq_params->sb_num =
			cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				    cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

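/* A TID (task id) backs a single MR. Allocation takes an id from the tid_map
 * bitmap and then makes sure the matching ILT task page exists, allocating it
 * dynamically on first use.
 */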
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* The first DPI is reserved for the Kernel */
	__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	spin_lock_init(&p_hwfn->p_rdma_info->lock);

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}

static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
				     dpi_start_offset +
				     ((out_params->dpi) * p_hwfn->dpi_size));

	out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* Link may have changed */
	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
			     QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;
	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_RDMA_TYPE_ROCE;

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* toggle the bit related to the given icid and return the new
	 * toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = params->int_timeout;

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}

static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    (struct rdma_destroy_cq_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn,
							 p_hwfn->p_rdma_info->proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

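/* For RoCE v2 over IPv4 the 4-byte address is carried in the last dword of
 * the 16-byte GID field with the lower dwords zeroed; for RoCE v1 and
 * v2-IPv6 the GID (or IPv6 address) is copied dword by dword.
 */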
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	switch (roce_mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;
	default:
		return MAX_ROCE_FLAVOR;
	}
}

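/* QP CIDs come in adjacent pairs: the even icid is used by the responder and
 * icid + 1 by the requester. Both ids are taken from cid_map under the rdma
 * lock and released together on any failure.
 */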
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
		   rc, physical_queue0);

	if (rc)
		goto err;

	qp->resp_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

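/* Requester counterpart of the function above: runs on icid + 1, allocates
 * the ORQ ring instead of the IRQ, and programs the SQ parameters (SQ PBL,
 * retry counts, ack timeout); on success marks req_offloaded.
 */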
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params qm_params;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 physical_queue0 = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->sq_cq_id);

	memset(&qm_params, 0, sizeof(qm_params));
	qm_params.roce.qpid = qp->icid >> 1;
	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);

	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

	if (rc)
		goto err;

	qp->req_offloaded = true;

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_invalidated_mw)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

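/* Query is split like create/destroy: a responder ramrod (icid) returns the
 * RQ PSN and error flag, and a requester ramrod (icid + 1) returns the SQ
 * PSN plus the error and draining flags; both use a DMA-able output buffer.
 */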
static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to the fw yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
			   dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_SQE;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 start_cid;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
	if (rc)
		return rc;

	/* Send destroy requester ramrod */
	rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
	if (rc)
		return rc;

	if (num_invalidated_mw != num_bound_mw) {
		DP_NOTICE(p_hwfn,
			  "number of invalidated memory windows is different from number of bound ones\n");
		return -EINVAL;
	}

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_hwfn->p_rdma_info->proto);

	/* Release responder's icid */
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
			    qp->icid - start_cid);

	/* Release requester's icid */
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
			    qp->icid + 1 - start_cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	return 0;
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}

static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
		DP_ERR(p_hwfn->cdev,
		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
		return NULL;
	}

	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
	qp->qpid = ((0xFF << 16) | qp->icid);

	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);

	if (rc) {
		kfree(qp);
		return NULL;
	}

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
			      struct qed_rdma_qp *qp,
			      enum qed_roce_qp_state prev_state,
			      struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw);
		if (rc)
			return rc;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidate memory windows is different from bounded ones\n");
			return -EINVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}
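
/* Cache the modify-QP attributes that are flagged valid in modify_flags
 * into the driver QP structure, then let qed_roce_modify_qp() translate
 * the state transition into the appropriate responder/requester ramrods.
 */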
static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}
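
/* Build and post a REGISTER_MR ramrod describing a TID (MR/MW/FMR):
 * access rights, page sizes, PBL address and the VA/length (or FBO for a
 * zero-based MR), plus the optional DIF addresses. Completion is awaited
 * in EBLOCK mode and the FW return code is checked.
 */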
static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	p_ramrod = &p_ent->ramrod.rdma_register_tid;

	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(p_ramrod->flags,
			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
		  p_hwfn->p_rdma_info->last_tid);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);

	SET_FIELD(p_ramrod->flags,
		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2,
		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW_TYPE1:
		tid_type = RDMA_TID_MW_TYPE1;
		break;
	case QED_RDMA_TID_MW_TYPE2A:
		tid_type = RDMA_TID_MW_TYPE2A;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}
	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	if (params->zbva) {
		/* Lower 32 bits of the registered MR address.
		 * In case of zero based MR, will hold FBO
		 */
		p_ramrod->va.hi = 0;
		p_ramrod->va.lo = cpu_to_le32(params->fbo);
	} else {
		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	}
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	/* Don't inspect fw_return_code unless the ramrod completed */
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}
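
/* Post a DEREGISTER_MR ramrod for @itid. If the FW reports that a NIG
 * drain is required (the TID is still in use by inflight traffic), drain
 * via the MCP and resend the ramrod once before giving up.
 */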
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_LEADING_HWFN(cdev);
}
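
/* DPM (doorbell push mode) configuration: the mode is enabled only if
 * neither DCBx (dcbx_no_edpm) nor the doorbell BAR layout
 * (db_bar_no_edpm) has vetoed EDPM on this hwfn; qed_rdma_dpm_bar()
 * below sets the latter veto.
 */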
static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
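
/* Bring up the RDMA engine on a hwfn: allocate the rdma_info resources
 * and run the FW/HW setup. On failure the allocation is rolled back and
 * the PTT is released before returning the error.
 */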
static int qed_rdma_start(void *rdma_cxt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);
	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
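
/* LL2 completion callbacks for GSI (QP1) traffic. The LL2 layer hands
 * back the packet cookie on TX completion and the raw buffer plus parsed
 * metadata on RX; both are forwarded to the callbacks registered by the
 * upper RoCE driver in qed_roce_ll2_start().
 */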
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_roce_ll2_packet *packet = cookie;
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;

	roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
}

void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				    u8 connection_handle,
				    void *cookie,
				    dma_addr_t first_frag_addr,
				    bool b_last_fragment, bool b_last_packet)
{
	qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
					cookie, first_frag_addr,
					b_last_fragment, b_last_packet);
}

void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     u16 data_length,
				     u8 data_length_error,
				     u16 parse_flags,
				     u16 vlan,
				     u32 src_mac_addr_hi,
				     u16 src_mac_addr_lo, bool b_last_packet)
{
	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
	struct qed_roce_ll2_rx_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_roce_ll2_packet pkt;

	DP_VERBOSE(cdev,
		   QED_MSG_LL2,
		   "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
		   (void *)(uintptr_t)rx_buf_addr,
		   data_length, data_length_error);

	memset(&pkt, 0, sizeof(pkt));
	pkt.n_seg = 1;
	pkt.payload[0].baddr = rx_buf_addr;
	pkt.payload[0].len = data_length;

	memset(&params, 0, sizeof(params));
	params.vlan_id = vlan;
	*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
	*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);

	if (data_length_error) {
		DP_ERR(cdev,
		       "roce ll2 rx complete: data length error %d, length=%d\n",
		       data_length_error, data_length);
		params.rc = -EINVAL;
	}

	roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
}
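
/* Swap the unicast MAC filter used by the RoCE LL2 connection: remove
 * @old_mac_address (if any) and install @new_mac_address (if any) under
 * the ll2 lock. Passing NULL for one of them makes this a pure add or a
 * pure remove.
 */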
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
				       u8 *old_mac_address,
				       u8 *new_mac_address)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev,
		       "qed roce mac filter failed - roce_info/ll2 NULL\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
		return -EINVAL;
	}

	mutex_lock(&hwfn->ll2->lock);
	if (old_mac_address)
		qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					  old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
					    new_mac_address);
	mutex_unlock(&hwfn->ll2->lock);

	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add mac filter\n");

	return rc;
}
static int qed_roce_ll2_start(struct qed_dev *cdev,
			      struct qed_roce_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2;
	struct qed_ll2_info ll2_params;
	int rc;

	if (!params) {
		DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
		return -EINVAL;
	}
	if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
		       params->cbs.tx_cb, params->cbs.rx_cb);
		return -EINVAL;
	}
	if (!is_valid_ether_addr(params->mac_address)) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
		       params->mac_address);
		return -EINVAL;
	}

	/* Initialize */
	roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
	if (!roce_ll2) {
		DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
		return -ENOMEM;
	}
	memset(roce_ll2, 0, sizeof(*roce_ll2));
	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
	roce_ll2->cbs = params->cbs;
	roce_ll2->cb_cookie = params->cb_cookie;
	mutex_init(&roce_ll2->lock);

	memset(&ll2_params, 0, sizeof(ll2_params));
	ll2_params.conn_type = QED_LL2_TYPE_ROCE;
	ll2_params.mtu = params->mtu;
	ll2_params.rx_drop_ttl0_flg = true;
	ll2_params.rx_vlan_removal_en = false;
	ll2_params.tx_dest = CORE_TX_DEST_NW;
	ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
	ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
	ll2_params.gsi_enable = true;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
					params->max_rx_buffers,
					params->max_tx_buffers,
					&roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		goto err;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc) {
		DP_ERR(cdev,
		       "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	hwfn->ll2 = roce_ll2;

	rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
	if (rc) {
		hwfn->ll2 = NULL;
		goto err2;
	}
	ether_addr_copy(roce_ll2->mac_address, params->mac_address);

	return 0;

err2:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err1:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
err:
	kfree(roce_ll2);
	return rc;
}

static int qed_roce_ll2_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	int rc;

	if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
		DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
		return -EINVAL;
	}

	/* remove LL2 MAC address filter */
	rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
	eth_zero_addr(roce_ll2->mac_address);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  roce_ll2->handle);
	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
		       rc);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);

	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
	kfree(roce_ll2);

	return rc;
}
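
/* Transmit a RoCE packet over the LL2 connection: the header fragment is
 * posted first (choosing the RoCE v1/v2 flavor and requesting IP csum
 * offload for RoCE v2 over IPv4), then each payload fragment is chained
 * onto the same TX descriptor.
 */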
static int qed_roce_ll2_tx(struct qed_dev *cdev,
			   struct qed_roce_ll2_packet *pkt,
			   struct qed_roce_ll2_tx_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
	enum qed_ll2_roce_flavor_type qed_roce_flavor;
	u8 flags = 0;
	int rc;
	int i;

	if (!pkt || !params) {
		DP_ERR(cdev,
		       "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
		       cdev, pkt, params);
		return -EINVAL;
	}

	qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
						      : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	/* Tx header */
	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
				       1 + pkt->n_seg, 0, flags, 0,
				       qed_roce_flavor, pkt->header.baddr,
				       pkt->header.len, pkt, 1);
	if (rc) {
		DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return QED_ROCE_TX_HEAD_FAILURE;
	}

	/* Tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       roce_ll2->handle,
						       pkt->payload[i].baddr,
						       pkt->payload[i].len);
		if (rc) {
			/* If failed not much to do here, partial packet has
			 * been posted; we can't free memory, will need to
			 * wait for completion
			 */
			DP_ERR(cdev,
			       "roce ll2 tx: payload failed (rc=%d)\n", rc);
			return QED_ROCE_TX_FRAG_FAILURE;
		}
	}

	return 0;
}

static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
				       struct qed_roce_ll2_buffer *buf,
				       u64 cookie, u8 notify_fw)
{
	return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
				      QED_LEADING_HWFN(cdev)->ll2->handle,
				      buf->baddr, buf->len,
				      (void *)(uintptr_t)cookie, notify_fw);
}

static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 roce_ll2->handle, stats);
}
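
/* Dispatch table exported to the RDMA protocol driver; it bundles the
 * common qed operations with the RDMA/RoCE-specific entry points defined
 * above.
 */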
static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_port = &qed_rdma_query_port,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
	.rdma_modify_qp = &qed_rdma_modify_qp,
	.rdma_query_qp = &qed_rdma_query_qp,
	.rdma_destroy_qp = &qed_rdma_destroy_qp,
	.rdma_alloc_tid = &qed_rdma_alloc_tid,
	.rdma_free_tid = &qed_rdma_free_tid,
	.rdma_register_tid = &qed_rdma_register_tid,
	.rdma_deregister_tid = &qed_rdma_deregister_tid,
	.roce_ll2_start = &qed_roce_ll2_start,
	.roce_ll2_stop = &qed_roce_ll2_stop,
	.roce_ll2_tx = &qed_roce_ll2_tx,
	.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
	.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
	.roce_ll2_stats = &qed_roce_ll2_stats,
};
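
/* A minimal usage sketch (hypothetical caller, not part of this file):
 * an RDMA protocol driver sitting on top of qed would typically do
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *
 *	ops->rdma_init(cdev, &start_params);
 *	qp = ops->rdma_create_qp(rdma_cxt, &in_params, &out_params);
 *
 * where cdev, start_params, in_params and out_params are assumed to be
 * set up by that driver.
 */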
const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
	return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);