/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_MAX_FW_MSS		4120

#define QED_EP_SIG 0xecabcdef

struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_SEND_RTR		0x4000 /* on ird */
#define MPA_V2_READ_RTR		0x4000 /* on ord */
#define MPA_V2_WRITE_RTR	0x8000
#define MPA_V2_IRD_ORD_MASK	0x3FFF
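
/* In the enhanced (MPA rev 2) handshake the private data starts with the
 * two big-endian 16-bit words above: bits 15:14 carry the peer-to-peer /
 * RTR flags and bits 13:0 carry the ird/ord value itself. Illustrative
 * example (made-up values, not from a trace): ird = 0x8010 encodes
 * "peer2peer model, ird 16"; ord = 0x4001 encodes "read RTR, ord 1".
 */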
62 #define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
64 #define QED_IWARP_INVALID_TCP_CID 0xffffffff
65 #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
66 #define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
67 #define TIMESTAMP_HEADER_SIZE (12)
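/* The 12 bytes are the TCP timestamp option (10 bytes, padded to a 12 byte
 * option word), carried in every segment once timestamps are negotiated;
 * they are subtracted from the MSS wherever QED_IWARP_TS_EN is set.
 */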

#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_DA_EN			BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps) -
		      QED_IWARP_PREALLOC_CNT;

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps, one for tcp which should be used only from passive
 * syn processing and replacing a pre-allocated ep in the list. The second
 * for active tcp and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	if (cid < QED_IWARP_PREALLOC_CNT)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				    cid);
	else
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate iwarp cid\n");
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}

static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that it is
 * assured that these cids already have ilt allocated. They are preallocated
 * to ensure that we won't need to allocate memory during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "can't allocate iwarp tcp cid max-count=%d\n",
			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);

		*cid = QED_IWARP_INVALID_TCP_CID;
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
					    p_hwfn->p_rdma_info->proto);
	return 0;
}

int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}

static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);
	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc=%d\n", qp->icid, rc);

	return rc;
}

enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

const char *iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};

int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from upper-layer or as a result of async
	 * RST/FIN... therefore need to protect
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}
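
/* Summary of the transition table implemented above (derived from the
 * switch statement; rows are the previous state):
 *
 *   IDLE      -> RTS, ERROR
 *   RTS       -> CLOSING, ERROR
 *   ERROR     -> IDLE, CLOSING (no-op)
 *   TERMINATE -> any requested state
 *   CLOSING   -> any requested state
 *
 * Only upper-layer requested (!internal) transitions into CLOSING/ERROR
 * are propagated to firmware via qed_iwarp_modify_fw().
 */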

int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}

static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}

int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_iwarp_ep *ep = qp->ep;
	int wait_count = 0;
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	/* Make sure ep is closed before returning and freeing memory. */
	if (ep) {
		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
		       wait_count++ < 200)
			msleep(100);

		if (ep->state != QED_IWARP_EP_CLOSED)
			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
				  ep->state);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
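
/* The close wait above is bounded: 200 polls x 100ms, i.e. roughly 20
 * seconds before we give up and log the close timeout.
 */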

static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		rc = -ENOMEM;
		goto err;
	}

	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;

err:
	kfree(ep);
	return rc;
}

static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}

static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
		    cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}

static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the requested incoming ord/ird in
		 * cm_info; they are replaced with the negotiated values
		 * during accept.
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
				       mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}
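
/* Worked example of the enhanced-mode negotiation above (illustrative
 * values): if the peer's MPA v2 header carries ird=0x8001 and ord=0x4002,
 * then PEER2PEER_MODEL is set (bit 15 of ird), the requested ird is 1 and
 * ord is 2 (low 14 bits), and READ_RTR (bit 14 of ord) selects
 * MPA_RTR_TYPE_ZERO_READ - subject to the intersection with our own
 * iwarp_info->rtr_type.
 */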

static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	int rc;

	if (!ep)
		return -EINVAL;

	qp = ep->qp;
	reject = !qp;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
	out_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
		       out_pdata_phys);
	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
	    ep->cm_info.private_data_len;
	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;

	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		p_mpa_ramrod->common.reject = 1;
	}

	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid;	/* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);

	return rc;
}

static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code, it's ok if tcp_cid
		 * remains invalid...in this case we'll defer allocation
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	list_del(&ep->list_entry);
	list_add_tail(&ep->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}
	async_data = &ep->ep_buffer_virt->async_output;

	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
				       mpa_data_size;
}

static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		DP_NOTICE(p_hwfn,
			  "MPA reply event not expected on passive side!\n");
		return;
	}

	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

	qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->mpa_reply_processed = true;

	ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS,
				    true);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	if (fw_return_code != RDMA_RETURN_OK)
		/* paired with READ_ONCE in destroy_qp */
		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	ep->event_cb(ep->cb_context, &params);

	/* on passive side, if there is no associated QP (REJECT) we need to
	 * return the ep to the pool, (in the regular case we add an element
	 * in accept instead of this one).
	 * In both cases we need to remove it from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) {	/* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}

static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *mpa_v2_params;
	u16 mpa_ird, mpa_ord;

	*mpa_data_size = 0;
	if (MPA_REV2(ep->mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
		*mpa_data_size = sizeof(*mpa_v2_params);

		mpa_ird = (u16)ep->cm_info.ird;
		mpa_ord = (u16)ep->cm_info.ord;

		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
			mpa_ird |= MPA_V2_PEER2PEER_MODEL;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
				mpa_ird |= MPA_V2_SEND_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
				mpa_ord |= MPA_V2_WRITE_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
				mpa_ord |= MPA_V2_READ_RTR;
		}

		mpa_v2_params->ird = htons(mpa_ird);
		mpa_v2_params->ord = htons(mpa_ord);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
			   mpa_v2_params->ird,
			   mpa_v2_params->ord,
			   *((u32 *)mpa_v2_params),
			   mpa_ord & MPA_V2_IRD_ORD_MASK,
			   mpa_ird & MPA_V2_IRD_ORD_MASK,
			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
			   !!(mpa_ird & MPA_V2_SEND_RTR),
			   !!(mpa_ord & MPA_V2_WRITE_RTR),
			   !!(mpa_ord & MPA_V2_READ_RTR));
	}
}

int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u8 ts_hdr_size = 0;
	u32 cid;
	int rc;

	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);

		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	ep->mss = iparams->mss - ts_hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
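	/* e.g. (illustrative) with timestamps enabled and an iparams->mss
	 * of 1460 this yields 1460 - 12 = 1448, well under the 4120
	 * firmware cap.
	 */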
	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;
err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}

static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* in some cases we could have failed allocating a tcp cid when added
	 * from accept / failure... retry now; this is not the common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* if we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared, as long as
 * there is progress (i.e. the number of bits left to be cleared decreases)
 * the function continues.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}
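
/* With the constants above the wait polls every 100ms and gives up after
 * 5 consecutive polls with no progress, i.e. roughly half a second after
 * the bitmap stops draining.
 */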

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	int rc;
	int i;

	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
					    &p_hwfn->p_rdma_info->tcp_cid_map);
	if (rc)
		return rc;

	/* Now free the tcp cids from the main cid map */
	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}

static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
				      struct qed_iwarp_ep, list_entry);

		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}

static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code, it's ok if
			 * tcp_cid remains invalid...in this case we'll
			 * defer allocation
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}

int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate bitmap for tcp cid. These are used by passive side
	 * to ensure it can allocate a tcp cid during dpc that was
	 * pre-acquired and doesn't require dynamic allocation of ilt
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate tcp cid, rc = %d\n", rc);
		return rc;
	}

	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	return qed_iwarp_prealloc_ep(p_hwfn, true);
}

void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
}

int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "EP context received in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if upper layer requested ord larger than
		 * ird advertised by remote, we need to decrease our ord
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR,
				    true);

	return rc;
}

int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "EP context received in reject is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);

	ep->cb_context = iparams->cb_context;
	ep->qp = NULL;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	return qed_iwarp_mpa_offload(p_hwfn, ep);
}

static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}

static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}

static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_ep *ep = NULL;
	bool found = false;

	list_for_each_entry(ep,
			    &p_hwfn->p_rdma_info->iwarp.ep_list,
			    list_entry) {
		if ((ep->cm_info.local_port == cm_info->local_port) &&
		    (ep->cm_info.remote_port == cm_info->remote_port) &&
		    (ep->cm_info.vlan == cm_info->vlan) &&
		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
			    sizeof(cm_info->local_ip)) &&
		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
			    sizeof(cm_info->remote_ip))) {
			found = true;
			break;
		}
	}

	if (found) {
		DP_NOTICE(p_hwfn,
			  "SYN received on active connection - dropping\n");
		qed_iwarp_print_cm_info(p_hwfn, cm_info);

		return true;
	}

	return false;
}

static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_listener *listener = NULL;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	bool found = false;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port == cm_info->local_port) {
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
				found = true;
				break;
			}

			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
			    (listener->vlan == cm_info->vlan)) {
				found = true;
				break;
			}
		}
	}

	if (found) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
			   listener);
		return listener;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}

static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen;
	int ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
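	/* eth_hlen: 14 for an untagged frame, 18 when a single 802.1Q tag
	 * is present (sizeof(u32) covers the 4 byte tag).
	 */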

	if (!ether_addr_equal(ethh->h_dest,
			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "Got unexpected mac %pM instead of %pM\n",
			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
		return -EINVAL;
	}

	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
		   eth_type, ethh->h_source);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	if (eth_type == ETH_P_IP) {
		if (iph->protocol != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  iph->protocol);
			return -EINVAL;
		}

		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;

		if (ip6h->nexthdr != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  ip6h->nexthdr);
			return -EINVAL;
		}

		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}

static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ts_hdr_size = 0;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = data->vlan;

		if (GET_FIELD(data->parse_flags,
			      PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
			SET_FIELD(tx_pkt.bd_flags,
				  CORE_TX_BD_DATA_VLAN_INSERTION, 1);

		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
	/* There may be an open ep on this connection if this is a syn
	 * retransmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
		   ts_hdr_size;
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
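	/* e.g. (illustrative) IPv4 with timestamps and max_mtu = 1500:
	 * mss = 1500 - (40 + 12) = 1448; the 40 is 20 bytes of IPv4 plus
	 * 20 bytes of base TCP header (60 for IPv6).
	 */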
	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}
	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}

static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	/* this was originally an rx packet, post it back */
	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
	return rc;
}

static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}
	return rc;
}

#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)
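
/* Example: for a 1500 byte MTU, and assuming ETH_CACHE_LINE_SIZE of 64,
 * this is ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64) = 1600
 * bytes per ll2 rx buffer.
 */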

static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc = 0;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}

int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
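	/* With the defaults above: ilog2(256K) - ilog2(64K) = 18 - 16 = 2,
	 * i.e. the 16-bit TCP window field is interpreted shifted left by
	 * two (a scale factor of 4).
	 */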
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
			       MPA_RTR_TYPE_ZERO_WRITE |
			       MPA_RTR_TYPE_ZERO_READ;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}

int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);
	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}

void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			 0 : -ECONNRESET;

	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->event_cb(ep->cb_context, &params);
}

void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep, int fw_ret_code)
{
	struct qed_iwarp_cm_event_params params;
	bool event_cb = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
		   ep->cid, fw_ret_code);

	switch (fw_ret_code) {
	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
		params.status = 0;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
		params.status = -ECONNRESET;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
		params.event = QED_IWARP_EVENT_RQ_EMPTY;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
		params.event = QED_IWARP_EVENT_IRQ_FULL;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
		event_cb = true;
		break;
	default:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled exception received...fw_ret_code=%d\n",
			   fw_ret_code);
		break;
	}

	if (event_cb) {
		params.ep_context = ep;
		params.cm_info = &ep->cm_info;
		ep->event_cb(ep->cb_context, &params);
	}
}

static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}

static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Done with the SYN packet, post back to ll2 rx */
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);

		ep->syn = NULL;

		/* If connect failed - upper layer doesn't know about it */
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_received(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	} else {
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_offload(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	}
}

static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	if (!ep || (ep->sig != QED_EP_SIG)) {
		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
		return false;
	}

	return true;
}

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 cid;

	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);

	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);

		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			QED_IWARP_EVENT_CQ_OVERFLOW,
			(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}

int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *listener;

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->ip_version = iparams->ip_version;
	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
	listener->port = iparams->port;
	listener->vlan = iparams->vlan;

	listener->event_cb = iparams->event_cb;
	listener->cb_context = iparams->cb_context;
	listener->max_backlog = iparams->max_backlog;
	oparams->handle = listener;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   listener->event_cb,
		   listener,
		   listener->ip_addr[0],
		   listener->ip_addr[1],
		   listener->ip_addr[2],
		   listener->ip_addr[3], listener->port, listener->vlan);

	return 0;
}

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_iwarp_listener *listener = handle;
	struct qed_hwfn *p_hwfn = rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(listener);

	return 0;
}

int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_iwarp_ep *ep;
	struct qed_rdma_qp *qp;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "EP context received in send_rtr is NULL\n");
		return -EINVAL;
	}

	qp = ep->qp;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   qp->icid, ep->tcp_cid);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

	return rc;
}

void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}