// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"
#define QED_IWARP_ORD_DEFAULT		32
#define QED_IWARP_IRD_DEFAULT		32
#define QED_IWARP_MAX_FW_MSS		4120

#define QED_EP_SIG 0xecabcdef

struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL	0x8000
#define MPA_V2_SEND_RTR		0x4000 /* on ird */
#define MPA_V2_READ_RTR		0x4000 /* on ord */
#define MPA_V2_WRITE_RTR	0x8000
#define MPA_V2_IRD_ORD_MASK	0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID	0xffffffff

#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)

#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
#define TIMESTAMP_HEADER_SIZE		(12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)

#define QED_IWARP_TS_EN			BIT(0)
#define QED_IWARP_DA_EN			BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED	(1)
#define QED_IWARP_PARAM_P2P		(1)

#define QED_IWARP_DEF_MAX_RT_TIME	(0)
#define QED_IWARP_DEF_CWND_FACTOR	(4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
#define QED_IWARP_DEF_KA_TIMEOUT	(1200000) /* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL	(1000) /* 1 sec */
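/* A rough sketch of what these tuning values mean once handed to the FW TCP
 * offload (see qed_iwarp_tcp_offload()): keep-alive probes go out every
 * QED_IWARP_DEF_KA_INTERVAL (1000 ms), the connection is dropped after
 * QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) unanswered probes or after the
 * keep-alive timeout of 1200000 ms = 20 minutes, and the initial congestion
 * window is QED_IWARP_DEF_CWND_FACTOR (4) * MSS.
 */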
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code);
/* Override devinfo with iWARP-specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
	dev->max_qp = min_t(u32,
			    IWARP_MAX_QPS,
			    p_hwfn->p_rdma_info->num_qps) -
		      QED_IWARP_PREALLOC_CNT;

	dev->max_cq = dev->max_qp;

	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
	p_hwfn->b_rdma_enabled_in_prs = true;
}
/* We have two cid maps: one for TCP, which should be used only for passive
 * SYN processing and for replacing a pre-allocated ep in the list; the
 * second for active TCP and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	if (cid < QED_IWARP_PREALLOC_CNT)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				    cid);
	else
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
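/* Fill the iWARP-specific fields of the function-init ramrod: the RAM queue
 * used by the OOO LL2 connection and the TCP FIN retransmit limit passed
 * to FW.
 */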
void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
			 struct iwarp_init_func_ramrod_data *p_ramrod)
{
	p_ramrod->iwarp.ll2_ooo_q_index =
		RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
	if (rc)
		qed_iwarp_cid_cleaned(p_hwfn, *cid);

	return rc;
}
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
/* This function allocates a cid for passive TCP (called from SYN receive).
 * It is kept separate from the regular cid allocation because these cids
 * are assured to already have an ilt allocated: they are preallocated to
 * ensure that we won't need to allocate memory during SYN processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "can't allocate iwarp tcp cid max-count=%d\n",
			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);

		*cid = QED_IWARP_INVALID_TCP_CID;
		return rc;
	}

	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
					    p_hwfn->p_rdma_info->proto);
	return 0;
}
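/* Create an iWARP QP: allocate the DMA-coherent shared queue page (which
 * also carries the SQ/RQ PBLs returned in out_params), allocate a cid with
 * ILT backing, and post the CREATE_QP ramrod. On any failure the cid and
 * the shared queue page are released again.
 */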
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);

	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;

	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
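/* Post a MODIFY_QP ramrod. Note that only the CLOSING and ERROR transitions
 * are driven through FW; the remaining state changes are resolved purely in
 * qed_iwarp_modify_qp() below.
 */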
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags, trans_to_state;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;

	flags = le16_to_cpu(p_ramrod->flags);
	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
	p_ramrod->flags = cpu_to_le16(flags);

	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
		trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
	else
		trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;

	p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc=%d\n", qp->icid, rc);

	return rc;
}
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
	switch (state) {
	case QED_ROCE_QP_STATE_RESET:
	case QED_ROCE_QP_STATE_INIT:
	case QED_ROCE_QP_STATE_RTR:
		return QED_IWARP_QP_STATE_IDLE;
	case QED_ROCE_QP_STATE_RTS:
		return QED_IWARP_QP_STATE_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return QED_IWARP_QP_STATE_CLOSING;
	case QED_ROCE_QP_STATE_ERR:
		return QED_IWARP_QP_STATE_ERROR;
	case QED_ROCE_QP_STATE_SQE:
		return QED_IWARP_QP_STATE_TERMINATE;
	default:
		return QED_IWARP_QP_STATE_ERROR;
	}
}
static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
	switch (state) {
	case QED_IWARP_QP_STATE_IDLE:
		return QED_ROCE_QP_STATE_INIT;
	case QED_IWARP_QP_STATE_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case QED_IWARP_QP_STATE_TERMINATE:
		return QED_ROCE_QP_STATE_SQE;
	case QED_IWARP_QP_STATE_CLOSING:
		return QED_ROCE_QP_STATE_SQD;
	case QED_IWARP_QP_STATE_ERROR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}

static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from the upper layer or as a result of an
	 * async RST/FIN... therefore we need to protect it
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_iwarp_ep *ep = qp->ep;
	int wait_count = 0;
	int rc = 0;

	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
		rc = qed_iwarp_modify_qp(p_hwfn, qp,
					 QED_IWARP_QP_STATE_ERROR, false);
		if (rc)
			return rc;
	}

	/* Make sure ep is closed before returning and freeing memory. */
	if (ep) {
		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
		       wait_count++ < 200)
			msleep(100);

		if (ep->state != QED_IWARP_EP_CLOSED)
			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
				  ep->state);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}

	rc = qed_iwarp_fw_destroy(p_hwfn, qp);

	if (qp->shared_queue)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  IWARP_SHARED_QUEUE_PAGE_SIZE,
				  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
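/* Allocate an ep (endpoint/connection object) together with its DMA-coherent
 * buffer, which holds the incoming and outgoing MPA private data as well as
 * the async output area the FW writes on connection events.
 */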
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		rc = -ENOMEM;
		goto err;
	}

	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;

err:
	kfree(ep);
	return rc;
}
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}
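/* Build and post the TCP_OFFLOAD ramrod that hands a TCP connection over to
 * FW. Passive connections complete through the SPQ callback since we are in
 * the middle of SYN processing; active ones block on the ramrod completion.
 */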
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u16 flags = 0;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	in_pdata_phys = ep->ep_buffer_phys +
			offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	async_output_phys = ep->ep_buffer_phys +
			    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->flags = cpu_to_le16(flags);
	tcp->ip_version = ep->cm_info.ip_version;

	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
			cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u16 ulp_data_len;
	u8 mpa_rev;

	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store the requested ord/ird in cm_info; they
		 * are replaced with the negotiated values during accept.
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match, send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
	ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}
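/* Post the MPA_OFFLOAD ramrod that sends our outgoing MPA frame and starts
 * FW-side MPA negotiation. In the reject flow there is no QP attached, so
 * the ramrod is sent on the tcp_cid and common->reject is set instead.
 */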
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct mpa_outgoing_params *common;
	struct qed_iwarp_info *iwarp_info;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	u32 val;
	int rc;

	qp = ep->qp;
	reject = !qp;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
	common = &p_mpa_ramrod->common;

	out_pdata_phys = ep->ep_buffer_phys +
			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);

	val = ep->cm_info.private_data_len;
	common->outgoing_ulp_buffer.len = cpu_to_le16(val);
	common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
	common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);

	val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
	p_mpa_ramrod->tcp_cid = cpu_to_le32(val);

	in_pdata_phys = ep->ep_buffer_phys +
			offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
			    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		common->reject = 1;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid; /* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);

	return rc;
}
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code; it's ok if tcp_cid
		 * remains invalid... in this case we'll defer allocation.
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	list_move_tail(&ep->list_entry,
		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
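/* Extract the negotiated ord/ird (when MPA v2 is in use) and the location
 * and length of the upper-layer private data from the incoming MPA frame.
 */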
static void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;
	u16 ulp_data_len;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}

	async_data = &ep->ep_buffer_virt->async_output;
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;

	ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
	ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
}
static void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		DP_NOTICE(p_hwfn,
			  "MPA reply event not expected on passive side!\n");
		return;
	}

	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

	qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->mpa_reply_processed = true;

	ep->event_cb(ep->cb_context, &params);
}
#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	if (fw_return_code != RDMA_RETURN_OK)
		/* paired with READ_ONCE in destroy_qp */
		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	ep->event_cb(ep->cb_context, &params);

	/* on the passive side, if there is no associated QP (REJECT) we need
	 * to return the ep to the pool; in the regular case we add an element
	 * in accept instead of this one.
	 * In both cases we need to remove it from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) { /* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}
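/* Build the outgoing MPA v2 header in the out_pdata buffer: the 14-bit
 * ird/ord values (MPA_V2_IRD_ORD_MASK) plus the peer2peer and RTR capability
 * bits, stored in network byte order ahead of the caller's private data.
 */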
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *mpa_v2_params;
	u16 mpa_ird, mpa_ord;

	*mpa_data_size = 0;
	if (MPA_REV2(ep->mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
		*mpa_data_size = sizeof(*mpa_v2_params);

		mpa_ird = (u16)ep->cm_info.ird;
		mpa_ord = (u16)ep->cm_info.ord;

		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
			mpa_ird |= MPA_V2_PEER2PEER_MODEL;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
				mpa_ird |= MPA_V2_SEND_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
				mpa_ord |= MPA_V2_WRITE_RTR;

			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
				mpa_ord |= MPA_V2_READ_RTR;
		}

		mpa_v2_params->ird = htons(mpa_ird);
		mpa_v2_params->ord = htons(mpa_ord);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
			   mpa_v2_params->ird,
			   mpa_v2_params->ord,
			   *((u32 *)mpa_v2_params),
			   mpa_ord & MPA_V2_IRD_ORD_MASK,
			   mpa_ird & MPA_V2_IRD_ORD_MASK,
			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
			   !!(mpa_ird & MPA_V2_SEND_RTR),
			   !!(mpa_ord & MPA_V2_WRITE_RTR),
			   !!(mpa_ord & MPA_V2_READ_RTR));
	}
}
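/* Active-side connection establishment: allocate a cid and an ep, build the
 * (possibly MPA v2) outgoing private data and kick off the TCP offload; the
 * MPA offload follows from the async flow once the TCP handshake completes.
 */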
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u32 cid;
	int rc;

	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);

		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	ep->mss = iparams->mss;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;

err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}
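/* Take an ep from the free list for a new passive connection. This runs from
 * the SYN-processing dpc, so the tcp cid must come from the preallocated
 * pool rather than from a dynamic allocation.
 */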
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* in some cases we could have failed allocating a tcp cid when the ep
	 * was added from accept / failure... retry now... this is not the
	 * common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* if we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}
#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared; as long as
 * there is progress (i.e. the number of bits left to be cleared decreases),
 * the function continues waiting.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int prev_weight = 0;
	int wait_count = 0;
	int weight = 0;

	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	prev_weight = weight;

	while (weight) {
		/* If the HW device is during recovery, all resources are
		 * immediately reset without receiving a per-cid indication
		 * from HW. In this case we don't expect the cid_map to be
		 * cleared.
		 */
		if (p_hwfn->cdev->recov_in_prog)
			return 0;

		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		weight = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (prev_weight == weight) {
			wait_count++;
		} else {
			prev_weight = weight;
			wait_count = 0;
		}

		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, weight);
			return -EBUSY;
		}
	}
	return 0;
}
static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
	int rc;
	int i;

	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
					    &p_hwfn->p_rdma_info->tcp_cid_map);
	if (rc)
		return rc;

	/* Now free the tcp cids from the main cid map */
	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

	/* Now wait for all cids to be completed */
	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
					      &p_hwfn->p_rdma_info->cid_map);
}
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
				      struct qed_iwarp_ep, list_entry);

		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool;
		 * afterwards we allocate only from the tcp_cid map.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code; it's ok if
			 * tcp_cid remains invalid... in this case we'll
			 * defer allocation.
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}
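/* One-time allocation of the iWARP resources: the preallocated tcp cid
 * bitmap, the pool of free ep objects and the OOO bookkeeping allocated by
 * qed_ooo_alloc().
 */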
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate bitmap for tcp cids. These are used by the passive side
	 * to ensure it can allocate a tcp cid during dpc that was
	 * pre-acquired and doesn't require dynamic allocation of ilt.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate tcp cid, rc = %d\n", rc);
		return rc;
	}

	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
	if (rc)
		return rc;

	return qed_ooo_alloc(p_hwfn);
}
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	qed_ooo_free(p_hwfn);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
	kfree(iwarp_info->mpa_bufs);
	kfree(iwarp_info->partial_fpdus);
	kfree(iwarp_info->mpa_intermediate_buf);
}
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if the upper layer requested an ord
		 * larger than the ird advertised by the remote, we need to
		 * decrease our ord.
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);

	return rc;
}
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);

	ep->cb_context = iparams->cb_context;
	ep->qp = NULL;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	return qed_iwarp_mpa_offload(p_hwfn, ep);
}
static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
			  rc, handle);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
				  buf->data, buf->data_phys_addr);
		kfree(buf);
	}

	return rc;
}
static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_ep *ep = NULL;
	bool found = false;

	list_for_each_entry(ep,
			    &p_hwfn->p_rdma_info->iwarp.ep_list,
			    list_entry) {
		if ((ep->cm_info.local_port == cm_info->local_port) &&
		    (ep->cm_info.remote_port == cm_info->remote_port) &&
		    (ep->cm_info.vlan == cm_info->vlan) &&
		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
			    sizeof(cm_info->local_ip)) &&
		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
			    sizeof(cm_info->remote_ip))) {
			found = true;
			break;
		}
	}

	if (found) {
		DP_NOTICE(p_hwfn,
			  "SYN received on active connection - dropping\n");
		qed_iwarp_print_cm_info(p_hwfn, cm_info);

		return true;
	}

	return false;
}
static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_listener *listener = NULL;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	bool found = false;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port == cm_info->local_port) {
			if (!memcmp(listener->ip_addr,
				    ip_zero, sizeof(ip_zero))) {
				found = true;
				break;
			}

			if (!memcmp(listener->ip_addr,
				    cm_info->local_ip,
				    sizeof(cm_info->local_ip)) &&
			    (listener->vlan == cm_info->vlan)) {
				found = true;
				break;
			}
		}
	}

	if (found) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
			   listener);
		return listener;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}
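/* Parse a SYN packet received on the iWARP LL2 connection: validate the
 * destination MAC, unwrap an optional VLAN tag, extract the IPv4/IPv6 and
 * TCP four-tuple into cm_info, and return the TCP payload length and the
 * offset of the TCP header within the buffer.
 */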
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen, ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);

	if (!ether_addr_equal(ethh->h_dest,
			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "Got unexpected mac %pM instead of %pM\n",
			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
		return -EINVAL;
	}

	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
		   eth_type, ethh->h_source);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	if (eth_type == ETH_P_IP) {
		if (iph->protocol != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  iph->protocol);
			return -EINVAL;
		}

		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;

		if (ip6h->nexthdr != IPPROTO_TCP) {
			DP_NOTICE(p_hwfn,
				  "Unexpected ip protocol on ll2 %x\n",
				  ip6h->nexthdr);
			return -EINVAL;
		}

		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}
static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
						      u16 cid)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_fpdu *partial_fpdu;
	u32 idx;

	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
	if (idx >= iwarp_info->max_num_partial_fpdus) {
		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
		       iwarp_info->max_num_partial_fpdus);
		return NULL;
	}

	partial_fpdu = &iwarp_info->partial_fpdus[idx];

	return partial_fpdu;
}
enum qed_iwarp_mpa_pkt_type {
	QED_IWARP_MPA_PKT_PACKED,
	QED_IWARP_MPA_PKT_PARTIAL,
	QED_IWARP_MPA_PKT_UNALIGNED
};

#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)

/* Pad to multiple of 4 */
#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
	 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
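/* Worked example of the macro above: an MPA payload of 13 bytes gives
 * 13 + 2 (length field) = 15 bytes, padded up to 16, plus the 4-byte CRC32
 * digest = a 20-byte FPDU on the wire.
 */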
/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
#define QED_IWARP_MAX_BDS_PER_FPDU 3

static const char * const pkt_type_str[] = {
	"QED_IWARP_MPA_PKT_PACKED",
	"QED_IWARP_MPA_PKT_PARTIAL",
	"QED_IWARP_MPA_PKT_UNALIGNED"
};

static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf);
static enum qed_iwarp_mpa_pkt_type
qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_fpdu *fpdu,
		       u16 tcp_payload_len, u8 *mpa_data)
{
	enum qed_iwarp_mpa_pkt_type pkt_type;
	u16 mpa_len;

	if (fpdu->incomplete_bytes) {
		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
		goto out;
	}

	/* special case of one byte remaining...
	 * lower byte will be read in the next packet
	 */
	if (tcp_payload_len == 1) {
		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
		goto out;
	}

	mpa_len = ntohs(*(__force __be16 *)mpa_data);
	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);

	if (fpdu->fpdu_length <= tcp_payload_len)
		pkt_type = QED_IWARP_MPA_PKT_PACKED;
	else
		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;

out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);

	return pkt_type;
}
static void
qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *pkt_data,
		    u16 tcp_payload_size, u8 placement_offset)
{
	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);

	fpdu->mpa_buf = buf;
	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
	fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
	fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;

	if (tcp_payload_size == 1)
		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
	else if (tcp_payload_size < fpdu->fpdu_length)
		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
	else
		fpdu->incomplete_bytes = 0; /* complete fpdu */

	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
}
static int
qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
		 struct qed_iwarp_fpdu *fpdu,
		 struct unaligned_opaque_data *pkt_data,
		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
{
	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
	int rc;

	/* need to copy the data from the partial packet stored in fpdu
	 * to the new buf; for this we also need to move the data currently
	 * placed on the buf. The assumption is that the buffer is big enough
	 * since fpdu_length <= mss. We use an intermediate buffer since
	 * we may need to copy the new data to an overlapping location.
	 */
	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
		DP_ERR(p_hwfn,
		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		       buf->buff_size, fpdu->mpa_frag_len,
		       tcp_payload_size, fpdu->incomplete_bytes);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
		   (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);

	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
	memcpy(tmp_buf + fpdu->mpa_frag_len,
	       (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);

	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
	if (rc)
		return rc;

	/* If we managed to post the buffer, copy the data to the new buffer;
	 * otherwise this will occur in the next round...
	 */
	memcpy((u8 *)(buf->data), tmp_buf,
	       fpdu->mpa_frag_len + tcp_payload_size);

	fpdu->mpa_buf = buf;
	/* fpdu->pkt_hdr remains as is */
	/* fpdu->mpa_frag is overridden with new buf */
	fpdu->mpa_frag = buf->data_phys_addr;
	fpdu->mpa_frag_virt = buf->data;
	fpdu->mpa_frag_len += tcp_payload_size;

	fpdu->incomplete_bytes -= tcp_payload_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
		   fpdu->incomplete_bytes);

	return 0;
}
static void
qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
{
	u16 mpa_len;

	/* Update incomplete packets if needed */
	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
		/* Missing lower byte is now available */
		mpa_len = fpdu->fpdu_length | *mpa_data;
		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
		/* one byte of hdr */
		fpdu->mpa_frag_len = 1;
		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
	}
}
#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
	(GET_FIELD((_curr_pkt)->flags,	   \
		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
/* This function is used to recycle a buffer using the ll2 drop option. It
 * uses the mechanism to ensure that all buffers posted to tx before this one
 * were completed. The buffer sent here will be sent as a cookie in the tx
 * completion function and can then be reposted to the rx chain when done.
 * The flow that requires this is the flow where a FPDU splits over more than
 * 3 tcp segments: in this case the driver needs to re-post a rx buffer
 * instead of the one received, but the driver can't simply repost a buffer
 * it copied from, as there is a case where the buffer was originally a
 * packed FPDU and is partially posted to FW, so the driver needs to ensure
 * FW is done with it.
 */
static int
qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_fpdu *fpdu,
		      struct qed_iwarp_ll2_buff *buf)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));
	tx_pkt.num_of_bds = 1;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	buf->piggy_buf = NULL;
	tx_pkt.cookie = buf;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't drop packet rc=%d\n", rc);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
		   (unsigned long int)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, buf, rc);

	return rc;
}
static int
qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));
	tx_pkt.num_of_bds = 1;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;

	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	tx_pkt.enable_ip_cksum = true;
	tx_pkt.enable_l4_cksum = true;
	tx_pkt.calc_ip_len = true;
	/* vlan overload with enum iwarp_ll2_tx_queues */
	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Can't send right edge rc=%d\n", rc);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
		   tx_pkt.num_of_bds,
		   (unsigned long int)tx_pkt.first_frag,
		   tx_pkt.first_frag_len, rc);

	return rc;
}
static int
qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
		    struct qed_iwarp_fpdu *fpdu,
		    struct unaligned_opaque_data *curr_pkt,
		    struct qed_iwarp_ll2_buff *buf,
		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	u16 first_mpa_offset;
	u8 ll2_handle;
	int rc;

	memset(&tx_pkt, 0, sizeof(tx_pkt));

	/* An unaligned packet means it's split over two tcp segments, so the
	 * complete packet requires 3 bds: one for the header, one for the
	 * part of the fpdu of the first tcp segment, and the last fragment
	 * will point to the remainder of the fpdu. A packed pdu requires only
	 * two bds: one for the header and one for the data.
	 */
	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */

	/* Send the mpa_buf only with the last fpdu (in case of packed) */
	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
	    tcp_payload_size <= fpdu->fpdu_length)
		tx_pkt.cookie = fpdu->mpa_buf;

	tx_pkt.first_frag = fpdu->pkt_hdr;
	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
	tx_pkt.enable_ip_cksum = true;
	tx_pkt.enable_l4_cksum = true;
	tx_pkt.calc_ip_len = true;
	/* vlan overload with enum iwarp_ll2_tx_queues */
	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;

	/* special case of unaligned packet and not packed, need to send
	 * both buffers as cookie to release.
	 */
	if (tcp_payload_size == fpdu->incomplete_bytes)
		fpdu->mpa_buf->piggy_buf = buf;

	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;

	/* Set first fragment to header */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
	if (rc)
		goto out;

	/* Set second fragment to first part of packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
					       fpdu->mpa_frag,
					       fpdu->mpa_frag_len);
	if (rc)
		goto out;

	if (!fpdu->incomplete_bytes)
		goto out;

	first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);

	/* Set third fragment to second part of the packet */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
					       ll2_handle,
					       buf->data_phys_addr +
					       first_mpa_offset,
					       fpdu->incomplete_bytes);
out:
	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
		   tx_pkt.num_of_bds,
		   tx_pkt.first_frag_len,
		   fpdu->mpa_frag_len,
		   fpdu->incomplete_bytes, rc);

	return rc;
}
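/* Rebuild the unaligned_opaque_data cookie from the two 32-bit opaque words
 * of the LL2 Rx completion, and make first_mpa_offset relative to the buffer
 * start by adding the TCP payload offset.
 */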
static void
qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
		       struct unaligned_opaque_data *curr_pkt,
		       u32 opaque_data0, u32 opaque_data1)
{
	u64 opaque_data;

	opaque_data = HILO_64(cpu_to_le32(opaque_data1),
			      cpu_to_le32(opaque_data0));
	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);

	le16_add_cpu(&curr_pkt->first_mpa_offset,
		     curr_pkt->tcp_payload_offset);
}
/* This function is called when an unaligned or incomplete MPA packet arrives.
 * The driver needs to align the packet, perhaps using previous data, and
 * send it down to FW once it is aligned.
 */
static int
qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
{
	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
	enum qed_iwarp_mpa_pkt_type pkt_type;
	struct qed_iwarp_fpdu *fpdu;
	u16 cid, first_mpa_offset;
	int rc = -EINVAL;
	u8 *mpa_data;

	cid = le32_to_cpu(curr_pkt->cid);

	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
	if (!fpdu) { /* something corrupt with cid, post rx back */
		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
		       cid);
		goto err;
	}

	do {
		first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
		mpa_data = ((u8 *)(buf->data) + first_mpa_offset);

		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
						  mpa_buf->tcp_payload_len,
						  mpa_data);

		switch (pkt_type) {
		case QED_IWARP_MPA_PKT_PARTIAL:
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
				mpa_buf->tcp_payload_len = 0;
				break;
			}

			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len = 0;
			break;
		case QED_IWARP_MPA_PKT_PACKED:
			qed_iwarp_init_fpdu(buf, fpdu,
					    curr_pkt,
					    mpa_buf->tcp_payload_len,
					    mpa_buf->placement_offset);

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:reset rc=%d\n", rc);
				memset(fpdu, 0, sizeof(*fpdu));
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->fpdu_length);
			break;
		case QED_IWARP_MPA_PKT_UNALIGNED:
			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
				/* special handling of fpdu split over more
				 * than 2 segments
				 */
				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
					rc = qed_iwarp_win_right_edge(p_hwfn,
								      fpdu);
					/* packet will be re-processed later */
					if (rc)
						return rc;
				}

				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
						      buf,
						      mpa_buf->tcp_payload_len);
				if (rc) /* packet will be re-processed later */
					return rc;

				mpa_buf->tcp_payload_len = 0;
				break;
			}

			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
						 mpa_buf->tcp_payload_len,
						 pkt_type);
			if (rc) {
				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
					   "Can't send FPDU:delay rc=%d\n", rc);
				/* don't reset fpdu -> we need it for next
				 * classify
				 */
				break;
			}

			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
			le16_add_cpu(&curr_pkt->first_mpa_offset,
				     fpdu->incomplete_bytes);

			/* The framed PDU was sent - no more incomplete bytes */
			fpdu->incomplete_bytes = 0;
			break;
		}
	} while (mpa_buf->tcp_payload_len && !rc);

	return rc;

err:
	qed_iwarp_ll2_post_rx(p_hwfn,
			      buf,
			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);

	return rc;
}

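/* Reviewer summary (not in the original source): each loop iteration above
 * takes one of three paths based on qed_iwarp_mpa_classify():
 *
 *   PARTIAL   - the segment holds only part of a new FPDU: stash it in the
 *               fpdu state (possibly sending a right-edge update); nothing
 *               is forwarded to FW yet.
 *   PACKED    - the segment starts with a complete FPDU: send it as-is and
 *               advance first_mpa_offset past it.
 *   UNALIGNED - the segment continues a stashed FPDU: if it still does not
 *               complete it, copy it aside (qed_iwarp_cp_pkt); otherwise
 *               transmit header + both parts as one chained packet.
 */
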
static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
	int rc;

	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
					   struct qed_iwarp_ll2_mpa_buf,
					   list_entry);

		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);

		/* busy means break and continue processing later, don't
		 * remove the buf from the pending list.
		 */
		if (rc == -EBUSY)
			break;

		list_move_tail(&mpa_buf->list_entry,
			       &iwarp_info->mpa_buf_list);

		if (rc) { /* different error, don't continue */
			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
			break;
		}
	}
}

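/* Note (added for review): a buffer left on mpa_buf_pending_list due to
 * -EBUSY is retried from the tx-completion path; see
 * qed_iwarp_ll2_comp_tx_pkt() below, which calls this function again once
 * a tx descriptor on the MPA connection frees up.
 */
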
static void
qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
	struct qed_iwarp_info *iwarp_info;
	struct qed_hwfn *p_hwfn = cxt;
	u16 first_mpa_offset;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
				   struct qed_iwarp_ll2_mpa_buf, list_entry);
	if (!mpa_buf) {
		DP_ERR(p_hwfn, "No free mpa buf\n");
		goto err;
	}

	list_del(&mpa_buf->list_entry);
	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
			       data->opaque_data_0, data->opaque_data_1);

	first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
		   data->length.packet_length, first_mpa_offset,
		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
		   mpa_buf->data.cid);

	mpa_buf->ll2_buf = data->cookie;
	mpa_buf->tcp_payload_len = data->length.packet_length -
				   first_mpa_offset;

	first_mpa_offset += data->u.placement_offset;
	mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
	mpa_buf->placement_offset = data->u.placement_offset;

	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);

	qed_iwarp_process_pending_pkts(p_hwfn);
	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
			      iwarp_info->ll2_mpa_handle);
}

static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	/* Check if packet was received with errors... */
	if (data->err_flags) {
		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
			  data->err_flags);
		goto err;
	}

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
	/* There may be an open ep on this connection if this is a syn
	 * retransmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}

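/* Worked example (illustrative, not in the original source): the MSS
 * computed above subtracts fixed IPv4+TCP (40 bytes) or IPv6+TCP
 * (60 bytes) headers from the configured MTU, then clamps to
 * QED_IWARP_MAX_FW_MSS. For an IPv4 connection with max_mtu = 1500:
 *
 *   ep->mss = min(4120, 1500 - 40) = 1460
 */
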
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);
	kfree(buffer);
}

static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_iwarp_ll2_buff *piggy;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer) /* can happen in packed mpa unaligned... */
		return;

	/* this was originally an rx packet, post it back */
	piggy = buffer->piggy_buf;
	if (piggy) {
		buffer->piggy_buf = NULL;
		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
	}

	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);

	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
		qed_iwarp_process_pending_pkts(p_hwfn);
}

static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_iwarp_ll2_buff *buffer = cookie;
	struct qed_hwfn *p_hwfn = cxt;

	if (!buffer)
		return;

	if (buffer->piggy_buf) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  buffer->piggy_buf->buff_size,
				  buffer->piggy_buf->data,
				  buffer->piggy_buf->data_phys_addr);

		kfree(buffer->piggy_buf);
	}

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
			  buffer->data, buffer->data_phys_addr);

	kfree(buffer);
}

/* The only slowpath for iwarp ll2 is unalign flush. When this completion
 * is received, the FPDU state for the connection must be reset.
 */
static void
qed_iwarp_ll2_slowpath(void *cxt,
		       u8 connection_handle,
		       u32 opaque_data_0, u32 opaque_data_1)
{
	struct unaligned_opaque_data unalign_data;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_iwarp_fpdu *fpdu;
	u32 cid;

	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
			       opaque_data_0, opaque_data_1);

	cid = le32_to_cpu(unalign_data.cid);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);

	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
	if (fpdu)
		memset(fpdu, 0, sizeof(*fpdu));
}

static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_ooo_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	}

	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_mpa_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
				  p_hwfn->p_rdma_info->iwarp.mac_addr);

	return rc;
}

static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buffer;
	int rc = 0;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			rc = -ENOMEM;
			break;
		}

		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  buff_size,
						  &buffer->data_phys_addr,
						  GFP_KERNEL);
		if (!buffer->data) {
			kfree(buffer);
			rc = -ENOMEM;
			break;
		}

		buffer->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			break;
	}
	return rc;
}

#define QED_IWARP_MAX_BUF_SIZE(mtu)					  \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)

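/* Worked example (illustrative, assuming ETH_HLEN = 14, VLAN_HLEN = 4 and
 * ETH_CACHE_LINE_SIZE = 64): for an mtu of 1500,
 *
 *   QED_IWARP_MAX_BUF_SIZE(1500) = ALIGN(1500 + 14 + 8 + 2 + 64, 64)
 *                                = ALIGN(1588, 64) = 1600
 *
 * i.e. each rx buffer is padded to a whole number of cache lines, with room
 * for two VLAN tags and the placement offset.
 */
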
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    u32 rcv_wnd_size)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	u32 buff_size;
	u16 n_ooo_bufs;
	int rc = 0;
	int i;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.slowpath_cb = NULL;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	/* SYN will use ctx based queues */
	data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
	data.input.mtu = params->max_mtu;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 buff_size,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	/* Start OOO connection */
	data.input.conn_type = QED_LL2_TYPE_OOO;
	/* OOO/unaligned will use legacy ll2 queues (ram based) */
	data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
	data.input.mtu = params->max_mtu;

	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
		     iwarp_info->max_mtu;
	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);

	data.input.rx_num_desc = n_ooo_bufs;
	data.input.rx_num_ooo_buffers = n_ooo_bufs;

	data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
	if (rc)
		goto err;

	/* Start Unaligned MPA connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = params->max_mtu;
	/* FW requires that once a packet arrives OOO, it must have at
	 * least 2 rx buffers available on the unaligned connection
	 * for handling the case that it is a partial fpdu.
	 */
	data.input.rx_num_desc = n_ooo_bufs * 2;
	data.input.tx_num_desc = data.input.rx_num_desc;
	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
	data.input.secondary_queue = true;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		goto err;

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 data.input.rx_num_desc,
					 buff_size,
					 iwarp_info->ll2_mpa_handle);
	if (rc)
		goto err;

	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
					    sizeof(*iwarp_info->partial_fpdus),
					    GFP_KERNEL);
	if (!iwarp_info->partial_fpdus) {
		rc = -ENOMEM;
		goto err;
	}

	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;

	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
	if (!iwarp_info->mpa_intermediate_buf) {
		rc = -ENOMEM;
		goto err;
	}

	/* The mpa_bufs array serves for pending RX packets received on the
	 * mpa ll2 that don't have place on the tx ring and require later
	 * processing. We can't fail on allocation of such a struct therefore
	 * we allocate enough to take care of all rx packets
	 */
	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
				       sizeof(*iwarp_info->mpa_bufs),
				       GFP_KERNEL);
	if (!iwarp_info->mpa_bufs) {
		rc = -ENOMEM;
		goto err;
	}

	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
	for (i = 0; i < data.input.rx_num_desc; i++)
		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
			      &iwarp_info->mpa_buf_list);
	return rc;
err:
	qed_iwarp_ll2_stop(p_hwfn);

	return rc;
}

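/* Summary (added for review): qed_iwarp_ll2_start() brings up three LL2
 * connections that back the iWARP flow:
 *
 *   syn - context-based rx queue that traps TCP SYNs for passive connects.
 *   ooo - legacy (ram-based) queue used for TCP out-of-order isles.
 *   mpa - loopback queue through which re-aligned FPDUs are fed back to FW,
 *         sized at twice the ooo buffer count per the FW requirement noted
 *         above.
 */
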
static struct {
	u32 two_ports;
	u32 four_ports;
} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
};

int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_iwarp_info *iwarp_info;
	enum chip_ids chip_id;
	u32 rcv_wnd_size;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;

	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
		qed_iwarp_rcv_wnd_size[chip_id].two_ports;

	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;

	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
			       MPA_RTR_TYPE_ZERO_WRITE |
			       MPA_RTR_TYPE_ZERO_READ;

	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);
	qed_ooo_setup(p_hwfn);

	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
}

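/* Worked example (illustrative, not in the original source): on a 2-port
 * BB adapter, rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF_BB_2P = 200K, so
 *
 *   rcv_wnd_scale = ilog2(204800) - ilog2(0xffff) = 17 - 15 = 2
 *   rcv_wnd_size  = 204800 >> 2 = 51200
 *
 * i.e. the scaled window value and the window-scale factor that are later
 * used when TCP connections are offloaded.
 */
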
int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);
	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc)
		return rc;

	return qed_iwarp_ll2_stop(p_hwfn);
}

static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep,
				  u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			 0 : -ECONNRESET;

	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->event_cb(ep->cb_context, &params);
}

static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
					 struct qed_iwarp_ep *ep,
					 u8 fw_ret_code)
{
	struct qed_iwarp_cm_event_params params;
	bool event_cb = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
		   ep->cid, fw_ret_code);

	switch (fw_ret_code) {
	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
		params.status = 0;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
		params.status = -ECONNRESET;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
		params.event = QED_IWARP_EVENT_RQ_EMPTY;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
		params.event = QED_IWARP_EVENT_IRQ_FULL;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
		event_cb = true;
		break;
	default:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled exception received...fw_ret_code=%d\n",
			   fw_ret_code);
		break;
	}

	if (event_cb) {
		params.ep_context = ep;
		params.cm_info = &ep->cm_info;
		ep->event_cb(ep->cb_context, &params);
	}
}

static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	/* paired with READ_ONCE in destroy_qp */
	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);

	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}

static void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Done with the SYN packet, post back to ll2 rx */
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);

		ep->syn = NULL;

		/* If connect failed - upper layer doesn't know about it */
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_received(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	} else {
		if (fw_return_code == RDMA_RETURN_OK)
			qed_iwarp_mpa_offload(p_hwfn, ep);
		else
			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
							   fw_return_code);
	}
}

static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	if (!ep || (ep->sig != QED_EP_SIG)) {
		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
		return false;
	}

	return true;
}

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				 __le16 echo, union event_ring_data *data,
				 u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 srq_offset;
	u16 srq_id;
	u16 cid;

	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);

	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);
		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_EMPTY,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
		/* FW assigns value that is no greater than u16 */
		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
		events.affiliated_event(events.context,
					QED_IWARP_EVENT_SRQ_LIMIT,
					&srq_id);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			QED_IWARP_EVENT_CQ_OVERFLOW,
			(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}

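/* Note (added for review): fw_handle echoes back the 64-bit value the
 * driver handed to FW when the connection was offloaded - here it is the
 * ep pointer itself, reassembled with HILO_64() and sanity-checked against
 * QED_EP_SIG by qed_iwarp_check_ep_ok() before use. For the CID and SRQ
 * events, the low dword carries an id rather than a pointer.
 */
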
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *listener;

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->ip_version = iparams->ip_version;
	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
	listener->port = iparams->port;
	listener->vlan = iparams->vlan;

	listener->event_cb = iparams->event_cb;
	listener->cb_context = iparams->cb_context;
	listener->max_backlog = iparams->max_backlog;
	oparams->handle = listener;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&listener->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   listener->event_cb,
		   listener,
		   listener->ip_addr[0],
		   listener->ip_addr[1],
		   listener->ip_addr[2],
		   listener->ip_addr[3], listener->port, listener->vlan);

	return 0;
}

int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_iwarp_listener *listener = handle;
	struct qed_hwfn *p_hwfn = rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&listener->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(listener);

	return 0;
}

int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_iwarp_ep *ep;
	struct qed_rdma_qp *qp;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
		return -EINVAL;
	}

	qp = ep->qp;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   qp->icid, ep->tcp_cid);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

	return rc;
}

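/* Note (added for review): the MPA_OFFLOAD_SEND_RTR ramrod appears to carry
 * no payload beyond the SPQ entry itself - init_data.cid (the QP's icid)
 * identifies the connection, and QED_SPQ_MODE_CB selects asynchronous
 * callback-style completion rather than blocking on the ramrod.
 */
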
void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
		   struct qed_rdma_query_qp_out_params *out_params)
{
	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}