/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_puda.h"

static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
			      struct i40iw_puda_buf *buf);
static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
						      *rsrc, bool initial);

/**
 * i40iw_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
{
	struct i40iw_puda_buf *buf = NULL;

	if (!list_empty(list)) {
		buf = (struct i40iw_puda_buf *)list->next;
		list_del((struct list_head *)&buf->list);
	}
	return buf;
}

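/*
 * Note on the cast in i40iw_puda_get_listbuf() above: treating list->next
 * directly as a buffer pointer assumes the embedded list_head is the first
 * member of struct i40iw_puda_buf (per the structure layout in
 * i40iw_puda.h); the helper is not valid for a list_head at any other
 * offset.
 */
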
/**
 * i40iw_puda_get_bufpool - take a buffer from the resource bufpool
 * @rsrc: resource to use for buffer
 */
struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_puda_buf *buf = NULL;
	struct list_head *list = &rsrc->bufpool;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	buf = i40iw_puda_get_listbuf(list);
	if (buf)
		rsrc->avail_buf_count--;
	else
		rsrc->stats_buf_alloc_fail++;
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	return buf;
}

/**
 * i40iw_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
			    struct i40iw_puda_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	list_add(&buf->list, &rsrc->bufpool);
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	rsrc->avail_buf_count++;
}

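/*
 * Usage sketch (illustrative, not part of the original driver): callers
 * pair the two helpers above, taking a buffer from the free pool and
 * returning it when they are done with it:
 *
 *	struct i40iw_puda_buf *buf = i40iw_puda_get_bufpool(rsrc);
 *
 *	if (!buf)
 *		return I40IW_ERR_list_empty;
 *	... fill buf->mem.va with at most buf->mem.size bytes ...
 *	i40iw_puda_ret_bufpool(rsrc, buf);
 */
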
/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
				    struct i40iw_puda_buf *buf, bool initial)
{
	u64 *wqe;
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 offset24 = 0;

	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
		    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
		    wqe_idx, buf, wqe);
	if (!initial)
		get_64bit_val(wqe, 24, &offset24);

	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);

	set_64bit_val(wqe, 0, buf->mem.pa);
	set_64bit_val(wqe, 8,
		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 24, offset24);
}

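/*
 * The offset24 handling above implements the valid-bit polarity scheme
 * used throughout this file: the expected sense of a wqe's valid bit
 * flips on every pass over the ring, so reposting a buffer toggles
 * whatever bit it finds (except at init time, when the zeroed ring means
 * the bit is simply set) rather than writing a fixed value.
 */
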
/**
 * i40iw_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
						      bool initial)
{
	u32 i;
	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
	struct i40iw_puda_buf *buf = NULL;

	for (i = 0; i < invalid_cnt; i++) {
		buf = i40iw_puda_get_bufpool(rsrc);
		if (!buf)
			return I40IW_ERR_list_empty;
		i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
					initial);
		rsrc->rx_wqe_idx =
		    ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
		rsrc->rxq_invalid_cnt--;
	}
	return 0;
}

/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
						   u32 length)
{
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_virt_mem buf_mem;
	enum i40iw_status_code ret;

	ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
				      sizeof(struct i40iw_puda_buf));
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error mem for buf\n", __func__);
		return NULL;
	}
	buf = (struct i40iw_puda_buf *)buf_mem.va;
	ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA,
			    "%s: error dma mem for buf\n", __func__);
		i40iw_free_virt_mem(dev->hw, &buf_mem);
		return NULL;
	}
	buf->buf_mem.va = buf_mem.va;
	buf->buf_mem.size = buf_mem.size;
	return buf;
}

/**
 * i40iw_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
				struct i40iw_puda_buf *buf)
{
	i40iw_free_dma_mem(dev->hw, &buf->mem);
	i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}

/**
 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code = 0;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	if (ret_code)
		return wqe;
	wqe = qp->sq_base[*wqe_idx].elem;

	return wqe;
}

/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
						   struct i40iw_puda_completion_info *info)
{
	u64 qword0, qword2, qword3;
	u64 *cqe;
	u64 comp_ctx;
	bool valid_bit;
	u32 major_err, minor_err;
	bool error;

	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
	get_64bit_val(cqe, 24, &qword3);
	valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);

	if (valid_bit != cq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
	error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	if (error) {
		i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
		major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
		minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
		info->compl_error = major_err << 16 | minor_err;
		return I40IW_ERR_CQ_COMPL_ERROR;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);
	info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

	if (info->q_type == I40IW_CQE_QTYPE_RQ) {
		info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
		info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
		info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
		info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
	}

	return 0;
}

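/*
 * CQE layout assumed by i40iw_puda_poll_info() above: the 32-byte CQE is
 * read as four 64-bit qwords. Qword 0 (offset 0) carries the payload
 * length, qword 1 (offset 8) the completion context pointer (the qp),
 * qword 2 (offset 16) the qp id and L3/L4 protocol fields, and qword 3
 * (offset 24) the valid, error, queue-type and wqe-index fields.
 */
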
/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
						  struct i40iw_sc_cq *cq, u32 *compl_err)
{
	struct i40iw_qp_uk *qp;
	struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
	struct i40iw_puda_completion_info info;
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_rsrc *rsrc;
	void *sqwrid;
	u8 cq_type = cq->cq_type;
	unsigned long flags;

	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
		return I40IW_ERR_BAD_PTR;
	}
	memset(&info, 0, sizeof(info));
	ret = i40iw_puda_poll_info(cq, &info);
	*compl_err = info.compl_error;
	if (ret == I40IW_ERR_QUEUE_EMPTY)
		return ret;
	if (ret)
		goto done;

	qp = info.qp;
	if (!qp || !rsrc) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (qp->qp_id != rsrc->qp_id) {
		ret = I40IW_ERR_BAD_PTR;
		goto done;
	}

	if (info.q_type == I40IW_CQE_QTYPE_RQ) {
		buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
		/* Get all the tcpip information in the buf header */
		ret = i40iw_puda_get_tcpip_info(&info, buf);
		if (ret) {
			rsrc->stats_rcvd_pkt_err++;
			if (cq_type == I40IW_CQ_TYPE_ILQ) {
				i40iw_ilq_putback_rcvbuf(&rsrc->qp,
							 info.wqe_idx);
			} else {
				i40iw_puda_ret_bufpool(rsrc, buf);
				i40iw_puda_replenish_rq(rsrc, false);
			}
			goto done;
		}

		rsrc->stats_pkt_rcvd++;
		rsrc->compl_rxwqe_idx = info.wqe_idx;
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
		rsrc->receive(rsrc->dev, buf);
		if (cq_type == I40IW_CQ_TYPE_ILQ)
			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
		else
			i40iw_puda_replenish_rq(rsrc, false);

	} else {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
		rsrc->xmit_complete(rsrc->dev, sqwrid);
		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
		rsrc->tx_wqe_avail_cnt++;
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		if (!list_empty(&dev->ilq->txpend))
			i40iw_puda_send_buf(dev->ilq, NULL);
	}

done:
	I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
	if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
		cq_uk->polarity = !cq_uk->polarity;
	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
	set_64bit_val(cq_uk->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
	return ret;
}

/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
				       struct i40iw_puda_send_info *info)
{
	u64 *wqe;
	u32 iplen, l4len;
	u64 header[2];
	u32 wqe_idx;
	u8 iipt;

	/* number of 32 bits DWORDS in header */
	l4len = info->tcplen >> 2;
	if (info->ipv4) {
		iipt = 3;
		iplen = 5;
	} else {
		iipt = 1;
		iplen = 10;
	}

	wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
	/* Third line of WQE descriptor */
	/* maclen is in words */
	header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
		    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
		    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
		    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
	/* Fourth line of WQE descriptor */
	header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
		    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
		    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
		    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

	set_64bit_val(wqe, 0, info->paddr);
	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
	set_64bit_val(wqe, 16, header[0]);
	set_64bit_val(wqe, 24, header[1]);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
	i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}

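/*
 * Worked example for the length encodings in header[0] above, which are
 * in hardware units rather than bytes: an untagged 14-byte Ethernet
 * header gives maclen >> 1 = 7 (16-bit words); a 20-byte TCP header
 * without options gives l4len = tcplen >> 2 = 5 (32-bit DWORDs); and
 * iplen is 5 DWORDs for a 20-byte IPv4 header versus 10 for a 40-byte
 * IPv6 header.
 */
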
/**
 * i40iw_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_send_info info;
	enum i40iw_status_code ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	/* if no wqe available or not from a completion and we have
	 * pending buffers, we must queue new buffer
	 */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to txpend\n", __func__);
		return;
	}
	rsrc->tx_wqe_avail_cnt--;
	/* if we are coming from a completion and have pending buffers
	 * then get one from the pending list
	 */
	if (!buf) {
		buf = i40iw_puda_get_listbuf(&rsrc->txpend);
		if (!buf)
			goto done;
	}

	info.scratch = (void *)buf;
	info.paddr = buf->mem.pa;
	info.len = buf->totallen;
	info.tcplen = buf->tcphlen;
	info.maclen = buf->maclen;
	info.ipv4 = buf->ipv4;
	info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);

	ret = i40iw_puda_send(&rsrc->qp, &info);
	if (ret) {
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
		if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
			i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
				    "%s: adding to puda_send\n", __func__);
	} else {
		rsrc->stats_pkt_sent++;
	}
done:
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}

/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	u64 *qp_ctx = qp->hw_host_ctx;

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	set_64bit_val(qp_ctx, 24,
		      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
		      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

	set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
	set_64bit_val(qp_ctx, 56, 0);
	set_64bit_val(qp_ctx, 64, 1);

	set_64bit_val(qp_ctx, 136,
		      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
		      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

	set_64bit_val(qp_ctx, 168,
		      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

	set_64bit_val(qp_ctx, 176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

	i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
			qp_ctx, I40IW_QP_CTX_SIZE);
}

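/*
 * The byte offsets written above (8, 16, 24, 48, ..., 176) index into the
 * hardware-defined QP context, so field positions come from the
 * I40IWQPC_* shift/mask definitions rather than a C structure; that is
 * why the context is populated with raw set_64bit_val() calls.
 */
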
/**
 * i40iw_puda_qp_wqe - setup wqe for qp create
 * @rsrc: resource for qp
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_sc_dev *dev = rsrc->dev;
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	struct i40iw_ccq_cqe_info compl_info;
	enum i40iw_status_code status = 0;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);
	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
	i40iw_sc_cqp_post_sq(cqp);
	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						    I40IW_CQP_OP_CREATE_QP,
						    &compl_info);
	return status;
}

/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_qp *qp = &rsrc->qp;
	struct i40iw_qp_uk *ukqp = &qp->qp_uk;
	enum i40iw_status_code ret = 0;
	u32 sq_size, rq_size, t_size;
	struct i40iw_dma_mem *mem;

	sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
	rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
	t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
		  I40IW_QP_CTX_SIZE);
	/* Get page aligned memory */
	ret =
	    i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
				   I40IW_HW_PAGE_SIZE);
	if (ret) {
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
		return ret;
	}

	mem = &rsrc->qpmem;
	memset(mem->va, 0, t_size);
	qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
	qp->pd = &rsrc->sc_pd;
	qp->qp_type = I40IW_QP_TYPE_UDA;
	qp->dev = rsrc->dev;
	qp->back_qp = (void *)rsrc;
	qp->sq_pa = mem->pa;
	qp->rq_pa = qp->sq_pa + sq_size;
	ukqp->sq_base = mem->va;
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
	qp->shadow_area_pa = qp->rq_pa + rq_size;
	qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
	qp->hw_host_ctx_pa =
		qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
	ukqp->qp_id = rsrc->qp_id;
	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
	ukqp->rq_wrid_array = rsrc->rq_wrid_array;

	ukqp->sq_size = rsrc->sq_size;
	ukqp->rq_size = rsrc->rq_size;

	I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
	I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

	if (qp->pd->dev->is_pf)
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_PFPE_WQEALLOC);
	else
		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						      I40E_VFPE_WQEALLOC1);

	qp->qs_handle = qp->dev->qs_handle;
	i40iw_puda_qp_setctx(rsrc);
	ret = i40iw_puda_qp_wqe(rsrc);
	if (ret)
		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
	return ret;
}

/**
 * i40iw_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
	struct i40iw_sc_dev *dev = rsrc->dev;
	struct i40iw_sc_cq *cq = &rsrc->cq;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_status_code ret = 0;
	u32 tsize, cqsize;
	u32 shadow_read_threshold = 128;
	struct i40iw_dma_mem *mem;
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_cq_init_info info;
	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

	cq->back_cq = (void *)rsrc;
	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
				     I40IW_CQ0_ALIGNMENT_MASK);
	if (ret)
		return ret;

	mem = &rsrc->cqmem;
	memset(&info, 0, sizeof(info));
	info.dev = dev;
	info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
			 I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
	info.shadow_read_threshold = rsrc->cq_size >> 2;
	info.ceq_id_valid = true;
	info.cq_base_pa = mem->pa;
	info.shadow_area_pa = mem->pa + cqsize;
	init_info->cq_base = mem->va;
	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
	init_info->cq_size = rsrc->cq_size;
	init_info->cq_id = rsrc->cq_id;
	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
	if (ret)
		goto error;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
	if (!wqe) {
		ret = I40IW_ERR_RING_FULL;
		goto error;
	}

	set_64bit_val(wqe, 0, rsrc->cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, cq->cq_pa);

	set_64bit_val(wqe, 40, cq->shadow_area_pa);

	header = rsrc->cq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	set_64bit_val(wqe, 24, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(dev->cqp);
	ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
						 I40IW_CQP_OP_CREATE_CQ,
						 &compl_info);

error:
	if (ret)
		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
	return ret;
}

/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @dev: iwarp device
 * @type: type of resource to delete
 * @reset: true if reset chip
 */
void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
			       enum puda_resource_type type,
			       bool reset)
{
	struct i40iw_ccq_cqe_info compl_info;
	struct i40iw_puda_rsrc *rsrc;
	struct i40iw_puda_buf *buf = NULL;
	struct i40iw_puda_buf *nextbuf = NULL;
	struct i40iw_virt_mem *vmem;
	enum i40iw_status_code ret;

	switch (type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		rsrc = dev->ilq;
		vmem = &dev->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		rsrc = dev->ieq;
		vmem = &dev->ieq_mem;
		break;
	default:
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
			    __func__, type);
		return;
	}

	switch (rsrc->completion) {
	case PUDA_HASH_CRC_COMPLETE:
		i40iw_free_hash_desc(rsrc->hash_desc);
		/* fall through */
	case PUDA_QP_CREATED:
		do {
			if (reset)
				break;
			ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
							      0, false, true, true);
			if (ret)
				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
					    "%s error ieq qp destroy\n",
					    __func__);

			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
								 I40IW_CQP_OP_DESTROY_QP,
								 &compl_info);
			if (ret)
				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
					    "%s error ieq qp destroy done\n",
					    __func__);
		} while (0);

		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
		/* fall through */
	case PUDA_CQ_CREATED:
		do {
			if (reset)
				break;
			ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
			if (ret)
				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
					    "%s error ieq cq destroy\n",
					    __func__);

			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
								 I40IW_CQP_OP_DESTROY_CQ,
								 &compl_info);
			if (ret)
				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
					    "%s error ieq cq destroy done\n",
					    __func__);
		} while (0);

		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
		break;
	default:
		i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
		break;
	}
	/* Free all allocated puda buffers for both tx and rx */
	buf = rsrc->alloclist;
	while (buf) {
		nextbuf = buf->next;
		i40iw_puda_dele_buf(dev, buf);
		buf = nextbuf;
		rsrc->alloc_buf_count--;
	}
	i40iw_free_virt_mem(dev->hw, vmem);
}

/**
 * i40iw_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
						   u32 count)
{
	u32 i;
	struct i40iw_puda_buf *buf;
	struct i40iw_puda_buf *nextbuf;

	for (i = 0; i < count; i++) {
		buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
		if (!buf) {
			rsrc->stats_buf_alloc_fail++;
			return I40IW_ERR_NO_MEMORY;
		}
		i40iw_puda_ret_bufpool(rsrc, buf);
		rsrc->alloc_buf_count++;
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;
		}
	}
	rsrc->avail_buf_count = rsrc->alloc_buf_count;
	return 0;
}

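/*
 * Note: alloclist is a simple singly linked list threaded through
 * buf->next, with each new buffer pushed at the head. It tracks every
 * allocation independently of the bufpool free list so that
 * i40iw_puda_dele_resources() can free all buffers at teardown, even
 * ones currently posted to the rq or pending transmit.
 */
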
/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @dev: iwarp device
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
					      struct i40iw_puda_rsrc_info *info)
{
	enum i40iw_status_code ret = 0;
	struct i40iw_puda_rsrc *rsrc;
	u32 pudasize;
	u32 sqwridsize, rqwridsize;
	struct i40iw_virt_mem *vmem;

	info->count = 1;
	pudasize = sizeof(struct i40iw_puda_rsrc);
	sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
	rqwridsize = info->rq_size * 8;
	switch (info->type) {
	case I40IW_PUDA_RSRC_TYPE_ILQ:
		vmem = &dev->ilq_mem;
		break;
	case I40IW_PUDA_RSRC_TYPE_IEQ:
		vmem = &dev->ieq_mem;
		break;
	default:
		return I40IW_NOT_SUPPORTED;
	}
	ret =
	    i40iw_allocate_virt_mem(dev->hw, vmem,
				    pudasize + sqwridsize + rqwridsize);
	if (ret)
		return ret;
	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
	spin_lock_init(&rsrc->bufpool_lock);
	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
		dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
		dev->ilq_count = info->count;
		rsrc->receive = info->receive;
		rsrc->xmit_complete = info->xmit_complete;
	} else {
		vmem = &dev->ieq_mem;
		dev->ieq_count = info->count;
		dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
		rsrc->receive = i40iw_ieq_receive;
		rsrc->xmit_complete = i40iw_ieq_tx_compl;
	}

	rsrc->type = info->type;
	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
	rsrc->mss = info->mss;
	/* Initialize all ieq lists */
	INIT_LIST_HEAD(&rsrc->bufpool);
	INIT_LIST_HEAD(&rsrc->txpend);

	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
	dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
	rsrc->qp_id = info->qp_id;
	rsrc->cq_id = info->cq_id;
	rsrc->sq_size = info->sq_size;
	rsrc->rq_size = info->rq_size;
	rsrc->cq_size = info->rq_size + info->sq_size;
	rsrc->buf_size = info->buf_size;
	rsrc->dev = dev;

	ret = i40iw_puda_cq_create(rsrc);
	if (!ret) {
		rsrc->completion = PUDA_CQ_CREATED;
		ret = i40iw_puda_qp_create(rsrc);
	}
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
		goto error;
	}
	rsrc->completion = PUDA_QP_CREATED;

	ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
	if (ret) {
		i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
		goto error;
	}

	rsrc->rxq_invalid_cnt = info->rq_size;
	ret = i40iw_puda_replenish_rq(rsrc, true);
	if (ret)
		goto error;

	if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
		if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
			rsrc->check_crc = true;
			rsrc->completion = PUDA_HASH_CRC_COMPLETE;
		}
	}

	dev->ccq_ops->ccq_arm(&rsrc->cq);
	return ret;
error:
	i40iw_puda_dele_resources(dev, info->type, false);
	return ret;
}

/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
	u64 *wqe;
	u64 offset24;

	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
	get_64bit_val(wqe, 24, &offset24);
	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
	set_64bit_val(wqe, 24, offset24);
}

/**
 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 * @length: length of fpdu
 */
static u16 i40iw_ieq_get_fpdu_length(u16 length)
{
	u16 fpdu_len;

	fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
	fpdu_len = (fpdu_len + 3) & 0xfffffffc;
	return fpdu_len;
}

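/*
 * Worked example, assuming I40IW_IEQ_MPA_FRAMING is 6 (the 2-byte MPA
 * length header plus the 4-byte trailing CRC): a 15-byte MPA payload
 * becomes 15 + 6 = 21, which the masking above rounds up to 24; a
 * 14-byte payload gives 20, already on a 4-byte boundary.
 */
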
/**
 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @length: length of data to copy
 */
static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
				    struct i40iw_puda_buf *txbuf,
				    u16 buf_offset, u32 txbuf_offset,
				    u32 length)
{
	void *mem1 = (u8 *)buf->mem.va + buf_offset;
	void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

	memcpy(mem2, mem1, length);
}

/**
 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
				   struct i40iw_puda_buf *txbuf)
{
	txbuf->maclen = buf->maclen;
	txbuf->tcphlen = buf->tcphlen;
	txbuf->ipv4 = buf->ipv4;
	txbuf->hdrlen = buf->hdrlen;
	i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
}

/**
 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
{
	u32 offset;

	if (buf->seqnum < fps) {
		offset = fps - buf->seqnum;
		if (offset > buf->datalen)
			return;
		buf->data += offset;
		buf->datalen -= (u16)offset;
		buf->seqnum = fps;
	}
}

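/*
 * Example of the trim above: if fps is 1000 and a buffer arrives with
 * seqnum 996 and datalen 100, the first 4 bytes predate the first partial
 * sequence number, so data advances by 4, datalen drops to 96, and the
 * buffer's seqnum is realigned to fps.
 */
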
/**
 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list of buffers for the fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
				  struct list_head *rxlist,
				  struct list_head *pbufl,
				  struct i40iw_puda_buf *txbuf,
				  u16 fpdu_len)
{
	struct i40iw_puda_buf *buf;
	u32 nextseqnum;
	u16 txoffset, bufoffset;

	buf = i40iw_puda_get_listbuf(pbufl);
	if (!buf)
		return;
	nextseqnum = buf->seqnum + fpdu_len;
	txbuf->totallen = buf->hdrlen + fpdu_len;
	txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
	i40iw_ieq_setup_tx_buf(buf, txbuf);

	txoffset = buf->hdrlen;
	bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

	do {
		if (buf->datalen >= fpdu_len) {
			/* copied full fpdu */
			i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
			buf->datalen -= fpdu_len;
			buf->data += fpdu_len;
			buf->seqnum = nextseqnum;
			break;
		}
		/* copy partial fpdu */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
		txoffset += buf->datalen;
		fpdu_len -= buf->datalen;
		i40iw_puda_ret_bufpool(ieq, buf);
		buf = i40iw_puda_get_listbuf(pbufl);
		if (!buf)
			return;
		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
	} while (1);

	/* last buffer on the list */
	if (buf->datalen)
		list_add(&buf->list, rxlist);
	else
		i40iw_puda_ret_bufpool(ieq, buf);
}

/**
 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: partial management per user qp
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list of buffers for the fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static enum i40iw_status_code i40iw_ieq_create_pbufl(
						     struct i40iw_pfpdu *pfpdu,
						     struct list_head *rxlist,
						     struct list_head *pbufl,
						     struct i40iw_puda_buf *buf,
						     u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	struct i40iw_puda_buf *nextbuf;
	u32 nextseqnum;
	u16 plen = fpdu_len - buf->datalen;
	bool done = false;

	nextseqnum = buf->seqnum + buf->datalen;
	do {
		nextbuf = i40iw_puda_get_listbuf(rxlist);
		if (!nextbuf) {
			status = I40IW_ERR_list_empty;
			break;
		}
		list_add_tail(&nextbuf->list, pbufl);
		if (nextbuf->seqnum != nextseqnum) {
			pfpdu->bad_seq_num++;
			status = I40IW_ERR_SEQ_NUM;
			break;
		}
		if (nextbuf->datalen >= plen) {
			done = true;
		} else {
			plen -= nextbuf->datalen;
			nextseqnum = nextbuf->seqnum + nextbuf->datalen;
		}
	} while (!done);

	return status;
}

/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
						       struct i40iw_pfpdu *pfpdu,
						       struct i40iw_puda_buf *buf,
						       u16 fpdu_len)
{
	enum i40iw_status_code status = 0;
	u8 *crcptr;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	struct list_head pbufl;	/* partial buffer list */
	struct i40iw_puda_buf *txbuf = NULL;
	struct list_head *rxlist = &pfpdu->rxlist;

	INIT_LIST_HEAD(&pbufl);
	list_add(&buf->list, &pbufl);

	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
	if (status)
		goto error;

	txbuf = i40iw_puda_get_bufpool(ieq);
	if (!txbuf) {
		pfpdu->no_tx_bufs++;
		status = I40IW_ERR_NO_TXBUFS;
		goto error;
	}

	i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
	i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
	crcptr = txbuf->data + fpdu_len - 4;
	mpacrc = *(u32 *)crcptr;
	if (ieq->check_crc) {
		status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
						(fpdu_len - 4), mpacrc);
		if (status) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad crc\n", __func__);
			goto error;
		}
	}

	i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
			txbuf->mem.va, txbuf->totallen);
	i40iw_puda_send_buf(ieq, txbuf);
	pfpdu->rcv_nxt = seqnum + fpdu_len;
	return status;

error:
	while (!list_empty(&pbufl)) {
		buf = (struct i40iw_puda_buf *)(pbufl.prev);
		list_del(&buf->list);
		list_add(&buf->list, rxlist);
	}
	if (txbuf)
		i40iw_puda_ret_bufpool(ieq, txbuf);
	return status;
}

/**
 * i40iw_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
						    struct i40iw_pfpdu *pfpdu,
						    struct i40iw_puda_buf *buf)
{
	u16 fpdu_len = 0;
	u16 datalen = buf->datalen;
	u8 *datap = buf->data;
	u8 *crcptr;
	u16 ioffset = 0;
	u32 mpacrc;
	u32 seqnum = buf->seqnum;
	u16 length = 0;
	u16 full = 0;
	bool partial = false;
	struct i40iw_puda_buf *txbuf;
	struct list_head *rxlist = &pfpdu->rxlist;
	enum i40iw_status_code ret = 0;
	enum i40iw_status_code status = 0;

	ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
	while (datalen) {
		fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
		if (fpdu_len > pfpdu->max_fpdu_data) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error bad fpdu_len\n", __func__);
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}

		if (datalen < fpdu_len) {
			partial = true;
			break;
		}
		crcptr = datap + fpdu_len - 4;
		mpacrc = *(u32 *)crcptr;
		if (ieq->check_crc)
			ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
						     datap, fpdu_len - 4, mpacrc);
		if (ret) {
			status = I40IW_ERR_MPA_CRC;
			list_add(&buf->list, rxlist);
			return status;
		}
		full++;
		pfpdu->fpdu_processed++;
		datap += fpdu_len;
		length += fpdu_len;
		datalen -= fpdu_len;
	}
	if (full) {
		/* copy full pdu's in the txbuf and send them out */
		txbuf = i40iw_puda_get_bufpool(ieq);
		if (!txbuf) {
			pfpdu->no_tx_bufs++;
			status = I40IW_ERR_NO_TXBUFS;
			list_add(&buf->list, rxlist);
			return status;
		}
		/* modify txbuf's buffer header */
		i40iw_ieq_setup_tx_buf(buf, txbuf);
		/* copy full fpdu's to new buffer */
		i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
					length);
		txbuf->totallen = buf->hdrlen + length;

		i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
		i40iw_puda_send_buf(ieq, txbuf);

		if (!datalen) {
			pfpdu->rcv_nxt = buf->seqnum + length;
			i40iw_puda_ret_bufpool(ieq, buf);
			return status;
		}
		buf->data = datap;
		buf->seqnum = seqnum + length;
		buf->datalen = datalen;
		pfpdu->rcv_nxt = buf->seqnum;
	}
	if (partial)
		status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

	return status;
}

/**
 * i40iw_ieq_process_fpdus - process fpdu's buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
				    struct i40iw_puda_rsrc *ieq)
{
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct i40iw_puda_buf *buf;
	enum i40iw_status_code status;

	do {
		if (list_empty(rxlist))
			break;
		buf = i40iw_puda_get_listbuf(rxlist);
		if (!buf) {
			i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
				    "%s: error no buf\n", __func__);
			break;
		}
		if (buf->seqnum != pfpdu->rcv_nxt) {
			/* This could be out of order or missing packet */
			pfpdu->out_of_order++;
			list_add(&buf->list, rxlist);
			break;
		}
		/* keep processing buffers from the head of the list */
		status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
		if (status == I40IW_ERR_MPA_CRC) {
			pfpdu->mpa_crc_err = true;
			while (!list_empty(rxlist)) {
				buf = i40iw_puda_get_listbuf(rxlist);
				i40iw_puda_ret_bufpool(ieq, buf);
				pfpdu->crc_err++;
			}
			/* create CQP for AE */
			i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
		}
	} while (!status);
}

/**
 * i40iw_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
				       struct i40iw_sc_qp *qp,
				       struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_buf *tmpbuf = NULL;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
	u32 rcv_wnd = hw_host_ctx[23];
	/* first partial seq # in q2 */
	u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
	struct list_head *rxlist = &pfpdu->rxlist;
	struct list_head *plist;

	pfpdu->total_ieq_bufs++;

	if (pfpdu->mpa_crc_err) {
		pfpdu->crc_err++;
		goto error;
	}
	if (pfpdu->mode && (fps != pfpdu->fps)) {
		/* clean up qp as it is new partial sequence */
		i40iw_ieq_cleanup_qp(ieq->dev, qp);
		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
			    "%s: restarting new partial\n", __func__);
		pfpdu->mode = false;
	}

	if (!pfpdu->mode) {
		i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
		/* First_Partial_Sequence_Number check */
		pfpdu->rcv_nxt = fps;
		pfpdu->fps = fps;
		pfpdu->mode = true;
		pfpdu->max_fpdu_data = ieq->mss;
		pfpdu->pmode_count++;
		INIT_LIST_HEAD(rxlist);
		i40iw_ieq_check_first_buf(buf, fps);
	}

	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
		pfpdu->bad_seq_num++;
		goto error;
	}

	if (!list_empty(rxlist)) {
		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
		while ((struct list_head *)tmpbuf != rxlist) {
			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
				break;
			plist = &tmpbuf->list;
			tmpbuf = (struct i40iw_puda_buf *)plist->next;
		}
		/* Insert buf before tmpbuf */
		list_add_tail(&buf->list, &tmpbuf->list);
	} else {
		list_add_tail(&buf->list, rxlist);
	}
	i40iw_ieq_process_fpdus(qp, ieq);
	return;
error:
	i40iw_puda_ret_bufpool(ieq, buf);
}

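/*
 * The "(int)(buf->seqnum - tmpbuf->seqnum) < 0" test in the insertion
 * loop above is serial-number arithmetic: unsigned subtraction followed
 * by a signed comparison keeps the ordering correct even when TCP
 * sequence numbers wrap around 2^32.
 */
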
/**
 * i40iw_ieq_receive - received exception buffer
 * @dev: iwarp device
 * @buf: exception buffer received
 */
static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
			      struct i40iw_puda_buf *buf)
{
	struct i40iw_puda_rsrc *ieq = dev->ieq;
	struct i40iw_sc_qp *qp = NULL;
	u32 wqe_idx = ieq->compl_rxwqe_idx;

	qp = i40iw_ieq_get_qp(dev, buf);
	if (!qp) {
		ieq->stats_bad_qp_id++;
		i40iw_puda_ret_bufpool(ieq, buf);
	} else {
		i40iw_ieq_handle_exception(ieq, qp, buf);
	}
	/*
	 * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
	 * on which wqe_idx to start replenish rq
	 */
	if (!ieq->rxq_invalid_cnt)
		ieq->rx_wqe_idx = wqe_idx;
	ieq->rxq_invalid_cnt++;
}

/**
 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
 * @dev: iwarp device
 * @sqwrid: pointer to puda buffer
 */
static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
{
	struct i40iw_puda_rsrc *ieq = dev->ieq;
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;

	i40iw_puda_ret_bufpool(ieq, buf);
	if (!list_empty(&ieq->txpend)) {
		buf = i40iw_puda_get_listbuf(&ieq->txpend);
		i40iw_puda_send_buf(ieq, buf);
	}
}

/**
 * i40iw_ieq_cleanup_qp - qp is being destroyed
 * @dev: iwarp device
 * @qp: qp whose pending fpdu buffers are freed
 */
void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
	struct i40iw_puda_buf *buf;
	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
	struct list_head *rxlist = &pfpdu->rxlist;
	struct i40iw_puda_rsrc *ieq = dev->ieq;

	if (!pfpdu->mode)
		return;
	while (!list_empty(rxlist)) {
		buf = i40iw_puda_get_listbuf(rxlist);
		i40iw_puda_ret_bufpool(ieq, buf);
	}
}