/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include "t4_values.h"
#include "csio_defs.h"
int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */

int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
/* Read back the free-list buffer size programmed in the given size register */
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
							reg * sizeof(uint32_t));
}
/* Free list buffer size */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
	return sge->sge_fl_buf_size[buf->paddr & 0xF];
}
/* Size of the egress queue status page */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}
/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	/*
	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
	 * number of bytes in the freelist queue. This translates to at least
	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
	 */
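	/*
	 * For illustration: if 24 buffer pointers were posted since the
	 * last ring (inc_idx == 24), the PIDX increment written below is
	 * 24 / 8 == 3 freelist credits, and inc_idx keeps the remainder.
	 */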
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
				  PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
				  MYPF_REG(SGE_PF_KDOORBELL_A));
		flq->inc_idx &= 7;
	}
}
/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	csio_wr_reg32(hw, CIDXINC_V(0)		|
			  INGRESSQID_V(iqid)	|
			  TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
			  MYPF_REG(SGE_PF_GTS_A));
}
/*
 * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Fill up freelist buffer entries with buffers of the size specified
 * in the size register.
 */
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	__be64 *d = (__be64 *)(flq->vstart);
	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
	uint64_t paddr;
	int sreg = flq->un.fl.sreg;
	int n = flq->credits;

	while (n--) {
		buf->len = sge->sge_fl_buf_size[sreg];
		buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
						  &buf->paddr);
		if (!buf->vaddr) {
			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
			return -ENOMEM;
		}
		/* The low 4 bits of the DMA address carry the size register */
		paddr = buf->paddr | (sreg & 0xF);
		*d++ = cpu_to_be64(paddr);
		buf++;
	}

	return 0;
}
/*
 * csio_wr_update_fl - Advance the freelist producer index by @n buffers.
 * @hw: HW module.
 * @flq: Freelist queue.
 */
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
	flq->inc_idx += n;
	flq->pidx += n;
	if (unlikely(flq->pidx >= flq->credits))
		flq->pidx -= (uint16_t)flq->credits;

	CSIO_INC_STATS(flq, n_flq_refill);
}
/*
 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
 * @hw: HW module
 * @qsize: Size of the queue in bytes
 * @wrsize: Size of WR in this queue, if fixed.
 * @type: Type of queue (Ingress/Egress/Freelist)
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers for FL.
 * @sreg: What is the FL buffer size register?
 * @iq_int_handler: Ingress queue handler in INTx mode.
 *
 * This function allocates and sets up a queue for the caller
 * of size qsize, aligned at the required boundary. This is subject to
 * free entries being available in the queue array. If one is found,
 * it is initialized with the allocated queue, marked as being used (owner),
 * and a handle returned to the caller in the form of the queue's index
 * into the q_arr array.
 * If the user has indicated a freelist (by specifying nflb > 0), create
 * another queue (with its own index into q_arr) for the freelist. Allocate
 * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
 * idx in the ingress queue's flq.idx. This is how a freelist is associated
 * with its owning ingress queue.
 */
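/*
 * Illustrative call (values are hypothetical): allocate a 1KB ingress
 * queue of fixed 32-byte WRs, backed by a 64-entry freelist fed from
 * FL buffer size register 0:
 *
 *	iq_idx = csio_wr_alloc_q(hw, 1024, 32, CSIO_INGRESS, owner,
 *				 64, 0, intx_handler);
 */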
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q	*q, *flq;
	int		free_idx = wrm->free_qidx;
	int		ret_idx = free_idx;
	uint32_t	qsz;
	int		flq_idx;

	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}

	switch (type) {
	case CSIO_EGRESS:
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				 wrsize);
			return -1;
		}
		/*
		 * Number of elements must be a multiple of 16.
		 * So this includes the status page size.
		 */
		qsz = ALIGN(qsize / wrsize, 16) * wrsize;
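		/*
		 * For illustration: qsize == 1000 and wrsize == 32 give
		 * ALIGN(31, 16) * 32 == 32 * 32 == 1024 bytes, i.e. 31
		 * usable entries plus one entry-sized status page.
		 */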
		break;
	case CSIO_FREELIST:
		qsz = ALIGN(qsize / wrsize, 8) * wrsize +
				csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}
	q = wrm->q_arr[free_idx];

	q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}

	q->type	 = type;
	q->owner = owner;
	q->pidx	 = q->cidx = q->inc_idx = 0;
	q->size	 = qsz;
	q->wr_sz = wrsize;	/* If using fixed size WRs */

	wrm->free_qidx++;
	if (type == CSIO_INGRESS) {
		/* Since queue area is set to zero */
		q->un.iq.genbit = 1;

		/*
		 * Ingress queue status page size is always the size of
		 * the ingress queue entry.
		 */
		q->credits = (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);

		/* Allocate memory for FL if requested */
		if (nflb > 0) {
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			/* Associate the new FL with the Ingress queue */
			q->un.iq.flq_idx = flq_idx;
			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kcalloc(flq->credits,
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;
			/* Fill up the free list buffers */
			if (csio_wr_fill_fl(hw, flq))
				return -1;

			/*
			 * Make sure that in a FLQ, at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}

		/* Associate the IQ INTx handler. */
		q->un.iq.iq_intx_handler = iq_intx_handler;

		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else { /* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap   = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}

	return ret_idx;
}
/*
 * csio_wr_iq_create_rsp - Response handler for IQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that got created.
 *
 * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx)	= 0;
	/* IQ id relative to the FW's IQ start */
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;

	/* Set the iq-id to iq map table. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);
	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue after the next GTS write.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		csio_q_pidx(hw, flq_idx)    = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: Interrupt vector to associate with this queue (MSI-X mode).
 * @portid: PCIE Channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * with the alloc/write bits set.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}
	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex =
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex = (uint16_t)vec;
		break;
	default:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Pass in the ingress queue cmd parameters */
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch = async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus = X_UPDATESCHEDULING_TIMER;
	iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech = portid;
	iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	iqp.iqsize = csio_q_size(hw, iq_idx) /
			csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr = csio_q_pstart(hw, iq_idx);
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax = ((chip == CHELSIO_T5) ?
				X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
		iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr = csio_q_pstart(hw, flq_idx);
	}
	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_eqid(hw, eq_idx)	    = (uint16_t)eqp.eqid;
	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
	csio_q_pidx(hw, eq_idx)	    = csio_q_cidx(hw, eq_idx) = 0;
	csio_q_inc_idx(hw, eq_idx)  = 0;

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated ingress queue index.
 * @portid: PCIE channel to be associated with this queue.
 * @cbfn: Completion callback.
 *
 * This API configures an offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}
	eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid    = csio_q_iqid(hw, iq_idx);
	eqp.fbmin   = X_FETCHBURSTMIN_64B;
	eqp.fbmax   = X_FETCHBURSTMAX_512B;
	eqp.pciechn = portid;
	eqp.eqsize  = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr  = csio_q_pstart(hw, eq_idx);

	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}
/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}
/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	int flq_idx;
	struct csio_mb *mbp;
	struct csio_iq_params iqp;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.iqid = csio_q_iqid(hw, iq_idx);
	iqp.type = FW_IQ_TYPE_FL_INT_CAP;

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that was freed.
 *
 * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}
/*
 * csio_wr_eq_destroy - Free an Egress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @eq_idx: Egress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
 * with the free bit set.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.eqid = csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}
/*
 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
 * @hw: HW module.
 * @qidx: Egress queue index
 *
 * Cleanup the Egress queue status page.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;

	memset(stp, 0, sizeof(*stp));
}
/*
 * csio_wr_cleanup_iq_ftr - Cleanup footer entries in IQ
 * @hw: HW module.
 * @qidx: Ingress queue index
 *
 * Cleanup the footer entries in the given ingress queue,
 * set to 1 the internal copy of genbit.
 */
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *wr;
	struct csio_iqwr_footer *ftr;
	uint32_t i;

	/* Set to 1 since we are just about to zero out the genbit */
	q->un.iq.genbit = 1;

	for (i = 0; i < q->credits; i++) {
		/* Get the WR */
		wr = (void *)((uintptr_t)q->vstart +
			      (i * q->wr_sz));
		/* Get the footer */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
			      (q->wr_sz - sizeof(*ftr)));
		/* Zero out footer */
		memset(ftr, 0, sizeof(*ftr));
	}
}
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = 0;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			break;
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
			}
			break;
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return rv;
}
/*
 * csio_wr_get - Get requested size of WR entry/entries from queue.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @size: Cumulative size of Work request(s).
 * @wrp: Work request pair.
 *
 * If requested credits are available, return the start address of the
 * work request in the work request pair. Set pidx accordingly and
 * return.
 *
 * NOTE about WR pair:
 * ==================
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/size to be passed back to the caller -
 * hence the Work request pair format.
 */
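/*
 * For illustration, assuming the 64-byte CSIO_QCREDIT_SZ: in a 64-credit
 * queue with pidx at credit 62, a 192-byte (3-credit) request comes back
 * as addr1/size1 covering credits 62-63 (128 bytes) and addr2/size2
 * covering credit 0 (64 bytes), with pidx advancing to 1.
 */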
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
			     (q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits = req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);
	/* Calculate credits */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}
	/*
	 * Check if we have enough credits.
	 * credits == 0 implies the queue is full (one credit is always
	 * held back to distinguish a full queue from an empty one).
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}
	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if WR spills over.
	 * If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx	   = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
					CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx += (uint16_t)req_credits;

		/* We are at the end of queue, roll back pidx to top */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}
/*
 * csio_wr_copy_to_wrp - Copies given data into WR.
 * @data_buf: Data buffer.
 * @wrp: Work request pair.
 * @wr_off: Work request offset.
 * @data_len: Data length.
 *
 * Copies the given data into the Work Request. The work request pair (wrp)
 * specifies the address information of the Work request.
 */
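/*
 * For illustration: with size1 == 128, wr_off == 96 and data_len == 64,
 * the first memcpy() below lands 32 bytes at the tail of addr1 and the
 * second lands the remaining 32 bytes at the start of addr2.
 */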
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		    uint32_t wr_off, uint32_t data_len)
{
	uint32_t nbytes;

	/* Amount of space available in buffer addr1 of WRP */
	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
					data_len : (wrp->size1 - wr_off);

	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
	data_len -= nbytes;

	/* Write the remaining data from the beginning of circular buffer */
	if (data_len) {
		CSIO_DB_ASSERT(data_len <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
	}
}
/*
 * csio_wr_issue - Notify chip of Work request.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @prio: 0: Low priority, 1: High priority
 *
 * Rings the SGE Doorbell by writing the current producer index of the passed
 * in queue into the register.
 */
void
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	wmb();
	/* Ring SGE Doorbell writing q->pidx into it */
	csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
			  PIDX_T5_V(q->inc_idx) | DBTYPE_F,
			  MYPF_REG(SGE_PF_KDOORBELL_A));
	q->inc_idx = 0;
}
static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
	if (q->pidx > q->cidx)
		return q->pidx - q->cidx;
	else if (q->cidx > q->pidx)
		return q->credits - (q->cidx - q->pidx);

	return 0;	/* cidx == pidx, empty queue */
}
/*
 * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
 * @hw: HW module.
 * @flq: The freelist queue.
 *
 * Invalidate the driver's version of a freelist buffer entry,
 * without freeing the associated DMA memory. The entry
 * to be invalidated is picked up from the current Free list
 * buffer cidx.
 */
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
	flq->cidx++;
	if (flq->cidx == flq->credits) {
		flq->cidx = 0;
		CSIO_INC_STATS(flq, n_qwrap);
	}
}
/*
 * csio_wr_process_fl - Process a freelist completion.
 * @hw: HW module.
 * @q: The ingress queue attached to the Freelist.
 * @wr: The freelist completion WR in the ingress queue.
 * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
 * @iq_handler: Caller's handler for this completion.
 * @priv: Private pointer of caller
 */
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);

	len = len_to_qid;

	if (len & IQWRF_NEWBUF) {
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);
	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr = buf->paddr;
		fbuf->vaddr = buf->vaddr;

		flb.offset = flq->un.fl.offset;
		lastlen	   = min(bufsz, len);
		fbuf->len  = lastlen;

		len -= lastlen;
		if (!len)
			break;
		csio_wr_inval_flq_buf(hw, flq);
	}

	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	if (flq->un.fl.packen)
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);
}
/*
 * csio_is_new_iqwr - Is this a new Ingress queue entry?
 * @q: Ingress queue.
 * @ftr: Ingress queue WR SGE footer.
 *
 * The entry is new if our generation bit matches the corresponding
 * bit in the footer of the current WR.
 */
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
}
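/*
 * For illustration: the driver starts with its genbit copy at 1 over
 * zeroed footers, the HW flips the genbit it writes on every pass through
 * the ring, and the driver flips its copy each time it wraps (see
 * csio_wr_process_iq()), so a mismatch marks not-yet-written WRs.
 */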
/*
 * csio_wr_process_iq - Process elements in Ingress queue.
 * @hw: HW pointer
 * @q: Ingress queue
 * @iq_handler: Handler for this queue
 * @priv: Caller's private pointer
 *
 * This routine walks through every entry of the ingress queue, calling
 * the provided iq_handler with the entry, until the generation bit
 * flips.
 */
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
					wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;

	/* Get the footer */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));
	/*
	 * When the q wrapped around last time, the driver should have
	 * inverted ic.genbit as well.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
						(uintptr_t)q->vwrap);
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);

		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL,
				   priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
				     csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * We are already in the Forward Interrupt
				 * Queue service! Do not service again!
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				  wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}

		/*
		 * Ingress *always* has fixed size WR entries. Therefore,
		 * there should always be complete WRs towards the end of
		 * the queue.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {

			/* Roll over to start of queue */
			q->cidx = 0;
			wr = q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr = (void *)((uintptr_t)(q->vstart) +
				      (q->cidx * q->wr_sz));
		}

		/* Get the footer of the next WR */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;
	} /* while (q->un.iq.genbit == hdr->genbit) */

	/*
	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
	 * especially in MSI-X mode. With INTx, this may be a common
	 * occurrence.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}
	/* Replenish free list buffers if pending falls below low water mark */
	if (flq) {
		uint32_t avail = csio_wr_avail_qcredits(flq);

		if (avail <= 16) {
			/*
			 * Make sure that in the FLQ, at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}
restart:
	/* Now inform SGE about our incremental index value */
	csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)		  |
			  INGRESSQID_V(q->un.iq.physiqid) |
			  TIMERREG_V(csio_sge_timer_reg),
			  MYPF_REG(SGE_PF_GTS_A));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}
int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
		       void (*iq_handler)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
		       void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *iq = wrm->q_arr[qidx];

	return csio_wr_process_iq(hw, iq, iq_handler, priv);
}
static int
csio_closest_timer(struct csio_sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = cnt - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t stat_len = clsz > 64 ? 128 : 64;
	u32 fl_align = clsz < 32 ? 32 : clsz;
	u32 pack_align;
	u32 ingpad, ingpack;
	int pcie_cap;
	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
		      HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
		      HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
		      SGE_HOST_PAGE_SIZE_A);
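	/*
	 * For illustration: with 4KB pages, PAGE_SHIFT is 12, so s_hps
	 * above is 2, i.e. each PF's host page size is encoded as
	 * 1KB << 2 == 4KB.
	 */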
	/* T5 introduced the separation of the Free List Padding and
	 * Packing Boundaries. Thus, we can select a smaller Padding
	 * Boundary to avoid uselessly chewing up PCIe Link and Memory
	 * Bandwidth, and use a Packing Boundary which is large enough
	 * to avoid false sharing between CPUs, etc.
	 *
	 * For the PCI Link, the smaller the Padding Boundary the
	 * better. For the Memory Controller, a smaller Padding
	 * Boundary is better until we cross under the Memory Line
	 * Size (the minimum unit of transfer to/from Memory). If we
	 * have a Padding Boundary which is smaller than the Memory
	 * Line Size, that'll involve a Read-Modify-Write cycle on the
	 * Memory Controller which is never good.
	 */
	/* We want the Packing Boundary to be based on the Cache Line
	 * Size in order to help avoid False Sharing performance
	 * issues between CPUs, etc. We also want the Packing
	 * Boundary to incorporate the PCI-E Maximum Payload Size. We
	 * get best performance when the Packing Boundary is a
	 * multiple of the Maximum Payload Size.
	 */
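	/*
	 * For illustration: with 64-byte cache lines and a 256-byte PCIe
	 * Maximum Payload Size, pack_align below ends up as 256.
	 */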
	pack_align = fl_align;
	pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u32 mps, mps_log;
		u16 devctl;

		/* The PCIe Device Control Maximum Payload Size field
		 * [bits 7:5] encodes sizes as powers of 2 starting at
		 * 128 bytes.
		 */
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL,
				     &devctl);
		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
		mps = 1 << mps_log;
		if (mps > pack_align)
			pack_align = mps;
	}
	/* T5/T6 have a special interpretation of the "0"
	 * value for the Packing Boundary. This corresponds to 16
	 * bytes instead of the expected 32 bytes.
	 */
	if (pack_align <= 16) {
		ingpack = INGPACKBOUNDARY_16B_X;
		fl_align = 16;
	} else if (pack_align == 32) {
		ingpack = INGPACKBOUNDARY_64B_X;
		fl_align = 64;
	} else {
		u32 pack_align_log = fls(pack_align) - 1;

		ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
		fl_align = pack_align;
	}
	/* Use the smallest Ingress Padding which isn't smaller than
	 * the Memory Controller Read/Write Size. We'll take that as
	 * being 8 bytes since we don't know of any system with a
	 * wider Memory Controller Bus Width.
	 */
	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		ingpad = INGPADBOUNDARY_32B_X;
	else
		ingpad = T6_INGPADBOUNDARY_8B_X;
	csio_set_reg_field(hw, SGE_CONTROL_A,
			   INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
			   EGRSTATUSPAGESIZE_F,
			   INGPADBOUNDARY_V(ingpad) |
			   EGRSTATUSPAGESIZE_V(stat_len != 64));
	csio_set_reg_field(hw, SGE_CONTROL2_A,
			   INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
			   INGPACKBOUNDARY_V(ingpack));

	/* FL BUFFER SIZE#0 is Page size, i.e. already aligned to cache line */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
	/*
	 * If using hard params, the following will get set correctly
	 * in csio_wr_set_sge().
	 */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE2_A);
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE3_A);
	}

	sge->csio_fl_align = fl_align;
	csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);

	/* default value of rx_dma_offset of the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL_A,
			   PKTSHIFT_V(PKTSHIFT_M),
			   PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));

	csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
				    CSUM_HAS_PSEUDO_HDR_F, 0);
}
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}
/*
 * csio_wr_get_sge - Get SGE register values.
 * @hw: HW module.
 *
 * Used by non-master functions and by master-functions relying on config
 * file.
 */
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;
	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);

	ingpad = INGPADBOUNDARY_G(sge->sge_control);

	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);
	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_G(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_G(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_G(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_G(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_G(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_G(timer_value_4_and_5));
	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
	sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}
/*
 * csio_wr_set_sge - Initialize SGE registers
 * @hw: HW module.
 *
 * Used by Master function to initialize SGE registers in the absence
 * of a config file.
 */
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t i;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
			   LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
			   HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));

	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
			   ENABLE_DROP_F);
	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */

	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);

	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);
	/* Initialize interrupt coalescing attributes */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
	csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
			  THRESHOLD_1_V(sge->counter_val[1]) |
			  THRESHOLD_2_V(sge->counter_val[2]) |
			  THRESHOLD_3_V(sge->counter_val[3]),
			  SGE_INGRESS_RX_THRESHOLD_A);

	csio_wr_reg32(hw,
		TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		SGE_TIMER_VALUE_0_AND_1_A);

	csio_wr_reg32(hw,
		TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		SGE_TIMER_VALUE_2_AND_3_A);

	csio_wr_reg32(hw,
		TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		SGE_TIMER_VALUE_4_AND_5_A);

	csio_init_intr_coalesce_parms(hw);
}
int
csio_wr_sge_init(struct csio_hw *hw)
{
	/*
	 * If we are master and chip is not initialized:
	 *    - If we plan to use the config file, we need to fix up some
	 *      host specific registers, and read the rest of the SGE
	 *      configuration.
	 *    - If we don't plan to use the config file, we need to initialize
	 *      SGE entirely, including fixing the host specific registers.
	 * If we are master and chip is initialized, just read and work off of
	 * the already initialized SGE values.
	 * If we aren't the master, we are only allowed to read and work off of
	 * the already initialized SGE values.
	 *
	 * Therefore, before calling this function, we assume that the master-
	 * ship of the card, state and whether to use config file or not, have
	 * already been decided.
	 */
	if (csio_is_hw_master(hw)) {
		if (hw->fw_state != CSIO_DEV_STATE_INIT)
			csio_wr_fixup_host_params(hw);

		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
			csio_wr_get_sge(hw);
		else
			csio_wr_set_sge(hw);
	} else
		csio_wr_get_sge(hw);

	return 0;
}
/*
 * csio_wrm_init - Initialize Work request module.
 * @wrm: WR module
 * @hw: HW pointer
 *
 * Allocates memory for an array of queue pointers starting at q_arr.
 */
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;

	if (!wrm->num_q) {
		csio_err(hw, "Num queues is not set\n");
		return -EINVAL;
	}

	wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
	if (!wrm->q_arr)
		return -ENOMEM;

	for (i = 0; i < wrm->num_q; i++) {
		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
		if (!wrm->q_arr[i]) {
			while (--i >= 0)
				kfree(wrm->q_arr[i]);
			kfree(wrm->q_arr);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * csio_wrm_exit - Uninitialize Work request module.
 * @wrm: WR module
 * @hw: HW module
 *
 * Uninitialize WR module. Free q_arr and pointers in it.
 * We have the additional job of freeing the DMA memory associated
 * with the queues.
 */
void
csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;
	uint32_t j;
	struct csio_q *q;
	struct csio_dma_buf *buf;
	for (i = 0; i < wrm->num_q; i++) {
		q = wrm->q_arr[i];

		if (wrm->free_qidx && (i < wrm->free_qidx)) {
			if (q->type == CSIO_FREELIST) {
				if (!q->un.fl.bufs)
					continue;
				for (j = 0; j < q->credits; j++) {
					buf = &q->un.fl.bufs[j];
					if (!buf->vaddr)
						continue;
					pci_free_consistent(hw->pdev, buf->len,
							    buf->vaddr,
							    buf->paddr);
				}
				kfree(q->un.fl.bufs);
			}
			pci_free_consistent(hw->pdev, q->size,
					    q->vstart, q->pstart);
		}
		kfree(q);
	}

	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;

	kfree(wrm->q_arr);
}