/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__
#include "t4_values.h"
#include "t4fw_ri_api.h"
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff

#define PCIE_MA_SYNC_A 0x30b4
struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
	u8 pad[2];
	u16 host_wq_pidx;
	u16 host_cidx;
	u16 host_pidx;
};
#define T4_RQT_ENTRY_SHIFT 6
#define T4_RQT_ENTRY_SIZE  BIT(T4_RQT_ENTRY_SHIFT)
#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
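/*
 * Illustrative note on the geometry above: one SQ work request spans
 * T4_SQ_NUM_SLOTS * T4_EQ_ENTRY_SIZE = 5 * 64 = 320 bytes, so each
 * inline/SGE limit is simply whatever remains of those 320 bytes once
 * the fixed WR header (plus its immediate/ISGL sub-header) is carved out.
 */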
static inline int t4_max_fr_depth(int use_dsgl)
{
	return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

#define T4_WRITE_CMPL_MAX_SGL 4
#define T4_WRITE_CMPL_MAX_CQE 16
union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
	struct fw_ri_inv_lstag_wr inv;
	struct fw_ri_rdma_write_cmpl_wr write_cmpl;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}
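/*
 * Illustrative sketch only: one plausible way a caller might stamp a
 * send WR header before filling in the payload. The completion flag
 * chosen here is an assumption for the example, not mandated by this
 * header.
 */
static inline void t4_example_init_send_hdr(union t4_wr *wqe, u16 wrid,
					    u8 len16)
{
	init_wr_hdr(wqe, wrid, FW_RI_SEND_WR, FW_RI_COMPLETION_FLAG, len16);
}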
/* CQE/AE status codes */
#define T4_ERR_SUCCESS 0x0
#define T4_ERR_STAG 0x1				/* STAG invalid: either the */
						/* STAG is off limit, is 0, */
						/* or STAG_key mismatch */
#define T4_ERR_PDID 0x2				/* PDID mismatch */
#define T4_ERR_QPID 0x3				/* QPID mismatch */
#define T4_ERR_ACCESS 0x4			/* Invalid access right */
#define T4_ERR_WRAP 0x5				/* Wrap error */
#define T4_ERR_BOUND 0x6			/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR 0x7		/* attempt to invalidate a */
						/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate a */
						/* MR with an MW bound to it */
#define T4_ERR_ECC 0x9				/* ECC error detected */
#define T4_ERR_ECC_PSTAG 0xA			/* ECC error detected when */
						/* reading PSTAG for a MW */
#define T4_ERR_PBL_ADDR_BOUND 0xB		/* pbl addr out of bounds: */
						/* software error */
#define T4_ERR_SWFLUSH 0xC			/* SW FLUSHED */
#define T4_ERR_CRC 0x10				/* CRC error */
#define T4_ERR_MARKER 0x11			/* Marker error */
#define T4_ERR_PDU_LEN_ERR 0x12			/* invalid PDU length */
#define T4_ERR_OUT_OF_RQE 0x13			/* out of RQE */
#define T4_ERR_DDP_VERSION 0x14			/* wrong DDP version */
#define T4_ERR_RDMA_VERSION 0x15		/* wrong RDMA version */
#define T4_ERR_OPCODE 0x16			/* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM 0x17		/* invalid ddp queue number */
#define T4_ERR_MSN 0x18				/* MSN error */
#define T4_ERR_TBIT 0x19			/* tag bit not set correctly */
#define T4_ERR_MO 0x1A				/* MO not 0 for TERMINATE */
#define T4_ERR_MSN_GAP 0x1B
#define T4_ERR_MSN_RANGE 0x1C
#define T4_ERR_IRD_OVERFLOW 0x1D
#define T4_ERR_RQE_ADDR_BOUND 0x1E		/* RQE addr out of bounds: */
						/* software error */
#define T4_ERR_INTERNAL_ERR 0x1F		/* internal error (opcode */
						/* mismatch) */
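/*
 * Illustrative helper, not part of the original header: a consumer
 * might treat everything other than success or a software flush as a
 * hard error when mapping these codes to work-completion status (the
 * driver's real mapping lives in its CQ poll code).
 */
static inline bool t4_err_is_fatal(u8 status)
{
	return status != T4_ERR_SUCCESS && status != T4_ERR_SWFLUSH;
}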
/*
 * Use union for immediate data to be consistent with
 * stack's 32 bit data and iWARP spec's 64 bit data.
 */
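/*
 * Sketch only of the shape that comment describes (the full hardware
 * CQE layout is not reproduced here; the field names follow the
 * CQE_IMM_DATA() accessor below and are otherwise assumptions):
 */
union t4_iw_imm_data_sketch {
	struct {
		__be32 imm_data32;
		u32 reserved;
	} ib_imm_data;
	__be64 imm_data64;
};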
/* macros for flit 0 of the cqe */
#define CQE_QPID_S 12
#define CQE_QPID_M 0xFFFFF
#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)

#define CQE_SWCQE_S 11
#define CQE_SWCQE_M 0x1
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)

#define CQE_DRAIN_S 10
#define CQE_DRAIN_M 0x1
#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)

#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)

#define CQE_TYPE_S 4
#define CQE_TYPE_M 0x1
#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)

#define CQE_OPCODE_S 0
#define CQE_OPCODE_M 0xF
#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x)( \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
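/*
 * Illustrative only: decoding a CQE header word with the accessors
 * above. This helper is not part of the original header.
 */
static inline void t4_example_print_cqe_hdr(struct t4_cqe *cqe)
{
	pr_debug("cqe qpid %u type %s opcode 0x%x status 0x%x%s%s\n",
		 CQE_QPID(cqe), SQ_TYPE(cqe) ? "SQ" : "RQ",
		 CQE_OPCODE(cqe), CQE_STATUS(cqe),
		 SW_CQE(cqe) ? " (sw)" : "",
		 DRAIN_CQE(cqe) ? " (drain)" : "");
}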
#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
#define CQE_ABS_RQE_IDX(x) (be32_to_cpu((x)->u.srcqe.abs_rqe_idx))
#define CQE_IMM_DATA(x)( \
	(x)->u.imm_data_rcqe.iw_imm_data.ib_imm_data.imm_data32)

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))

/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S 63
#define CQE_GENBIT_M 0x1
#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)

#define CQE_OVFBIT_S 62
#define CQE_OVFBIT_M 0x1
#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)

#define CQE_IQTYPE_S 60
#define CQE_IQTYPE_M 0x3
#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)

#define CQE_TS_M 0x0fffffffffffffffULL
#define CQE_TS_G(x) ((x) & CQE_TS_M)

#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return pgprot_writecombine(prot);
#else
	return pgprot_noncached(prot);
#endif
}
enum {
	T4_SQ_ONCHIP = (1<<0),
};
struct t4_sq {
	union t4_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
	u16 flags;
	short flush_cidx;
};
struct t4_rq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
};
struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	void __iomem *db;
	struct c4iw_rdev *rdev;
	int flushed;
	u8 *qp_errp;
	u32 *srqidxp;
};
struct t4_srq_pending_wr {
	u64 wr_id;
	union t4_recv_wr wqe;
	u8 len16;
};
struct t4_srq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DECLARE_PCI_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u32 rqt_abs_idx;
	u16 rqt_size;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
	u16 in_use;
	struct t4_srq_pending_wr *pending_wrs;
	u16 pending_cidx;
	u16 pending_pidx;
	u16 pending_in_use;
	u16 ooo_count;
};
static inline u32 t4_srq_avail(struct t4_srq *srq)
{
	return srq->size - 1 - srq->in_use;
}

static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
{
	srq->in_use++;
	if (++srq->pidx == srq->size)
		srq->pidx = 0;
	srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
		srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
	srq->queue[srq->size].status.host_pidx = srq->pidx;
}

static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
{
	srq->pending_in_use++;
	srq->in_use++;
	if (++srq->pending_pidx == srq->size)
		srq->pending_pidx = 0;
}

static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
{
	srq->pending_in_use--;
	srq->in_use--;
	if (++srq->pending_cidx == srq->size)
		srq->pending_cidx = 0;
}

static inline void t4_srq_produce_ooo(struct t4_srq *srq)
{
	srq->in_use--;
	srq->ooo_count++;
}

static inline void t4_srq_consume_ooo(struct t4_srq *srq)
{
	srq->cidx++;
	if (srq->cidx == srq->size)
		srq->cidx = 0;
	srq->queue[srq->size].status.host_cidx = srq->cidx;
	srq->ooo_count--;
}

static inline void t4_srq_consume(struct t4_srq *srq)
{
	srq->in_use--;
	if (++srq->cidx == srq->size)
		srq->cidx = 0;
	srq->queue[srq->size].status.host_cidx = srq->cidx;
}
static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_onchip(struct t4_sq *sq)
{
	return sq->flags & T4_SQ_ONCHIP;
}
static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}
/* This function copies a 64 byte coalesced work request to memory
 * mapped BAR2 space. For coalesced WRs, the SGE fetches data
 * from the FIFO instead of from the host.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
				  union t4_recv_wr *wqe)
{
	/* Flush host queue memory writes. */
	wmb();
	if (inc == 1 && srq->bar2_qid == 0 && wqe) {
		pr_debug("%s : WC srq->pidx = %d; len16=%d\n",
			 __func__, srq->pidx, len16);
		pio_copy(srq->bar2_va + SGE_UDB_WCDOORBELL, (u64 *)wqe);
	} else {
		pr_debug("%s: DB srq->pidx = %d; len16=%d\n",
			 __func__, srq->pidx, len16);
		writel(PIDX_T5_V(inc) | QID_V(srq->bar2_qid),
		       srq->bar2_va + SGE_UDB_KDOORBELL);
	}
	/* Flush user doorbell area writes. */
	wmb();
}
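/*
 * Illustrative only: the expected SRQ posting sequence, assuming the
 * caller has already written a WQE of len16 16-byte units at
 * srq->queue[srq->pidx]. This mirrors how the driver's post_srq_recv
 * path is expected to drive the helpers above.
 */
static inline void t4_example_srq_post(struct t4_srq *srq,
				       union t4_recv_wr *wqe, u8 len16)
{
	t4_srq_produce(srq, len16);
	t4_ring_srq_db(srq, DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE),
		       len16, wqe);
}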
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wq->sq.bar2_va) {
		if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
			pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
			pio_copy((u64 __iomem *)
				 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
				 (u64 *)wqe);
		} else {
			pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
			writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
			       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
		}

		/* Flush user doorbell area writes. */
		wmb();
		return;
	}
	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
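/*
 * Illustrative only: a minimal post-one-SQ-WQE sequence built from the
 * helpers above, assuming the WQE has already been written into
 * wq->sq.queue[wq->sq.pidx]. The driver's real post_send path does the
 * same produce-then-ring dance, batching the pidx increment.
 */
static inline void t4_example_sq_post(struct t4_wq *wq, union t4_wr *wqe,
				      u8 len16)
{
	t4_sq_produce(wq, len16);
	t4_ring_sq_db(wq, DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE), wqe);
}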
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
				 union t4_recv_wr *wqe)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wq->rq.bar2_va) {
		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
			pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
			pio_copy((u64 __iomem *)
				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
				 (u64 *)wqe);
		} else {
			pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
		}

		/* Flush user doorbell area writes. */
		wmb();
		return;
	}
	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return *wq->qp_errp;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{
	if (srqidx)
		*wq->srqidxp = srqidx;
	*wq->qp_errp = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	return !wq->rq.queue[wq->rq.size].status.db_off;
}
enum {
	CQ_ARMED = 1,
};

struct t4_cq {
	struct t4_cqe *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *gts;
	void __iomem *bar2_va;
	u64 bar2_pa;
	u32 bar2_qid;
	struct c4iw_rdev *rdev;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u32 qid_mask;
	int vector;
	u16 size; /* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
	u8 *qp_errp;
	unsigned long flags;
};
static inline void write_gts(struct t4_cq *cq, u32 val)
{
	if (cq->bar2_va)
		writel(val | INGRESSQID_V(cq->bar2_qid),
		       cq->bar2_va + SGE_UDB_GTS);
	else
		writel(val | INGRESSQID_V(cq->cqid), cq->gts);
}

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	set_bit(CQ_ARMED, &cq->flags);
	while (cq->cidx_inc > CIDXINC_M) {
		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7);
		write_gts(cq, val);
		cq->cidx_inc -= CIDXINC_M;
	}
	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
	write_gts(cq, val);
	cq->cidx_inc = 0;
	return 0;
}
static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
			__func__, cq->cqid);
		cq->error = 1;
		cq->sw_in_use--;
		return;
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
		u32 val;

		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7);
		write_gts(cq, val);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_cq_notempty(struct t4_cq *cq)
{
	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		pr_err("cq overflow cqid %u\n", cq->cqid);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

		/* Ensure CQE is flushed to memory */
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
			__func__, cq->cqid);
		cq->error = 1;
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}
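/*
 * Illustrative only: a minimal poll step combining the helpers above.
 * Real polling (in the driver's CQ code) also reaps the matching SQ/RQ
 * slot and translates the CQE into a work completion.
 */
static inline int t4_example_poll_one(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = t4_next_cqe(cq, cqe);

	if (ret)
		return ret;
	if (SW_CQE(*cqe))
		t4_swcq_consume(cq);
	else
		t4_hwcq_consume(cq);
	return 0;
}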
static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return *cq->qp_errp;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	*cq->qp_errp = 1;
}
struct t4_dev_status_page {
	u8 db_off;
	u8 write_cmpl_supported;
	u16 pad2;
	u32 pad3;
	u64 qp_start;
	u64 qp_size;
	u64 cq_start;
	u64 cq_size;
};

#endif /* __T4_H__ */