/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"
#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");
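/*
 * Note: the timing log is sized as a power of two so its index can be
 * reduced with a simple mask (see c4iw_log_wr_stats() below).  With the
 * default order of 12 that is 1 << 12 == 4096 entries.
 */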
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0
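/*
 * Doorbell flow-control tunables, as consumed by resume_queues() below:
 * resume at most DB_FC_RESUME_SIZE QPs per chunk, sleep DB_FC_RESUME_DELAY
 * jiffies between chunks, and only resume while the doorbell FIFO is below
 * the interrupt threshold shifted left by DB_FC_DRAIN_THRESH.
 */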
static struct dentry *c4iw_debugfs_root;
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}
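/*
 * The log above is a lock-free ring: atomic_inc_return() hands each logger
 * a unique slot and the power-of-two mask wraps the index, so with
 * wr_log_size == 4096 an index of 4097 maps to slot 1.  Writers that are a
 * full ring apart can still land on the same wrapped slot; the log is
 * best-effort diagnostic data, not a precise trace.
 */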
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}
static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}
static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}
static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};
static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};
static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*m_lsin = (struct sockaddr_in *)&id->m_local_addr;
		*m_rsin = (struct sockaddr_in *)&id->m_remote_addr;
	} else {
		*m_lsin = &zero_sin;
		*m_rsin = &zero_sin;
	}
}
static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*m_lsin6 = (struct sockaddr_in6 *)&id->m_local_addr;
		*m_rsin6 = (struct sockaddr_in6 *)&id->m_remote_addr;
	} else {
		*m_lsin6 = &zero_sin6;
		*m_rsin6 = &zero_sin6;
	}
}
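/*
 * The m_ variants above are the "mapped" addresses maintained by the iWARP
 * port mapper in the cm_id; when no cm_id is attached yet we fall back to
 * the zeroed sockaddrs so the dump helpers below can always print a mapped
 * port (which then simply reads as 0).
 */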
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}
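/*
 * qp_open() (and the stag/ep variants below) take a two-pass snapshot:
 * count the idr entries under the lock, size the buffer from that count
 * (roughly 180 bytes per QP line here), then dump under the lock again.
 * If entries appear between the two passes, dump_qp() simply stops when
 * the buffer fills rather than overrunning it.
 */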
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
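/*
 * The mm idr stores the stag index (the stag with its low 8 key bits
 * stripped), which is why dump_stag() shifts id left by 8 to rebuild the
 * 32-bit stag before asking the hardware for the TPT entry.
 */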
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}
static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}
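/*
 * Example usage, assuming debugfs is mounted at /sys/kernel/debug and the
 * adapter sits at the (illustrative) PCI address 0000:02:00.4:
 *
 *   cat /sys/kernel/debug/iw_cxgb4/0000:02:00.4/stats
 *   echo 1 > /sys/kernel/debug/iw_cxgb4/0000:02:00.4/stats   # any write clears
 *
 * The per-device directory name comes from pci_name() in c4iw_alloc() below.
 */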
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
		 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);
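	/*
	 * Roughly speaking (the exact layout is the LLD's business):
	 * udb_density is the number of qids that share one user doorbell
	 * page, so qids are handed out in blocks of qpmask + 1 and
	 * (qid & qpmask) == 0 identifies the block owner -- see the
	 * matching test in c4iw_release_dev_ucontext() above.
	 */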
	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}
static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 __func__, devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;
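	/*
	 * Worked example (actual values depend on the LLD configuration):
	 * with a 64-byte egress status page there is one status entry, so
	 * t4_max_sq_size = 65520 - 1 - 1 = 65518 and
	 * t4_max_rq_size = 8192 - 1 - 1 = 8190, which in turn bounds
	 * t4_max_qp_depth.
	 */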
	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 __func__, pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}
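/*
 * Summary of the delivery paths above: (1) gl == NULL means the payload is
 * embedded in the response descriptor itself, so a small skb is built from
 * rsp[1]; (2) CXGB4_MSG_AN is an asynchronous notification carrying only a
 * qid; (3) a free-list buffer whose first byte does not match the
 * descriptor opcode is normally an ingress TCP packet for a listening
 * endpoint, recast via recv_rx_pkt() into a synthesized pass_accept_req;
 * (4) everything else is a regular offload message converted with
 * cxgb4_pktgl_to_skb().
 */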
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				pr_err("%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}
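/*
 * While doorbells are stopped, posted work requests only accumulate in
 * wq_pidx_inc; resume_rc_qp() replays that accumulated producer-index
 * delta as a single doorbell per queue and then clears it.
 */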
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}
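/*
 * resume_queues() thus walks the doorbell state machine
 * STOPPED -> FLOW_CONTROL -> NORMAL, draining db_fc_list one
 * DB_FC_RESUME_SIZE chunk at a time and only while the hardware doorbell
 * FIFO has room.  A recovery event can move db_state away from
 * FLOW_CONTROL mid-loop, which is counted as an FC interruption.
 */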
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}
static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
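/*
 * cxgb4_sync_txq_pidx() tells the LLD the host's view of each queue's
 * producer index so the hardware can be brought back in step after
 * doorbells were dropped; waiting for the doorbell FIFO to drain between
 * QPs keeps the recovery itself from overflowing it again.
 */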
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state*/
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);