/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

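/* Claim a free entry in the ULD MSI-X bitmap under the bitmap lock and
 * return its index, or -ENOSPC if every vector is already in use.
 */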
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

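/* Return a previously claimed entry to the ULD MSI-X bitmap. */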
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD. All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);
	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

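/* Allocate the ULD ingress queues (and, after the first nrxq entries, the
 * concentrator queues), spreading them evenly across the adapter's ports.
 * With MSI-X each queue is bound to a vector taken from the ULD MSI-X
 * bitmap; on failure every queue allocated so far is freed again.
 */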
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			if (bmap_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

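/* Set up all SGE ingress queues for a ULD. For the RDMA ULD the firmware is
 * additionally told to route control-queue completions to the per-port RDMA
 * response queues.
 */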
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

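/* Tear down the ULD ingress queues, undoing the firmware control-queue
 * completion routing that setup_sge_queues_uld() established for RDMA.
 */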
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

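/* Size and allocate the per-ULD receive queue bookkeeping: how many ingress
 * and concentrator queues to use (bounded by the available MSI-X vectors,
 * the number of online CPUs and the port count) and the arrays describing
 * them.
 */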
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

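/* Request an interrupt for every ULD receive queue; on failure, release the
 * IRQs and bitmap entries acquired so far.
 */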
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

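/* (Re)arm a response queue: enable NAPI if the queue has a handler and write
 * a zero-increment GTS update to start its timer and enable interrupts.
 */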
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

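/* Free the ULD transmit queues: stop the restart tasklets, release the
 * hardware egress queues and drop any packets still queued in software.
 */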
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

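/* Drop a reference on the shared ULD transmit queue set and free it when the
 * last user goes away.
 */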
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

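/* Allocate (or reuse) the transmit queue set for a ULD type. Offload-class
 * queues are shared, so a second user only takes a reference.
 */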
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}

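/* Allocate the per-adapter ULD bookkeeping: the ULD info table and the
 * arrays of rx/tx queue descriptors indexed by ULD type.
 */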
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

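/* Fill in the cxgb4_lld_info block handed to a ULD at attach time with the
 * adapter resources and parameters it needs to operate.
 */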
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	unsigned int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}

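/* Attach one adapter to a ULD: build the lld_info block, call the ULD's
 * add() method and, if the adapter is already up, report CXGB4_STATE_UP.
 */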
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	unsigned int adap_idx = 0;
	struct adapter *adap;
	int ret = 0;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		ret = uld_attach(adap, type);
		if (ret)
			goto free_txq;
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
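
/* Typical (simplified) usage from a ULD module. The names and field values
 * below are illustrative only, not taken from any in-tree driver:
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lld)
 *	{
 *		// save lld->rxq_ids, lld->ports, etc. and return a handle
 *	}
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.nrxq = MAX_ULD_QSETS,
 *		.ntxq = MAX_ULD_QSETS,
 *		.rxq_size = 1024,
 *		.lro = false,
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 */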

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);