/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
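
/*
 * The queue depth can be set when the driver is loaded, e.g.
 * "modprobe ib_qib sdma_descq_cnt=1024" (value chosen purely for
 * illustration); S_IRUGO exposes it read-only at runtime.
 */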

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
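
/*
 * Sketch of descriptor qword 0, as implied by the definitions above and
 * by make_sdma_desc()/unmap_desc() below:
 *
 *	[63:32] SDmaPhyAddr[31:0]
 *	[31:30] SDmaGeneration[1:0]
 *	[26:16] SDmaDwordCount[10:0]
 *	[15:11] INTR / USE_LARGE_BUF / DMA_HEAD / FIRST / LAST flags
 *	[10:0]  SDmaBufOffset[12:2]
 *
 * Qword 1 carries SDmaPhyAddr[47:32].
 */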

char *qib_sdma_state_names[] = {
	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
	[qib_sdma_state_s20_idle]             = "s20_Idle",
	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
	[qib_sdma_state_s99_running]          = "s99_Running",
};

char *qib_sdma_event_names[] = {
	[qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
	[qib_sdma_event_e30_go_running]   = "e30_GoRunning",
	[qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
	[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
	[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
	[qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
};

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);
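
/*
 * The state machine's lifetime is tracked by the kref embedded in
 * struct qib_sdma_state: sdma_get() takes a reference when the machine
 * starts, sdma_put() drops one when it stops, and sdma_finalput()
 * drops the initial reference and then blocks in wait_for_completion()
 * until sdma_complete() signals that the last reference is gone.
 */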

static void sdma_get(struct qib_sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct qib_sdma_state *ss =
		container_of(kref, struct qib_sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
	struct qib_sdma_txreq *txp, *txp_next;

	list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
		list_del_init(&txp->list);
		if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
			unsigned idx;

			idx = txp->start_idx;
			while (idx != txp->next_descq_idx) {
				unmap_desc(ppd, idx);
				if (++idx == ppd->sdma_descq_cnt)
					idx = 0;
			}
		}
		if (txp->callback)
			(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
	}
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/* Process all retired requests. */
	qib_sdma_make_progress(ppd);

	clear_sdma_activelist(ppd);

	/*
	 * Resync count of added and removed.  It is VERY important that
	 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
	 */
	ppd->sdma_descq_removed = ppd->sdma_descq_added;

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers will be reset when switching states
	 * due to calling __qib_sdma_process_event() below.
	 */
	ppd->sdma_descq_tail = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_head_dma[0] = 0;
	ppd->sdma_generation = 0;

	__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
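
/*
 * The task above runs in tasklet (softirq) context, scheduled by
 * sdma_start_sw_clean_up() below; it must not sleep, and everything it
 * touches is protected by sdma_lock with interrupts disabled.
 */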

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	unsigned bufno;

	for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
		ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

	ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
	tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
	enum qib_sdma_states next_state)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;
	struct sdma_set_state_action *action = ss->set_state_action;
	unsigned op = 0;

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;

	ss->current_state = next_state;

	if (action[next_state].op_enable)
		op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= QIB_SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_drain)
		op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
	__le64 *descqp = &ppd->sdma_descq[head].qw[0];
	u64 desc[2];
	dma_addr_t addr;
	size_t len;

	desc[0] = le64_to_cpu(descqp[0]);
	desc[1] = le64_to_cpu(descqp[1]);

	addr = (desc[1] << 32) | (desc[0] >> 32);
	len = (desc[0] >> 14) & (0x7ffULL << 2);
	dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
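
/*
 * Decode note for unmap_desc(): SDmaDwordCount sits at bits [26:16] of
 * qword 0, so "(desc[0] >> 14) & (0x7ffULL << 2)" recovers the length
 * in bytes (dword count << 2) with a single shift and mask.
 */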

static int alloc_sdma(struct qib_pportdata *ppd)
{
	ppd->sdma_descq_cnt = sdma_descq_cnt;
	if (!ppd->sdma_descq_cnt)
		ppd->sdma_descq_cnt = 256;

	/* Allocate memory for SendDMA descriptor FIFO */
	ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
		GFP_KERNEL);

	if (!ppd->sdma_descq) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA descriptor FIFO memory\n");
		goto bail;
	}

	/* Allocate memory for DMA of head register to memory */
	ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
		PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
	if (!ppd->sdma_head_dma) {
		qib_dev_err(ppd->dd,
			"failed to allocate SendDMA head memory\n");
		goto cleanup_descq;
	}
	ppd->sdma_head_dma[0] = 0;
	return 0;

cleanup_descq:
	dma_free_coherent(&ppd->dd->pcidev->dev,
		ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
		ppd->sdma_descq_phys);
	ppd->sdma_descq = NULL;
	ppd->sdma_descq_phys = 0;
bail:
	ppd->sdma_descq_cnt = 0;
	return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	if (ppd->sdma_head_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *)ppd->sdma_head_dma,
				  ppd->sdma_head_phys);
		ppd->sdma_head_dma = NULL;
		ppd->sdma_head_phys = 0;
	}

	if (ppd->sdma_descq) {
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt * sizeof(u64[2]),
				  ppd->sdma_descq, ppd->sdma_descq_phys);
		ppd->sdma_descq = NULL;
		ppd->sdma_descq_phys = 0;
	}
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
				  u64 *sdmadesc, u64 addr, u64 dwlen,
				  u64 dwoffset)
{
	/* SDmaPhyAddr[47:32] */
	sdmadesc[1] = addr >> 32;
	/* SDmaPhyAddr[31:0] */
	sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
	/* SDmaGeneration[1:0] */
	sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
		SDMA_DESC_GEN_LSB;
	/* SDmaDwordCount[10:0] */
	sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
	/* SDmaBufOffset[12:2] */
	sdmadesc[0] |= dwoffset & 0x7ffULL;
}
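
/*
 * Worked example with hypothetical values: for addr = 0x123456789abc,
 * dwlen = 8 and dwoffset = 0, make_sdma_desc() yields
 * sdmadesc[1] = 0x1234 (SDmaPhyAddr[47:32]) and
 * sdmadesc[0] = (0x56789abcULL << 32) | (generation << 30) | (8 << 16).
 */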

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
	struct list_head *lp = NULL;
	struct qib_sdma_txreq *txp = NULL;
	struct qib_devdata *dd = ppd->dd;
	int progress = 0;
	u16 hwhead;
	u16 idx = 0;

	hwhead = dd->f_sdma_gethead(ppd);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */

	if (!list_empty(&ppd->sdma_activelist)) {
		lp = ppd->sdma_activelist.next;
		txp = list_entry(lp, struct qib_sdma_txreq, list);
		idx = txp->start_idx;
	}

	while (ppd->sdma_descq_head != hwhead) {
		/* if desc is part of this txp, unmap if needed */
		if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
		    (idx == ppd->sdma_descq_head)) {
			unmap_desc(ppd, ppd->sdma_descq_head);
			if (++idx == ppd->sdma_descq_cnt)
				idx = 0;
		}

		/* increment dequeued desc count */
		ppd->sdma_descq_removed++;

		/* advance head, wrap if needed */
		if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
			ppd->sdma_descq_head = 0;

		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
			/* remove from active list */
			list_del_init(&txp->list);
			if (txp->callback)
				(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
			/* see if there is another txp */
			if (list_empty(&ppd->sdma_activelist))
				txp = NULL;
			else {
				lp = ppd->sdma_activelist.next;
				txp = list_entry(lp, struct qib_sdma_txreq,
						 list);
				idx = txp->start_idx;
			}
		}
		progress = 1;
	}
	if (progress)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
	return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_intr(ppd);

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
	if (__qib_sdma_running(ppd)) {
		qib_sdma_make_progress(ppd);
		if (!list_empty(&ppd->sdma_userpending))
			qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
	}
}
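
/*
 * Naming convention used throughout this file: the double-underscore
 * variants (__qib_sdma_intr(), __qib_sdma_running(),
 * __qib_sdma_process_event()) expect the caller to hold sdma_lock; the
 * plain-named wrappers such as qib_sdma_intr() above acquire and
 * release the lock themselves.
 */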

int qib_setup_sdma(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;
	int ret = 0;

	ret = alloc_sdma(ppd);
	if (ret)
		goto bail;

	/* set consistent sdma state */
	ppd->dd->f_sdma_init_early(ppd);
	spin_lock_irqsave(&ppd->sdma_lock, flags);
	sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	/* set up reference counting */
	kref_init(&ppd->sdma_state.kref);
	init_completion(&ppd->sdma_state.comp);

	ppd->sdma_generation = 0;
	ppd->sdma_descq_head = 0;
	ppd->sdma_descq_removed = 0;
	ppd->sdma_descq_added = 0;

	ppd->sdma_intrequest = 0;
	INIT_LIST_HEAD(&ppd->sdma_userpending);
	INIT_LIST_HEAD(&ppd->sdma_activelist);

	tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
		     (unsigned long)ppd);

	ret = dd->f_init_sdma_regs(ppd);
	if (ret)
		goto bail_alloc;

	qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

	return 0;

bail_alloc:
	qib_teardown_sdma(ppd);
bail:
	return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
	qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

	/*
	 * This waits for the state machine to exit so it is not
	 * necessary to kill the sdma_sw_clean_up_task to make sure
	 * it is not running.
	 */
	sdma_finalput(&ppd->sdma_state);

	free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = __qib_sdma_running(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code we always queue it and then process
 * the full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, but in the correct order.
 *
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
				  struct qib_verbs_txreq *tx)
{
	struct qib_qp_priv *priv = tx->qp->priv;

	atomic_inc(&priv->s_dma_busy);
	/* no sdma descriptors, so no unmap_desc */
	tx->txreq.start_idx = 0;
	tx->txreq.next_descq_idx = 0;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
			struct rvt_sge_state *ss, u32 dwords,
			struct qib_verbs_txreq *tx)
{
	unsigned long flags;
	struct rvt_sge *sge;
	struct rvt_qp *qp;
	int ret = 0;
	u16 tail;
	__le64 *descqp;
	u64 sdmadesc[2];
	u32 dwoffset;
	dma_addr_t addr;
	struct qib_qp_priv *priv;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
	if (unlikely(!__qib_sdma_running(ppd))) {
		complete_sdma_err_req(ppd, tx);
		goto unlock;
	}

	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
		if (qib_sdma_make_progress(ppd))
			goto retry;
		if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
			ppd->dd->f_sdma_set_desc_cnt(ppd,
					ppd->sdma_descq_cnt / 2);
		goto busy;
	}

	dwoffset = tx->hdr_dwords;
	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

	sdmadesc[0] |= SDMA_DESC_FIRST;
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
		sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

	/* write to the descq */
	tail = ppd->sdma_descq_tail;
	descqp = &ppd->sdma_descq[tail].qw[0];
	*descqp++ = cpu_to_le64(sdmadesc[0]);
	*descqp++ = cpu_to_le64(sdmadesc[1]);

	/* increment the tail */
	if (++tail == ppd->sdma_descq_cnt) {
		tail = 0;
		descqp = &ppd->sdma_descq[0].qw[0];
		++ppd->sdma_generation;
	}

	tx->txreq.start_idx = tail;

	sge = &ss->sge;
	while (dwords) {
		u32 dw;
		u32 len;

		len = dwords << 2;
		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		dw = (len + 3) >> 2;
		addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
				      dw << 2, DMA_TO_DEVICE);
		if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
			ret = -ENOMEM;
			goto unmap;
		}
		sdmadesc[0] = 0;
		make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
		/* SDmaUseLargeBuf has to be set in every descriptor */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
			sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
		/* write to the descq */
		*descqp++ = cpu_to_le64(sdmadesc[0]);
		*descqp++ = cpu_to_le64(sdmadesc[1]);

		/* increment the tail */
		if (++tail == ppd->sdma_descq_cnt) {
			tail = 0;
			descqp = &ppd->sdma_descq[0].qw[0];
			++ppd->sdma_generation;
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}

		dwoffset += dw;
		dwords -= dw;
	}

	if (!tail)
		descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
	descqp -= 2;
	descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

	priv = tx->qp->priv;
	atomic_inc(&priv->s_dma_busy);
	tx->txreq.next_descq_idx = tail;
	ppd->dd->f_sdma_update_tail(ppd, tail);
	ppd->sdma_descq_added += tx->txreq.sg_count;
	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
	goto unlock;

unmap:
	/* walk the tail back, unmapping anything we mapped above */
	for (;;) {
		if (!tail)
			tail = ppd->sdma_descq_cnt - 1;
		else
			tail--;
		if (tail == ppd->sdma_descq_tail)
			break;
		unmap_desc(ppd, tail);
	}
	qp = tx->qp;
	qib_put_txreq(tx);
	spin_unlock(&ppd->sdma_lock);
	spin_lock(&qp->r_lock);
	spin_lock(&qp->s_lock);
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		/* XXX what about error sending RDMA read responses? */
		if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
			rvt_error_qp(qp, IB_WC_GENERAL_ERR);
	} else if (qp->s_wqe)
		qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->r_lock);
	/* return zero to process the next send work request */
	spin_lock(&ppd->sdma_lock);
	goto unlock;

busy:
	qp = tx->qp;
	priv = qp->priv;
	spin_lock(&qp->s_lock);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		struct qib_ibdev *dev;

		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		tx->ss = ss;
		tx->dwords = dwords;
		priv->s_tx = tx;
		dev = &ppd->dd->verbs_dev;
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			struct qib_ibport *ibp;

			ibp = &ppd->ibport_data;
			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->iowait, &dev->dmawait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock(&qp->s_lock);
		ret = -EBUSY;
	} else {
		spin_unlock(&qp->s_lock);
		qib_put_txreq(tx);
	}
unlock:
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}
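
/*
 * Design note on the descriptor ring above: the tail index wraps
 * modulo sdma_descq_cnt, and every wrap increments sdma_generation.
 * Since make_sdma_desc() stamps the low two generation bits into each
 * descriptor, entries written on different passes around the ring can
 * be told apart even though they occupy the same slot.
 */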

/*
 * sdma_lock should be acquired before calling this routine.
 */
void dump_sdma_state(struct qib_pportdata *ppd)
{
	struct qib_sdma_desc *descq;
	struct qib_sdma_txreq *txp, *txpnext;
	__le64 *descqp;
	u64 desc[2];
	u64 addr;
	u16 gen, dwlen, dwoffset;
	u16 head, tail, cnt;

	head = ppd->sdma_descq_head;
	tail = ppd->sdma_descq_tail;
	cnt = qib_sdma_descq_freecnt(ppd);
	descq = ppd->sdma_descq;

	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_head: %u\n", head);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA ppd->sdma_descq_tail: %u\n", tail);
	qib_dev_porterr(ppd->dd, ppd->port,
		"SDMA sdma_descq_freecnt: %u\n", cnt);

	/* print info for each entry in the descriptor queue */
	while (head != tail) {
		char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

		descqp = &descq[head].qw[0];
		desc[0] = le64_to_cpu(descqp[0]);
		desc[1] = le64_to_cpu(descqp[1]);
		flags[0] = (desc[0] & 1 << 15) ? 'I' : '-';
		flags[1] = (desc[0] & 1 << 14) ? 'L' : 'S';
		flags[2] = (desc[0] & 1 << 13) ? 'H' : '-';
		flags[3] = (desc[0] & 1 << 12) ? 'F' : '-';
		flags[4] = (desc[0] & 1 << 11) ? 'L' : '-';
		addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
		gen = (desc[0] >> 30) & 3ULL;
		dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
		dwoffset = (desc[0] & 0x7ffULL) << 2;
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
			head, flags, addr, gen, dwlen, dwoffset);
		if (++head == ppd->sdma_descq_cnt)
			head = 0;
	}

	/* print dma descriptor indices from the TX requests */
	list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
				 list)
		qib_dev_porterr(ppd->dd, ppd->port,
			"SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
			txp->start_idx, txp->next_descq_idx);
}
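
/*
 * Example of the per-descriptor line printed above, with hypothetical
 * values:
 *
 *	SDMA sdmadesc[3]: flags:-S-F- addr:0x000000012345a000 gen:2 len:288 bytes offset:0 bytes
 */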

void qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	__qib_sdma_process_event(ppd, event);

	if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
		qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
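
/*
 * Summary of the state transitions encoded in the switch below (events
 * not listed for a state leave that state unchanged):
 *
 *	s00_HwDown         e10/e30   -> s10_HwStartUpWait
 *	s10_HwStartUpWait  e20       -> s20_Idle or s99_Running
 *	s20_Idle           e30       -> s99_Running
 *	s30_SwCleanUpWait  e40       -> s10_HwStartUpWait
 *	s40_HwCleanUpWait  e50       -> s30_SwCleanUpWait
 *	s50_HwHaltWait     e60       -> s40_HwCleanUpWait
 *	s99_Running        e60/e7220 -> s30_SwCleanUpWait
 *	                   e70/e7322 -> s50_HwHaltWait
 *	any state          e00       -> s00_HwDown
 */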

void __qib_sdma_process_event(struct qib_pportdata *ppd,
	enum qib_sdma_events event)
{
	struct qib_sdma_state *ss = &ppd->sdma_state;

	switch (ss->current_state) {
	case qib_sdma_state_s00_hw_down:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			break;
		case qib_sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active on
			 * your local system.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case qib_sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&ppd->sdma_state);
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			sdma_set_state(ppd, ss->go_s99_running ?
				       qib_sdma_state_s99_running :
				       qib_sdma_state_s20_idle);
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s20_idle:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_sw_tear_down(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			sdma_set_state(ppd, qib_sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s10_hw_start_up_wait);
			sdma_hw_start_up(ppd);
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e60_hw_halted:
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s40_hw_clean_up_wait);
			ppd->dd->f_sdma_hw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			break;
		case qib_sdma_event_e7322_err_halted:
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;

	case qib_sdma_state_s99_running:
		switch (event) {
		case qib_sdma_event_e00_go_hw_down:
			sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e10_go_hw_start:
			break;
		case qib_sdma_event_e20_hw_started:
			break;
		case qib_sdma_event_e30_go_running:
			break;
		case qib_sdma_event_e40_sw_cleaned:
			break;
		case qib_sdma_event_e50_hw_cleaned:
			break;
		case qib_sdma_event_e60_hw_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e70_go_idle:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			ss->go_s99_running = 0;
			break;
		case qib_sdma_event_e7220_err_halted:
			sdma_set_state(ppd,
				       qib_sdma_state_s30_sw_clean_up_wait);
			sdma_start_sw_clean_up(ppd);
			break;
		case qib_sdma_event_e7322_err_halted:
			sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
			break;
		case qib_sdma_event_e90_timer_tick:
			break;
		}
		break;
	}

	ss->last_event = event;
}