/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted (subject to the limitations in the
 * disclaimer below) provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 *  * Neither the name of Qualcomm Atheros nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
37 #include "ah_internal.h"
39 #include "ar5416reg.h"
40 #include "ar5416desc.h"
/* Array element count — only valid on true arrays, not pointers. */
#define N(a) (sizeof(a)/sizeof(a[0]))
/* All-ones read from a cause register means the device is absent/powered off. */
#define AR_INTR_SPURIOUS 0xffffffff
/* This file builds the Owl 2.0 flavour of the descriptor routines. */
#define ar5416_desc ar5416_desc_20
/* Max polls (x AR5416_ABORT_WAIT us) when draining a TX queue in abort. */
#define AR5416_ABORT_LOOPS 1000
/* Delay, in microseconds, between abort polls. */
#define AR5416_ABORT_WAIT 5
#define AR5416DESC AR5416DESC_20
#define AR5416DESC_CONST AR5416DESC_CONST_20
54 static const struct ath_hal_private ar5416hal_10 = {{
55 .ah_getRateTable = ar5416GetRateTable,
56 .ah_detach = ar5416Detach,
58 /* Transmit functions */
59 .ah_updateTxTrigLevel = ar5416UpdateTxTrigLevel,
60 .ah_setTxDP = ar5416SetTxDP,
61 .ah_numTxPending = ar5416NumTxPending,
62 .ah_startTxDma = ar5416StartTxDma,
63 .ah_stopTxDma = ar5416StopTxDma,
65 .ah_abortTxDma = ar5416AbortTxDma,
68 .ah_getTsf64 = ar5416GetTsf64,
69 .ah_resetTsf = ar5416ResetTsf,
70 .ah_setRxFilter = ar5416SetRxFilter,
73 .ah_getRxDP = ar5416GetRxDP,
74 .ah_setRxDP = ar5416SetRxDP,
75 .ah_stopDmaReceive = ar5416StopDmaReceive,
76 .ah_enableReceive = ar5416EnableReceive,
77 .ah_startPcuReceive = ar5416StartPcuReceive,
78 .ah_stopPcuReceive = ar5416StopPcuReceive,
80 /* Interrupt Functions */
81 .ah_isInterruptPending = ar5416IsInterruptPending,
82 .ah_getPendingInterrupts = ar5416GetPendingInterrupts,
83 .ah_getInterrupts = ar5416GetInterrupts,
84 .ah_setInterrupts = ar5416SetInterrupts,
88 void ar5416Detach(struct ath_hal *ah)
90 HALASSERT(ah != AH_NULL);
95 ar5416Attach(a_uint32_t devid,HAL_SOFTC sc, adf_os_device_t dev,
96 a_uint32_t flags, HAL_STATUS *status)
98 struct ath_hal_5416 *ahp;
101 ahp = ath_hal_malloc(sizeof (struct ath_hal_5416));
102 if (ahp == AH_NULL) {
103 *status = HAL_ENOMEM;
106 ah = &ahp->ah_priv.h;
108 OS_MEMCPY(&ahp->ah_priv, &ar5416hal_10, sizeof(struct ath_hal_private));
113 /* If its a Owl 2.0 chip then change the hal structure to
114 point to the Owl 2.0 ar5416_hal_20 structure */
116 ah->ah_set11nTxDesc = ar5416Set11nTxDesc_20;
117 ah->ah_set11nRateScenario = ar5416Set11nRateScenario_20;
118 ah->ah_set11nAggrFirst = ar5416Set11nAggrFirst_20;
119 ah->ah_set11nAggrMiddle = ar5416Set11nAggrMiddle_20;
120 ah->ah_set11nAggrLast = ar5416Set11nAggrLast_20;
121 ah->ah_clr11nAggr = ar5416Clr11nAggr_20;
122 ah->ah_set11nBurstDuration = ar5416Set11nBurstDuration_20;
123 ah->ah_setupRxDesc = ar5416SetupRxDesc_20;
124 ah->ah_procRxDescFast = ar5416ProcRxDescFast_20;
125 ah->ah_updateCTSForBursting = NULL;
126 ah->ah_setupTxDesc = ar5416SetupTxDesc_20;
127 ah->ah_reqTxIntrDesc = ar5416IntrReqTxDesc_20;
128 ah->ah_fillTxDesc = ar5416FillTxDesc_20;
129 ah->ah_fillKeyTxDesc = ar5416FillKeyTxDesc_20;
130 ah->ah_procTxDesc = ar5416ProcTxDesc_20;
131 ah->ah_set11nVirtualMoreFrag = ar5416Set11nVirtualMoreFrag_20;
137 /**********************/
138 /* Interrupt Handling */
139 /**********************/
141 HAL_BOOL ar5416IsInterruptPending(struct ath_hal *ah)
143 a_uint32_t host_isr = OS_REG_READ(ah, AR_INTR_ASYNC_CAUSE);
145 * Some platforms trigger our ISR before applying power to
146 * the card, so make sure.
148 return ((host_isr != AR_INTR_SPURIOUS) && (host_isr & AR_INTR_MAC_IRQ));
151 HAL_BOOL ar5416GetPendingInterrupts(struct ath_hal *ah, HAL_INT *masked)
155 HAL_BOOL fatal_int = AH_FALSE;
156 a_uint32_t sync_cause;
158 if (OS_REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
159 if ((OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) != AR_RTC_STATUS_ON) {
168 isr = OS_REG_READ(ah, AR_ISR_RAC);
169 if (isr == 0xffffffff) {
174 *masked = isr & HAL_INT_COMMON;
176 #ifdef AR5416_INT_MITIGATION
177 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
178 *masked |= HAL_INT_RX;
180 if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
181 *masked |= HAL_INT_TX;
185 if (isr & AR_ISR_BCNMISC) {
188 s2_s = OS_REG_READ(ah, AR_ISR_S2_S);
190 if (s2_s & AR_ISR_S2_GTT) {
191 *masked |= HAL_INT_GTT;
194 if (s2_s & AR_ISR_S2_CST) {
195 *masked |= HAL_INT_CST;
199 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
200 *masked |= HAL_INT_RX;
201 if (isr & (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | AR_ISR_TXEOL)) {
202 struct ath_hal_5416 *ahp = AH5416(ah);
203 a_uint32_t s0_s, s1_s;
205 *masked |= HAL_INT_TX;
206 s0_s = OS_REG_READ(ah, AR_ISR_S0_S);
207 s1_s = OS_REG_READ(ah, AR_ISR_S1_S);
208 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
209 ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
210 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
211 ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
215 sync_cause = OS_REG_READ(ah, AR_INTR_SYNC_CAUSE);
216 fatal_int = ((sync_cause != AR_INTR_SPURIOUS) &&
217 (sync_cause & (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))) ?
220 if (AH_TRUE == fatal_int) {
221 OS_REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
222 (void) OS_REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
228 HAL_INT ar5416GetInterrupts(struct ath_hal *ah)
230 return AH5416(ah)->ah_maskReg;
234 ar5416SetInterrupts(struct ath_hal *ah, HAL_INT ints)
236 struct ath_hal_5416 *ahp = AH5416(ah);
237 a_uint32_t omask = ahp->ah_maskReg;
240 if (omask & HAL_INT_GLOBAL) {
241 OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
242 (void) OS_REG_READ(ah, AR_IER);
245 mask = ints & HAL_INT_COMMON;
246 if (ints & HAL_INT_TX) {
247 #ifdef AR5416_INT_MITIGATION
248 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
251 mask |= AR_IMR_TXDESC;
253 mask |= AR_IMR_TXERR;
254 mask |= AR_IMR_TXEOL;
256 if (ints & HAL_INT_RX) {
257 mask |= AR_IMR_RXERR;
258 #ifdef AR5416_INT_MITIGATION
259 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
261 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
265 if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
266 mask |= AR_IMR_BCNMISC;
269 OS_REG_WRITE(ah, AR_IMR, mask);
270 (void) OS_REG_READ(ah, AR_IMR);
271 ahp->ah_maskReg = ints;
273 /* Re-enable interrupts if they were enabled before. */
274 if (ints & HAL_INT_GLOBAL) {
275 OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
276 /* See explanation above... */
277 (void) OS_REG_READ(ah, AR_IER);
280 OS_REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, AR_INTR_MAC_IRQ);
281 OS_REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
282 OS_REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_ALL);
291 u_int64_t ar5416GetTsf64(struct ath_hal *ah)
295 tsf = OS_REG_READ(ah, AR_TSF_U32);
296 tsf = (tsf << 32) | OS_REG_READ(ah, AR_TSF_L32);
301 void ar5416ResetTsf(struct ath_hal *ah)
307 while (OS_REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
314 OS_REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
321 a_uint32_t ar5416GetRxDP(struct ath_hal *ath)
323 return OS_REG_READ(ath, AR_RXDP);
327 void ar5416SetRxDP(struct ath_hal *ah, a_uint32_t rxdp)
329 OS_REG_WRITE(ah, AR_RXDP, rxdp);
330 HALASSERT(OS_REG_READ(ah, AR_RXDP) == rxdp);
333 void ar5416SetMulticastFilter(struct ath_hal *ah, a_uint32_t filter0, a_uint32_t filter1)
335 OS_REG_WRITE(ah, AR_MCAST_FIL0, filter0);
336 OS_REG_WRITE(ah, AR_MCAST_FIL1, filter1);
339 HAL_BOOL ar5416ClrMulticastFilterIndex(struct ath_hal *ah, a_uint32_t ix)
346 val = OS_REG_READ(ah, AR_MCAST_FIL1);
347 OS_REG_WRITE(ah, AR_MCAST_FIL1, (val &~ (1<<(ix-32))));
349 val = OS_REG_READ(ah, AR_MCAST_FIL0);
350 OS_REG_WRITE(ah, AR_MCAST_FIL0, (val &~ (1<<ix)));
355 HAL_BOOL ar5416StopDmaReceive(struct ath_hal *ah)
357 OS_REG_WRITE(ah, AR_CR, AR_CR_RXD); /* Set receive disable bit */
358 if (!ath_hal_wait(ah, AR_CR, AR_CR_RXE, 0)) {
365 HAL_BOOL ar5416SetMulticastFilterIndex(struct ath_hal *ah, a_uint32_t ix)
372 val = OS_REG_READ(ah, AR_MCAST_FIL1);
373 OS_REG_WRITE(ah, AR_MCAST_FIL1, (val | (1<<(ix-32))));
375 val = OS_REG_READ(ah, AR_MCAST_FIL0);
376 OS_REG_WRITE(ah, AR_MCAST_FIL0, (val | (1<<ix)));
381 void ar5416StartPcuReceive(struct ath_hal *ah)
383 OS_REG_CLR_BIT(ah, AR_DIAG_SW,
384 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
387 void ar5416SetRxFilter(struct ath_hal *ah, a_uint32_t bits)
391 OS_REG_WRITE(ah, AR_RX_FILTER, (bits & 0xff) | AR_RX_COMPR_BAR);
393 if (bits & HAL_RX_FILTER_PHYRADAR)
394 phybits |= AR_PHY_ERR_RADAR;
395 if (bits & HAL_RX_FILTER_PHYERR)
396 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
397 OS_REG_WRITE(ah, AR_PHY_ERR, phybits);
399 OS_REG_WRITE(ah, AR_RXCFG,OS_REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
401 OS_REG_WRITE(ah, AR_RXCFG,OS_REG_READ(ah, AR_RXCFG) &~ AR_RXCFG_ZLFDMA);
405 void ar5416EnableReceive(struct ath_hal *ah)
407 OS_REG_WRITE(ah, AR_CR, AR_CR_RXE);
410 void ar5416StopPcuReceive(struct ath_hal *ah)
412 OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
415 HAL_BOOL ar5416SetupRxDesc_20(struct ath_hal *ah, struct ath_rx_desc *ds,
416 a_uint32_t size, a_uint32_t flags)
418 struct ar5416_desc *ads = AR5416DESC(ds);
420 HALASSERT((size &~ AR_BufLen) == 0);
422 ads->ds_ctl1 = size & AR_BufLen;
423 if (flags & HAL_RXDESC_INTREQ)
424 ads->ds_ctl1 |= AR_RxIntrReq;
426 /* this should be enough */
427 ads->ds_rxstatus8 &= ~AR_RxDone;
432 HAL_STATUS ar5416ProcRxDescFast_20(struct ath_hal *ah, struct ath_rx_desc *ds,
433 a_uint32_t pa, struct ath_desc *nds,
434 struct ath_rx_status *rx_stats)
436 struct ar5416_desc ads;
437 struct ar5416_desc *adsp = AR5416DESC(ds);
438 struct ar5416_desc *ands = AR5416DESC(nds);
440 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
441 return HAL_EINPROGRESS;
443 * Given the use of a self-linked tail be very sure that the hw is
444 * done with this descriptor; the hw may have done this descriptor
445 * once and picked it up again...make sure the hw has moved on.
447 if ((ands->ds_rxstatus8 & AR_RxDone) == 0
448 && OS_REG_READ(ah, AR_RXDP) == pa)
449 return HAL_EINPROGRESS;
452 * Now we need to get the stats from the descriptor. Since desc are
453 * uncached, lets make a copy of the stats first. Note that, since we
454 * touch most of the rx stats, a memcpy would always be more efficient
456 * Next we fill in all values in a caller passed stack variable.
457 * This reduces the number of uncached accesses.
458 * Do this copy here, after the check so that when the checks fail, we
459 * dont end up copying the entire stats uselessly.
461 ads.u.rx = adsp->u.rx;
463 rx_stats->rs_status = 0;
464 rx_stats->rs_flags = 0;
466 rx_stats->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
467 rx_stats->rs_tstamp = ads.AR_RcvTimestamp;
469 /* XXX what about KeyCacheMiss? */
470 rx_stats->rs_rssi_combined =
471 MS(ads.ds_rxstatus4, AR_RxRSSICombined);
472 rx_stats->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
473 rx_stats->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
474 rx_stats->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
475 rx_stats->rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
476 rx_stats->rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
477 rx_stats->rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
478 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
479 rx_stats->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
481 rx_stats->rs_keyix = HAL_RXKEYIX_INVALID;
482 /* NB: caller expected to do rate table mapping */
483 rx_stats->rs_rate = RXSTATUS_RATE(ah, (&ads));
484 rx_stats->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
486 rx_stats->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
487 rx_stats->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
488 rx_stats->rs_flags |= (ads.ds_rxstatus3 & AR_GI) ? HAL_RX_GI : 0;
489 rx_stats->rs_flags |= (ads.ds_rxstatus3 & AR_2040) ? HAL_RX_2040 : 0;
491 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
492 rx_stats->rs_flags |= HAL_RX_DELIM_CRC_PRE;
493 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
494 rx_stats->rs_flags |= HAL_RX_DELIM_CRC_POST;
495 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
496 rx_stats->rs_flags |= HAL_RX_DECRYPT_BUSY;
498 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
500 * These four bits should not be set together. The
501 * 5416 spec states a Michael error can only occur if
502 * DecryptCRCErr not set (and TKIP is used). Experience
503 * indicates however that you can also get Michael errors
504 * when a CRC error is detected, but these are specious.
505 * Consequently we filter them out here so we don't
506 * confuse and/or complicate drivers.
508 if (ads.ds_rxstatus8 & AR_CRCErr)
509 rx_stats->rs_status |= HAL_RXERR_CRC;
510 else if (ads.ds_rxstatus8 & AR_PHYErr) {
513 rx_stats->rs_status |= HAL_RXERR_PHY;
514 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
515 rx_stats->rs_phyerr = phyerr;
516 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
517 rx_stats->rs_status |= HAL_RXERR_DECRYPT;
518 else if (ads.ds_rxstatus8 & AR_MichaelErr)
519 rx_stats->rs_status |= HAL_RXERR_MIC;
521 rx_stats->evm0=ads.AR_RxEVM0;
522 rx_stats->evm1=ads.AR_RxEVM1;
523 rx_stats->evm2=ads.AR_RxEVM2;
532 HAL_BOOL ar5416UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
534 struct ath_hal_5416 *ahp = AH5416(ah);
535 a_uint32_t txcfg, curLevel, newLevel;
539 * Disable interrupts while futzing with the fifo level.
541 omask = ar5416SetInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);
543 txcfg = OS_REG_READ(ah, AR_TXCFG);
544 curLevel = MS(txcfg, AR_FTRIG);
548 if (curLevel < MAX_TX_FIFO_THRESHOLD)
550 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
552 if (newLevel != curLevel)
553 OS_REG_WRITE(ah, AR_TXCFG,
554 (txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));
556 /* re-enable chip interrupts */
557 ar5416SetInterrupts(ah, omask);
559 return (newLevel != curLevel);
562 HAL_BOOL ar5416SetTxDP(struct ath_hal *ah, a_uint32_t q, a_uint32_t txdp)
564 HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
565 HALASSERT(AH5416(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
568 * Make sure that TXE is deasserted before setting the TXDP. If TXE
569 * is still asserted, setting TXDP will have no effect.
571 HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);
573 OS_REG_WRITE(ah, AR_QTXDP(q), txdp);
578 HAL_BOOL ar5416StartTxDma(struct ath_hal *ah, a_uint32_t q)
580 HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
581 HALASSERT(AH5416(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
583 /* Check to be sure we're not enabling a q that has its TXD bit set. */
584 HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);
586 OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
591 a_uint32_t ar5416NumTxPending(struct ath_hal *ah, a_uint32_t q)
595 HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
596 HALASSERT(AH5416(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
598 npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
601 * Pending frame count (PFC) can momentarily go to zero
602 * while TXE remains asserted. In other words a PFC of
603 * zero is not sufficient to say that the queue has stopped.
605 if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
609 if (npend && (AH5416(ah)->ah_txq[q].tqi_type == HAL_TX_QUEUE_CAB)) {
610 if (OS_REG_READ(ah, AR_Q_RDYTIMESHDN) & (1 << q)) {
611 isrPrintf("RTSD on CAB queue\n");
612 /* Clear the ReadyTime shutdown status bits */
613 OS_REG_WRITE(ah, AR_Q_RDYTIMESHDN, 1 << q);
620 HAL_BOOL ar5416AbortTxDma(struct ath_hal *ah)
625 * set txd on all queues
627 OS_REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
632 OS_REG_SET_BIT(ah, AR_PCU_MISC, (AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF));
633 OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
634 OS_REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
637 * wait on all tx queues
639 for (q = 0; q < AR_NUM_QCU; q++) {
640 for (i = 0; i < AR5416_ABORT_LOOPS; i++) {
641 if (!ar5416NumTxPending(ah, q))
644 OS_DELAY(AR5416_ABORT_WAIT);
646 if (i == AR5416_ABORT_LOOPS) {
652 * clear tx abort bits
654 OS_REG_CLR_BIT(ah, AR_PCU_MISC, (AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF));
655 OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
656 OS_REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
661 OS_REG_WRITE(ah, AR_Q_TXD, 0);
666 HAL_BOOL ar5416StopTxDma(struct ath_hal*ah, a_uint32_t q)
670 HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
672 HALASSERT(AH5416(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
674 OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
675 for (i = 1000; i != 0; i--) {
676 if (ar5416NumTxPending(ah, q) == 0)
678 OS_DELAY(100); /* XXX get actual value */
681 OS_REG_WRITE(ah, AR_Q_TXD, 0);
685 void ar5416IntrReqTxDesc_20(struct ath_hal *ah, struct ath_desc *ds)
687 struct ar5416_desc *ads = AR5416DESC(ds);
688 ads->ds_ctl0 |= AR_TxIntrReq;
691 HAL_BOOL ar5416SetupTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *ds,
696 a_uint32_t txRate0, a_uint32_t txTries0,
700 a_uint32_t rtsctsRate,
701 a_uint32_t rtsctsDuration,
702 a_uint32_t compicvLen,
703 a_uint32_t compivLen,
706 #define RTSCTS (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
708 struct ar5416_desc *ads = AR5416DESC(ds);
712 ads->ds_txstatus9 &= ~AR_TxDone;
714 HALASSERT(txTries0 != 0);
715 HALASSERT(isValidPktType(type));
716 HALASSERT(isValidTxRate(txRate0));
717 HALASSERT((flags & RTSCTS) != RTSCTS);
722 ads->ds_ctl0 = (pktLen & AR_FrameLen)
723 | (txPower << AR_XmitPower_S)
724 | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
725 | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
726 | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
728 ads->ds_ctl1 = (type << AR_FrameType_S)
729 | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
730 ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0);
731 ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S);
733 ads->ds_ctl7 = SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel0)
734 | SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel1)
735 | SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel2)
736 | SM(AR5416_LEGACY_CHAINMASK, AR_ChainSel3);
738 if (keyIx != HAL_TXKEYIX_INVALID) {
739 /* XXX validate key index */
740 ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
741 ads->ds_ctl0 |= AR_DestIdxValid;
744 if (flags & RTSCTS) {
745 if (!isValidTxRate(rtsctsRate)) {
748 /* XXX validate rtsctsDuration */
749 ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
750 | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
751 ads->ds_ctl2 |= SM(rtsctsDuration, AR_BurstDur);
752 ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
759 HAL_BOOL ar5416FillTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *ds,
760 a_uint32_t segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
761 const struct ath_tx_desc *ds0)
763 struct ar5416_desc *ads = AR5416DESC(ds);
765 HALASSERT((segLen &~ AR_BufLen) == 0);
769 * First descriptor, don't clobber xmit control data
770 * setup by ar5416SetupTxDesc.
772 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
773 } else if (lastSeg) {
775 * Last descriptor in a multi-descriptor frame,
776 * copy the multi-rate transmit parameters from
777 * the first frame for processing on completion.
780 ads->ds_ctl1 = segLen;
781 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
782 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
785 * Intermediate descriptor in a multi-descriptor frame.
788 ads->ds_ctl1 = segLen | AR_TxMore;
792 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
797 HAL_BOOL ar5416FillKeyTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *ds,
798 HAL_KEY_TYPE keyType)
800 struct ar5416_desc *ads = AR5416DESC(ds);
802 ads->ds_ctl6 = SM(keyType, AR_EncrType);
806 HAL_STATUS ar5416ProcTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *gds)
808 struct ar5416_desc *ads = AR5416DESC(gds);
809 struct ath_tx_desc *ds = (struct ath_tx_desc *)gds;
811 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
812 return HAL_EINPROGRESS;
814 ads->ds_txstatus9 &= ~AR_TxDone;
816 /* Update software copies of the HW status */
817 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
818 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
819 ds->ds_txstat.ts_status = 0;
820 ds->ds_txstat.ts_flags = 0;
822 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
823 ds->ds_txstat.ts_status |= HAL_TXERR_XRETRY;
824 if (ads->ds_txstatus1 & AR_Filtered)
825 ds->ds_txstat.ts_status |= HAL_TXERR_FILT;
826 if (ads->ds_txstatus1 & AR_FIFOUnderrun)
827 ds->ds_txstat.ts_status |= HAL_TXERR_FIFO;
828 if (ads->ds_txstatus9 & AR_TxOpExceeded)
829 ds->ds_txstat.ts_status |= HAL_TXERR_XTXOP;
830 if (ads->ds_txstatus1 & AR_TxTimerExpired)
831 ds->ds_txstat.ts_status |= HAL_TXERR_TIMER_EXPIRED;
833 if (ads->ds_txstatus1 & AR_DescCfgErr)
834 ds->ds_txstat.ts_flags |= HAL_TX_DESC_CFG_ERR;
835 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
836 ds->ds_txstat.ts_flags |= HAL_TX_DATA_UNDERRUN;
837 ar5416UpdateTxTrigLevel(ah, AH_TRUE);
839 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
840 ds->ds_txstat.ts_flags |= HAL_TX_DELIM_UNDERRUN;
841 ar5416UpdateTxTrigLevel(ah, AH_TRUE);
843 if (ads->ds_txstatus0 & AR_TxBaStatus) {
844 ds->ds_txstat.ts_flags |= HAL_TX_BA;
845 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
846 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
850 * Extract the transmit rate used and mark the rate as
851 * ``alternate'' if it wasn't the series 0 rate.
853 ds->ds_txstat.ts_rate = MS(ads->ds_txstatus9, AR_FinalTxIdx);
854 ds->ds_txstat.ts_rssi_combined =
855 MS(ads->ds_txstatus5, AR_TxRSSICombined);
856 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
857 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
858 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
859 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
860 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
861 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
862 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
863 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
864 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
865 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
866 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
867 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
868 ds->ds_txstat.ts_antenna = 0; /* ignored for owl */
873 void ar5416Set11nTxDesc_20(struct ath_hal *ah, struct ath_tx_desc *ds,
874 a_uint32_t pktLen, HAL_PKT_TYPE type, a_uint32_t txPower,
875 a_uint32_t keyIx, HAL_KEY_TYPE keyType,
878 struct ar5416_desc *ads = AR5416DESC(ds);
880 HALASSERT(isValidPktType(type));
881 HALASSERT(isValidKeyType(keyType));
886 ads->ds_ctl0 = (pktLen & AR_FrameLen)
887 | (flags & HAL_TXDESC_VMF ? AR_VirtMoreFrag : 0)
888 | SM(txPower, AR_XmitPower)
889 | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
890 | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
891 | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
892 | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
893 | (keyIx != HAL_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
894 | (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0);
896 ads->ds_ctl1 = (keyIx != HAL_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
897 | SM(type, AR_FrameType)
898 | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
899 | (flags & HAL_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
900 | (flags & HAL_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
902 ads->ds_ctl6 = SM(keyType, AR_EncrType);
907 void ar5416Set11nRateScenario_20(struct ath_hal *ah, struct ath_tx_desc *ds,
908 a_uint32_t durUpdateEn, a_uint32_t rtsctsRate,
909 a_uint32_t rtsctsDuration,
910 HAL_11N_RATE_SERIES series[], a_uint32_t nseries,
913 struct ar5416_desc *ads = AR5416DESC(ds);
916 HALASSERT(nseries == 4);
920 * Rate control settings override
922 ds_ctl0 = ads->ds_ctl0;
924 if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
925 if (flags & HAL_TXDESC_RTSENA) {
926 ds_ctl0 &= ~AR_CTSEnable;
927 ds_ctl0 |= AR_RTSEnable;
929 ds_ctl0 &= ~AR_RTSEnable;
930 ds_ctl0 |= AR_CTSEnable;
933 ds_ctl0 = (ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
936 ads->ds_ctl0 = ds_ctl0;
938 ads->ds_ctl2 = set11nTries(series, 0)
939 | set11nTries(series, 1)
940 | set11nTries(series, 2)
941 | set11nTries(series, 3)
942 | (durUpdateEn ? AR_DurUpdateEn : 0);
944 ads->ds_ctl3 = set11nRate(series, 0)
945 | set11nRate(series, 1)
946 | set11nRate(series, 2)
947 | set11nRate(series, 3);
949 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
950 | set11nPktDurRTSCTS(series, 1);
952 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
953 | set11nPktDurRTSCTS(series, 3);
955 ads->ds_ctl7 = set11nRateFlags(series, 0)
956 | set11nRateFlags(series, 1)
957 | set11nRateFlags(series, 2)
958 | set11nRateFlags(series, 3)
959 | SM(rtsctsRate, AR_RTSCTSRate);
964 void ar5416Set11nRateScenario_20(struct ath_hal *ah, struct ath_tx_desc *ds,
965 a_uint32_t durUpdateEn, a_uint32_t rtsctsRate,
966 a_uint32_t rtsctsDuration,
967 HAL_11N_RATE_SERIES series[], a_uint32_t nseries,
970 struct ar5416_desc *ads = AR5416DESC(ds);
973 HALASSERT(nseries == 4);
977 * Rate control settings override
979 if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
980 ds_ctl0 = ads->ds_ctl0;
982 if (flags & HAL_TXDESC_RTSENA) {
983 ds_ctl0 &= ~AR_CTSEnable;
984 ds_ctl0 |= AR_RTSEnable;
986 ds_ctl0 &= ~AR_RTSEnable;
987 ds_ctl0 |= AR_CTSEnable;
990 ads->ds_ctl0 = ds_ctl0;
993 ads->ds_ctl2 = set11nTries(series, 0)
994 | set11nTries(series, 1)
995 | set11nTries(series, 2)
996 | set11nTries(series, 3)
997 | (durUpdateEn ? AR_DurUpdateEn : 0);
999 ads->ds_ctl3 = set11nRate(series, 0)
1000 | set11nRate(series, 1)
1001 | set11nRate(series, 2)
1002 | set11nRate(series, 3);
1004 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
1005 | set11nPktDurRTSCTS(series, 1);
1007 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
1008 | set11nPktDurRTSCTS(series, 3);
1010 ads->ds_ctl7 = set11nRateFlags(series, 0)
1011 | set11nRateFlags(series, 1)
1012 | set11nRateFlags(series, 2)
1013 | set11nRateFlags(series, 3)
1014 | SM(rtsctsRate, AR_RTSCTSRate);
1019 void ar5416Set11nAggrFirst_20(struct ath_hal *ah, struct ath_tx_desc *ds, a_uint32_t aggrLen,
1020 a_uint32_t numDelims)
1022 struct ar5416_desc *ads = AR5416DESC(ds);
1024 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
1026 ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
1027 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen) |
1028 SM(numDelims, AR_PadDelim);
1031 void ar5416Set11nAggrMiddle_20(struct ath_hal *ah, struct ath_tx_desc *ds, a_uint32_t numDelims)
1033 struct ar5416_desc *ads = AR5416DESC(ds);
1036 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
1039 * We use a stack variable to manipulate ctl6 to reduce uncached
1040 * read modify, modfiy, write.
1042 ctl6 = ads->ds_ctl6;
1043 ctl6 &= ~AR_PadDelim;
1044 ctl6 |= SM(numDelims, AR_PadDelim);
1045 ads->ds_ctl6 = ctl6;
1048 void ar5416Set11nAggrLast_20(struct ath_hal *ah, struct ath_tx_desc *ds)
1050 struct ar5416_desc *ads = AR5416DESC(ds);
1052 ads->ds_ctl1 |= AR_IsAggr;
1053 ads->ds_ctl1 &= ~AR_MoreAggr;
1054 ads->ds_ctl6 &= ~AR_PadDelim;
1057 void ar5416Clr11nAggr_20(struct ath_hal *ah, struct ath_tx_desc *ds)
1059 struct ar5416_desc *ads = AR5416DESC(ds);
1061 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
1064 void ar5416Set11nBurstDuration_20(struct ath_hal *ah, struct ath_tx_desc *ds,
1065 a_uint32_t burstDuration)
1067 struct ar5416_desc *ads = AR5416DESC(ds);
1069 ads->ds_ctl2 &= ~AR_BurstDur;
1070 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
1073 void ar5416Set11nVirtualMoreFrag_20(struct ath_hal *ah, struct ath_tx_desc *ds,
1076 struct ar5416_desc *ads = AR5416DESC(ds);
1079 ads->ds_ctl0 |= AR_VirtMoreFrag;
1081 ads->ds_ctl0 &= ~AR_VirtMoreFrag;