#include "if_athrate.h"
#include "if_athvar.h"
#include "ah_desc.h"
#include "ah_internal.h"
#define ath_tgt_free_skb adf_nbuf_free
int ath_tgt_tx_add_to_aggr(struct ath_softc_tgt *sc,
struct ath_buf *bf,int datatype,
ath_atx_tid_t *tid, int is_burst);
+int ath_tgt_tx_form_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid,
+ ath_tx_bufhead *bf_q);
struct ieee80211_frame *ATH_SKB_2_WH(adf_nbuf_t skb)
{
/*
 * ath_dma_unmap - release the DMA mapping held by a TX buffer.
 *
 * Unmaps @bf->bf_dmamap (direction: CPU -> device) on @sc's device.
 * The adf_nbuf_queue_first() call's result is intentionally discarded;
 * it replaces a previous unused-local pattern here — presumably kept
 * for its queue-touching side effect, if any. TODO confirm it is safe
 * to drop entirely.
 */
static void ath_dma_unmap(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	adf_nbuf_queue_first(&bf->bf_skbhead);
	adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, ADF_OS_DMA_TO_DEVICE);
}
if (txs == NULL)
return;
+ txs->txstatus[txs->cnt].ts_flags = 0;
+
txs->txstatus[txs->cnt].cookie = bf->bf_cookie;
txs->txstatus[txs->cnt].ts_rate = SM(bf->bf_endpt, ATH9K_HTC_TXSTAT_EPID);
{
struct ath_hal *ah = sc->sc_ah;
struct ath_txq *txq;
- HAL_STATUS status;
volatile a_int32_t txe_val;
adf_os_assert(bf);
txq = bf->bf_txq;
- status = ah->ah_procTxDesc(ah, bf->bf_lastds);
+ ah->ah_procTxDesc(ah, bf->bf_lastds);
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
} else {
*txq->axq_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
- txe_val = OS_REG_READ(ah, 0x840);
+ txe_val = ioread32_mac(0x0840);
if (!(txe_val & (1<< txq->axq_qnum)))
ah->ah_setTxDP(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
}
for (bfd = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; bfd++, i++) {
ah->ah_clr11nAggr(bfd);
ah->ah_set11nBurstDuration(bfd, 0);
- ah->ah_set11nVirtualMoreFrag(ah, bfd, 0);
+ ah->ah_set11nVirtualMoreFrag(bfd, 0);
}
ath_dma_unmap(sc, bf);
ath_update_stats(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
struct ath_tx_desc *ds = bf->bf_desc;
+ struct ieee80211_frame *wh = ATH_SKB2_WH(bf->bf_skb);
u_int32_t sr, lr;
if (ds->ds_txstat.ts_status == 0) {
if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
sc->sc_tx_stats.ast_tx_altrate++;
} else {
- if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
+ if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY &&
+ !IEEE80211_IS_MULTICAST(wh->i_addr1))
sc->sc_tx_stats.ast_tx_xretries++;
if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
sc->sc_tx_stats.ast_tx_fifoerr++;
struct ath_tx_desc lastds;
struct ath_tx_desc *ds = &lastds;
struct ath_rc_series rcs[4];
- u_int16_t seq_st;
- u_int32_t *ba;
- int ba_index;
int nbad = 0;
int nframes = bf->bf_nframes;
struct ath_tx_buf *bf_next;
- int tx_ok = 1;
adf_os_mem_copy(ds, bf->bf_lastds, sizeof (struct ath_tx_desc));
adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));
- seq_st = ATH_DS_BA_SEQ(ds);
- ba = ATH_DS_BA_BITMAP(ds);
- tx_ok = (ATH_DS_TX_STATUS(ds) == HAL_OK);
-
if (!bf->bf_isaggr) {
ath_update_stats(sc, bf);
}
while (bf) {
- ba_index = ATH_BA_INDEX(seq_st, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
bf_next = bf->bf_next;
ath_tx_status_update_aggr(sc, bf, ds, rcs, 0);
for(ds = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; ds++, i++) {
ah->ah_clr11nAggr(ds);
ah->ah_set11nBurstDuration(ds, 0);
- ah->ah_set11nVirtualMoreFrag(ah, ds, 0);
+ ah->ah_set11nVirtualMoreFrag(ds, 0);
}
if (bf->bf_retries >= OWLMAX_RETRIES) {
{
struct ath_tx_buf *bf;
struct ath_tx_buf *bf_next;
- struct ath_txq *txq;
-
- txq = TID_TO_ACTXQ(tid->tidno);
bf = asf_tailq_first(&tid->buf_q);
struct ath_tx_desc *ds = bf->bf_lastds;
struct ath_node_target *an;
ath_atx_tid_t *tid;
- struct ath_txq *txq;
an = (struct ath_node_target *)bf->bf_node;
tid = &an->tid[bf->bf_tidno];
- txq = TID_TO_ACTXQ(tid->tidno);
if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
ath_bar_retry(sc, bf);
struct ath_hal *ah = sc->sc_ah;
HAL_11N_RATE_SERIES series[4];
int i = 0;
- adf_nbuf_queue_t skbhead;
a_uint8_t *anbdata;
a_uint32_t anblen;
| HAL_TXDESC_CLRDMASK
, 0, 0);
- skbhead = bf->bf_skbhead;
bf->bf_isaggr = 0;
bf->bf_next = NULL;