2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted (subject to the limitations in the
7 * disclaimer below) provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
17 * * Neither the name of Qualcomm Atheros nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
22 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
23 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
33 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <adf_os_types.h>
37 #include <adf_os_pci.h>
38 #include <adf_os_dma.h>
39 #include <adf_os_timer.h>
40 #include <adf_os_lock.h>
41 #include <adf_os_io.h>
42 #include <adf_os_mem.h>
43 #include <adf_os_util.h>
44 #include <adf_os_stdtypes.h>
45 #include <adf_os_defer.h>
46 #include <adf_os_atomic.h>
49 #include <adf_net_wcmd.h>
50 #include <adf_os_irq.h>
52 #include <if_ath_pci.h>
54 #include "ieee80211_var.h"
55 #include "if_athrate.h"
56 #include "if_athvar.h"
/*
 * Module tunables: number of RX buffers and RX descriptors.
 * -1 means "not configured"; ath_desc_alloc() replaces -1 with the
 * compile-time defaults ATH_RXBUF / ATH_RXDESC.
 */
60 static a_int32_t ath_numrxbufs = -1;
61 static a_int32_t ath_numrxdescs = -1;
/* NOTE(review): matching #endif for this #if is outside the visible span — confirm. */
63 #if defined(PROJECT_MAGPIE)
64 uint32_t *init_htc_handle = 0;
/* HTC endpoint on which received frames are forwarded to the host. */
67 #define RX_ENDPOINT_ID 3
/* Max TSF delta after SWBA within which CAB traffic is still queued
 * to hardware (see tgt_HTCRecv_cabhandler). */
68 #define ATH_CABQ_HANDLING_THRESHOLD 9000
/* Forward declarations / externs for the target TX, RX and HAL-proxy paths. */
72 void owl_tgt_tx_tasklet(TQUEUE_ARG data);
73 static void ath_tgt_send_beacon(struct ath_softc_tgt *sc,adf_nbuf_t bc_hdr,adf_nbuf_t nbuf,HTC_ENDPOINT_ID EndPt);
74 static void ath_hal_reg_write_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen);
75 static void ath_hal_reg_rmw_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen);
76 extern struct ath_tx_buf* ath_tgt_tx_prepare(struct ath_softc_tgt *sc, adf_nbuf_t skb, ath_data_hdr_t *dh);
77 extern void ath_tgt_send_mgt(struct ath_softc_tgt *sc,adf_nbuf_t mgt_hdr, adf_nbuf_t skb,HTC_ENDPOINT_ID EndPt);
78 extern HAL_BOOL ath_hal_wait(struct ath_hal *ah, a_uint32_t reg, a_uint32_t mask, a_uint32_t val);
79 extern void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq, owl_txq_state_t txqstate);
80 void owl_tgt_node_init(struct ath_node_target * an);
81 void ath_tgt_tx_sched_normal(struct ath_softc_tgt *sc, struct ath_buf *bf);
82 void ath_tgt_tx_sched_nonaggr(struct ath_softc_tgt *sc,struct ath_buf * bf_host);
85 * Extend a 32 bit TSF to nearest 64 bit TSF value.
86 * When the adapter is a STATION, its local TSF is periodically modified by
87 * the hardware to match the BSS TSF (as received in beacon packets), and
88 * rstamp may appear to be from the future or from the past (with reference
89 * to the current local TSF) because of jitter. This is mostly noticeable in
90 * highly congested channels. The code uses signed modulo arithmetic to
91 * handle both past/future cases and signed-extension to avoid branches.
93 * extend(0x0000001200000004, 0x00000006) == 0x0000001200000006
94 * extend(0x0000001200000004, 0x00000002) == 0x0000001200000002
95 * extend(0x0000001200000004, 0xfffffffe) == 0x00000011fffffffe ! tsfhigh--
96 * extend(0x00000012fffffffc, 0xfffffffe) == 0x00000012fffffffe
97 * extend(0x00000012fffffffc, 0xfffffffa) == 0x00000012fffffffa
98 * extend(0x00000012fffffffc, 0x00000002) == 0x0000001300000002 ! tsfhigh++
100 static u_int64_t ath_extend_tsf(struct ath_softc_tgt *sc, u_int32_t rstamp)
102 struct ath_hal *ah = sc->sc_ah;
105 a_int64_t tsf_delta; /* signed int64 */
107 tsf = ah->ah_getTsf64(ah);
108 tsf_low = tsf & 0xffffffff;
110 tsf_delta = (a_int32_t)rstamp - (a_int64_t)tsf_low;
112 return (tsf + (u_int64_t)tsf_delta);
115 static a_int32_t ath_rate_setup(struct ath_softc_tgt *sc, a_uint32_t mode)
117 struct ath_hal *ah = sc->sc_ah;
118 const HAL_RATE_TABLE *rt;
121 case IEEE80211_MODE_11NA:
122 sc->sc_rates[mode] = ah->ah_getRateTable(ah, HAL_MODE_11NA);
124 case IEEE80211_MODE_11NG:
125 sc->sc_rates[mode] = ah->ah_getRateTable(ah, HAL_MODE_11NG);
130 rt = sc->sc_rates[mode];
137 static void ath_setcurmode(struct ath_softc_tgt *sc,
138 enum ieee80211_phymode mode)
140 const HAL_RATE_TABLE *rt;
143 adf_os_mem_set(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
145 rt = sc->sc_rates[mode];
146 adf_os_assert(rt != NULL);
148 for (i = 0; i < rt->rateCount; i++) {
149 sc->sc_rixmap[rt->info[i].rateCode] = i;
152 sc->sc_currates = rt;
153 sc->sc_curmode = mode;
154 sc->sc_protrix = ((mode == IEEE80211_MODE_11NG) ? 3 : 0);
158 void wmi_event(wmi_handle_t handle, WMI_EVENT_ID evt_id,
159 void *buffer, a_int32_t Length)
161 adf_nbuf_t netbuf = ADF_NBUF_NULL;
164 netbuf = WMI_AllocEvent(handle, WMI_EVT_CLASS_CMD_EVENT,
165 sizeof(WMI_CMD_HDR) + Length);
167 if (netbuf == ADF_NBUF_NULL) {
168 adf_os_print("Buf null\n");
172 if (buffer != NULL && Length != 0 && Length < WMI_SVC_MAX_BUFFERED_EVENT_SIZE) {
173 pData = adf_nbuf_put_tail(netbuf, Length);
174 adf_os_mem_copy(pData, buffer, Length);
177 WMI_SendEvent(handle, netbuf, evt_id, 0, Length);
180 void wmi_cmd_rsp(void *pContext, WMI_COMMAND_ID cmd_id, A_UINT16 SeqNo,
181 void *buffer, a_int32_t Length)
183 adf_nbuf_t netbuf = ADF_NBUF_NULL;
186 netbuf = WMI_AllocEvent(pContext, WMI_EVT_CLASS_CMD_REPLY,
187 sizeof(WMI_CMD_HDR) + Length);
189 if (netbuf == ADF_NBUF_NULL) {
194 if (Length != 0 && buffer != NULL) {
195 pData = (A_UINT8 *)adf_nbuf_put_tail(netbuf, Length);
196 adf_os_mem_copy(pData, buffer, Length);
199 WMI_SendEvent(pContext, netbuf, cmd_id, SeqNo, Length);
202 static void ath_node_vdelete_tgt(struct ath_softc_tgt *sc, a_uint8_t vap_index)
206 for (i = 0; i < TARGET_NODE_MAX; i++) {
207 if(sc->sc_sta[i].ni.ni_vapindex == vap_index)
208 sc->sc_sta[i].an_valid = 0;
212 a_uint8_t ath_get_minrateidx(struct ath_softc_tgt *sc, struct ath_vap_target *avp)
214 if (sc->sc_curmode == IEEE80211_MODE_11NG)
215 return avp->av_minrateidx[0];
216 else if (sc->sc_curmode == IEEE80211_MODE_11NA)
217 return avp->av_minrateidx[1];
226 static adf_nbuf_t ath_alloc_skb_align(struct ath_softc_tgt *sc,
227 a_uint32_t size, a_uint32_t align)
231 skb = BUF_Pool_alloc_buf_align(sc->pool_handle, POOL_ID_WLAN_RX_BUF,
232 RX_HEADER_SPACE, align);
/*
 * (Re)initialize an RX descriptor and append it to the hardware RX
 * chain.  One descriptor is always kept back in sc->sc_rxdesc_held so
 * the DMA engine never runs onto a descriptor that is being recycled:
 * the caller's descriptor is parked and the previously held one is the
 * descriptor actually prepared and linked here.
 */
236 static a_int32_t ath_rxdesc_init(struct ath_softc_tgt *sc, struct ath_rx_desc *ds)
238 struct ath_hal *ah = sc->sc_ah;
239 struct ath_rx_desc *ds_held;
/* First call after reset: nothing held yet — just park this one. */
243 if (!sc->sc_rxdesc_held) {
244 sc->sc_rxdesc_held = ds;
/* Swap: park the new descriptor, work on the previously held one. */
248 ds_held = sc->sc_rxdesc_held;
249 sc->sc_rxdesc_held = ds;
/* Attach a fresh buffer if the descriptor lost its nbuf (sent to host). */
252 if (ds->ds_nbuf == ADF_NBUF_NULL) {
253 ds->ds_nbuf = ath_alloc_skb_align(sc, sc->sc_rxbufsize, sc->sc_cachelsz);
254 if (ds->ds_nbuf == ADF_NBUF_NULL) {
/* Pool exhausted: re-park and count the failure. */
255 sc->sc_rxdesc_held = ds;
256 sc->sc_rx_stats.ast_rx_nobuf++;
/* Map the buffer for DMA and record its physical address. */
259 adf_nbuf_map(sc->sc_dev, ds->ds_dmap, ds->ds_nbuf, ADF_OS_DMA_FROM_DEVICE);
260 adf_nbuf_dmamap_info(ds->ds_dmap, &ds->ds_dmap_info);
261 ds->ds_data = ds->ds_dmap_info.dma_segs[0].paddr;
265 adf_nbuf_peek_header(ds->ds_nbuf, &anbdata, &anblen);
267 ah->ah_setupRxDesc(ds, adf_nbuf_tailroom(ds->ds_nbuf), 0);
/* Link into the hardware chain: either program the RX DMA pointer
 * (empty chain) or patch the previous descriptor's link word. */
269 if (sc->sc_rxlink == NULL) {
270 ah->ah_setRxDP(ah, ds->ds_daddr);
273 *sc->sc_rxlink = ds->ds_daddr;
275 sc->sc_rxlink = &ds->ds_link;
276 ah->ah_enableReceive(ah);
/*
 * Completion path for an RX frame that has been forwarded to the host:
 * split the (possibly multi-fragment) nbuf back into its fragments,
 * return each fragment buffer to the RX pool, and move the matching
 * idle descriptors back onto the active RX list after re-initializing
 * them via ath_rxdesc_init().
 */
281 static void ath_rx_complete(struct ath_softc_tgt *sc, adf_nbuf_t buf)
283 struct ath_rx_desc *ds;
285 adf_nbuf_queue_t nbuf_head;
287 adf_nbuf_split_to_frag(buf, &nbuf_head);
/* Walk the idle-descriptor list in step with the fragment queue. */
288 ds = asf_tailq_first(&sc->sc_rxdesc_idle);
291 struct ath_rx_desc *ds_tmp;
292 buf_tmp = adf_nbuf_queue_remove(&nbuf_head);
/* No more fragments: every descriptor has been recycled. */
294 if (buf_tmp == NULL) {
298 BUF_Pool_free_buf(sc->pool_handle, POOL_ID_WLAN_RX_BUF, buf_tmp);
/* Advance before re-initializing, since ds_tmp is re-queued below. */
301 ds = asf_tailq_next(ds, ds_list);
303 ath_rxdesc_init(sc, ds_tmp);
305 asf_tailq_remove(&sc->sc_rxdesc_idle, ds_tmp, ds_list);
306 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_tmp, ds_list);
310 static void tgt_HTCSendCompleteHandler(HTC_ENDPOINT_ID Endpt, adf_nbuf_t buf, void *ServiceCtx)
312 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
314 if (Endpt == RX_ENDPOINT_ID) {
315 sc->sc_rx_stats.ast_rx_done++;
316 ath_rx_complete(sc, buf);
/*
 * Drain completed RX descriptors from the hardware chain.
 *
 * Walks sc->sc_rxdesc in hardware order, letting the HAL decode each
 * descriptor (ah_procRxDescFast).  Fragments belonging to one frame
 * (rs_more chained) are accumulated until the final descriptor, then
 * their nbufs are collected into a fragment queue, stitched into a
 * single frame (adf_nbuf_create_frm_frag) and the ath_rx_buf is marked
 * ATH_BUFSTATUS_DONE for the RX tasklet.  Descriptors whose buffers
 * were handed off are re-initialized or parked on sc_rxdesc_idle when
 * no replacement buffer is available.
 *
 * NOTE(review): several loop headers/branches fall outside the visible
 * span; the commentary below follows the visible statements only.
 */
320 static void ath_uapsd_processtriggers(struct ath_softc_tgt *sc)
322 struct ath_hal *ah = sc->sc_ah;
323 struct ath_rx_buf *bf = NULL;
324 struct ath_rx_desc *ds, *ds_head, *ds_tail, *ds_tmp;
327 a_uint16_t frame_len = 0;
/* Translate a descriptor's physical address back to its virtual one. */
330 #define PA2DESC(_sc, _pa) \
331 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
332 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
334 tsf = ah->ah_getTsf64(ah);
335 bf = asf_tailq_first(&sc->sc_rxbuf);
337 ds = asf_tailq_first(&sc->sc_rxdesc);
/* Pathological frame spanning (almost) every RX buffer: drop it. */
343 if (cnt == ath_numrxbufs - 1) {
344 adf_os_print("VERY LONG PACKET!!!!!\n");
348 struct ath_rx_desc *ds_rmv;
349 adf_nbuf_unmap(sc->sc_dev, ds_tmp->ds_dmap, ADF_OS_DMA_FROM_DEVICE);
351 ds_tmp = asf_tailq_next(ds_tmp, ds_list);
353 if (ds_tmp == NULL) {
354 adf_os_print("ds_tmp is NULL\n");
/* Free the oversize frame's buffers straight back to the pool. */
358 BUF_Pool_free_buf(sc->pool_handle, POOL_ID_WLAN_RX_BUF, ds_rmv->ds_nbuf);
359 ds_rmv->ds_nbuf = ADF_NBUF_NULL;
/* Re-arm the descriptor; park it on the idle list on failure. */
361 if (ath_rxdesc_init(sc, ds_rmv) == 0) {
362 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
363 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_rmv, ds_list);
366 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
367 asf_tailq_insert_tail(&sc->sc_rxdesc_idle, ds_rmv, ds_list);
370 if (ds_rmv == ds_tail) {
/* ds_link == 0 marks the end of the hardware chain. */
377 if (ds->ds_link == 0) {
/* Already processed on an earlier pass. */
381 if (bf->bf_status & ATH_BUFSTATUS_DONE) {
385 retval = ah->ah_procRxDescFast(ah, ds, ds->ds_daddr,
386 PA2DESC(sc, ds->ds_link), &bf->bf_rx_status);
/* Hardware not finished with this descriptor yet: stop the walk. */
387 if (HAL_EINPROGRESS == retval) {
391 if (adf_nbuf_len(ds->ds_nbuf) == 0) {
392 adf_nbuf_put_tail(ds->ds_nbuf, bf->bf_rx_status.rs_datalen);
/* Accumulate total frame length across chained fragments. */
395 frame_len += bf->bf_rx_status.rs_datalen;
/* rs_more == 0: this descriptor completes the frame. */
397 if (bf->bf_rx_status.rs_more == 0) {
398 adf_nbuf_queue_t nbuf_head;
399 adf_nbuf_queue_init(&nbuf_head);
404 ds = asf_tailq_next(ds, ds_list);
407 ds_head = asf_tailq_next(ds_tail, ds_list);
/* Collect each fragment's nbuf and recycle its descriptor. */
410 struct ath_rx_desc *ds_rmv;
412 adf_nbuf_unmap(sc->sc_dev, ds_tmp->ds_dmap, ADF_OS_DMA_FROM_DEVICE);
413 adf_nbuf_queue_add(&nbuf_head, ds_tmp->ds_nbuf);
414 ds_tmp->ds_nbuf = ADF_NBUF_NULL;
417 ds_tmp = asf_tailq_next(ds_tmp, ds_list);
418 if (ds_tmp == NULL) {
422 if (ath_rxdesc_init(sc, ds_rmv) == 0) {
423 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
424 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_rmv, ds_list);
426 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
427 asf_tailq_insert_tail(&sc->sc_rxdesc_idle, ds_rmv, ds_list);
430 if (ds_rmv == ds_tail) {
/* Publish the assembled frame to the RX tasklet. */
436 bf->bf_rx_status.rs_datalen = frame_len;
439 bf->bf_skb = adf_nbuf_create_frm_frag(&nbuf_head);
441 bf->bf_status |= ATH_BUFSTATUS_DONE;
443 bf = (struct ath_rx_buf *)asf_tailq_next(bf, bf_list);
446 ds = asf_tailq_next(ds, ds_list);
453 static a_int32_t ath_startrecv(struct ath_softc_tgt *sc)
455 struct ath_hal *ah = sc->sc_ah;
456 struct ath_rx_desc *ds;
458 sc->sc_rxbufsize = 1024+512+128;
459 sc->sc_rxlink = NULL;
461 sc->sc_rxdesc_held = NULL;
463 asf_tailq_foreach(ds, &sc->sc_rxdesc, ds_list) {
464 a_int32_t error = ath_rxdesc_init(sc, ds);
470 ds = asf_tailq_first(&sc->sc_rxdesc);
471 ah->ah_setRxDP(ah, ds->ds_daddr);
/*
 * RX tasklet: forward every completed RX buffer to the host.
 *
 * For each ath_rx_buf marked ATH_BUFSTATUS_DONE, prepend an
 * rx_frame_header carrying the (TSF-extended) RX status, send the frame
 * to the host over the RX HTC endpoint, then recycle the buffer to the
 * tail of sc_rxbuf.  Finally re-enable the RX interrupt that ath_intr
 * masked off while the tasklet was pending.
 */
476 static void ath_tgt_rx_tasklet(TQUEUE_ARG data)
478 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
479 struct ath_rx_buf *bf = NULL;
480 struct ath_hal *ah = sc->sc_ah;
481 struct rx_frame_header *rxhdr;
482 struct ath_rx_status *rxstats;
483 adf_nbuf_t skb = ADF_NBUF_NULL;
486 bf = asf_tailq_first(&sc->sc_rxbuf);
/* Stop at the first buffer the RX path has not completed yet. */
491 if (!(bf->bf_status & ATH_BUFSTATUS_DONE)) {
500 asf_tailq_remove(&sc->sc_rxbuf, bf, bf_list);
/* Prepend the status header consumed by the host driver. */
504 rxhdr = (struct rx_frame_header *)adf_nbuf_push_head(skb,
505 sizeof(struct rx_frame_header));
506 rxstats = (struct ath_rx_status *)(&rxhdr->rx_stats[0]);
507 adf_os_mem_copy(rxstats, &(bf->bf_rx_status),
508 sizeof(struct ath_rx_status));
/* Widen the hardware's 32-bit timestamp to the full 64-bit TSF. */
510 rxstats->rs_tstamp = ath_extend_tsf(sc, (u_int32_t)rxstats->rs_tstamp);
512 HTC_SendMsg(sc->tgt_htc_handle, RX_ENDPOINT_ID, skb);
513 sc->sc_rx_stats.ast_rx_send++;
/* Recycle the buffer for the next frame. */
515 bf->bf_status &= ~ATH_BUFSTATUS_DONE;
516 asf_tailq_insert_tail(&sc->sc_rxbuf, bf, bf_list);
/* Re-enable RX interrupts (masked in ath_intr until we caught up). */
520 sc->sc_imask |= HAL_INT_RX;
521 ah->ah_setInterrupts(ah, sc->sc_imask);
524 /*******************/
525 /* Beacon Handling */
526 /*******************/
529 * Setup the beacon frame for transmit.
530 * FIXME: Short Preamble.
/*
 * Fill in the TX descriptor for a beacon frame: no-ACK, sent at the
 * VAP's minimum rate on the configured TX chainmask.
 */
532 static void ath_beacon_setup(struct ath_softc_tgt *sc,
533 struct ath_tx_buf *bf,
534 struct ath_vap_target *avp)
536 adf_nbuf_t skb = bf->bf_skb;
537 struct ath_hal *ah = sc->sc_ah;
538 struct ath_tx_desc *ds;
540 const HAL_RATE_TABLE *rt;
542 HAL_11N_RATE_SERIES series[4] = {{ 0 }};
/* Beacons are never acknowledged. */
544 flags = HAL_TXDESC_NOACK;
548 ds->ds_data = bf->bf_dmamap_info.dma_segs[0].paddr;
/* Beacon rate: the VAP's minimum rate for the current PHY mode. */
550 rix = ath_get_minrateidx(sc, avp);
551 rt = sc->sc_currates;
552 rate = rt->info[rix].rateCode;
554 ah->ah_setupTxDesc(ds
555 , adf_nbuf_len(skb) + IEEE80211_CRC_LEN
556 , sizeof(struct ieee80211_frame)
557 , HAL_PKT_TYPE_BEACON
560 , HAL_TXKEYIX_INVALID
566 , asf_roundup(adf_nbuf_len(skb), 4)
/* Single-rate scenario: series[0] only, chainmask from ic config. */
572 series[0].Rate = rate;
573 series[0].ChSel = sc->sc_ic.ic_tx_chainmask;
574 series[0].RateFlags = 0;
575 ah->ah_set11nRateScenario(ds, 0, 0, series, 4, 0);
/*
 * Queue a host-supplied beacon frame on the beacon hardware queue.
 *
 * The host prepends an ath_beacon_hdr_t identifying the VAP.  Any
 * beacon still attached to the VAP's dedicated beacon buffer is
 * unmapped and returned to the host first, then the new frame is
 * DMA-mapped, its descriptor built (ath_beacon_setup) and beacon DMA
 * restarted.
 */
578 static void ath_tgt_send_beacon(struct ath_softc_tgt *sc, adf_nbuf_t bc_hdr,
579 adf_nbuf_t nbuf, HTC_ENDPOINT_ID EndPt)
581 struct ath_hal *ah = sc->sc_ah;
582 struct ath_tx_buf *bf;
583 a_uint8_t vap_index, *anbdata;
584 ath_beacon_hdr_t *bhdr;
585 struct ieee80211vap_target *vap;
587 struct ieee80211_frame *wh;
590 adf_nbuf_peek_header(nbuf, &anbdata, &anblen);
591 bhdr = (ath_beacon_hdr_t *)anbdata;
/* Beacons are expected without a separate header buffer.
 * NOTE(review): %x with a pointer/nbuf argument — questionable format. */
593 adf_os_print("found bc_hdr! 0x%x\n", bc_hdr);
596 vap_index = bhdr->vap_index;
597 adf_os_assert(vap_index < TARGET_VAP_MAX);
598 vap = &sc->sc_vap[vap_index].av_vap;
/* Strip the target-specific beacon header before transmit. */
600 wh = (struct ieee80211_frame *)adf_nbuf_pull_head(nbuf,
601 sizeof(ath_beacon_hdr_t));
603 bf = sc->sc_vap[vap_index].av_bcbuf;
605 bf->bf_endpt = EndPt;
/* Return the previous beacon (if any) to the host: restore its header
 * and free it back through HTC. */
608 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, ADF_OS_DMA_TO_DEVICE);
609 adf_nbuf_push_head(bf->bf_skb, sizeof(ath_beacon_hdr_t));
610 ath_free_tx_skb(sc->tgt_htc_handle, bf->bf_endpt, bf->bf_skb);
615 adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, nbuf, ADF_OS_DMA_TO_DEVICE);
616 adf_nbuf_dmamap_info(bf->bf_dmamap,&bf->bf_dmamap_info);
618 ath_beacon_setup(sc, bf, &sc->sc_vap[vap_index]);
/* Restart beacon DMA pointing at the freshly built descriptor. */
619 ah->ah_stopTxDma(ah, sc->sc_bhalq);
620 ah->ah_setTxDP(ah, sc->sc_bhalq, ATH_BUF_GET_DESC_PHY_ADDR(bf));
621 ah->ah_startTxDma(ah, sc->sc_bhalq);
628 static void ath_tx_stopdma(struct ath_softc_tgt *sc, struct ath_txq *txq)
630 struct ath_hal *ah = sc->sc_ah;
632 ah->ah_stopTxDma(ah, txq->axq_qnum);
635 static void owltgt_txq_drain(struct ath_softc_tgt *sc, struct ath_txq *txq)
637 owltgt_tx_processq(sc, txq, OWL_TXQ_STOPPED);
/* Thin wrapper kept for naming symmetry with the host driver. */
static void ath_tx_draintxq(struct ath_softc_tgt *sc, struct ath_txq *txq)
{
	owltgt_txq_drain(sc, txq);
}
/*
 * Drain all transmit queues: stop beacon and data TX DMA, flush every
 * configured queue and tear down any TIDs still scheduled on them.
 * sc_tx_draining guards the TX path against new work while draining.
 */
645 static void ath_draintxq(struct ath_softc_tgt *sc, HAL_BOOL drain_softq)
647 struct ath_hal *ah = sc->sc_ah;
649 struct ath_txq *txq = NULL;
650 struct ath_atx_tid *tid = NULL;
652 ath_tx_status_clear(sc);
653 sc->sc_tx_draining = 1;
/* Beacon queue first, then every configured data queue. */
655 ah->ah_stopTxDma(ah, sc->sc_bhalq);
657 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
658 if (ATH_TXQ_SETUP(sc, i))
659 ath_tx_stopdma(sc, ATH_TXQ(sc, i));
/* Second pass: flush queued frames and drain scheduled TIDs. */
661 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
662 if (ATH_TXQ_SETUP(sc, i)) {
663 owltgt_tx_processq(sc, ATH_TXQ(sc,i), OWL_TXQ_STOPPED);
666 while (!asf_tailq_empty(&txq->axq_tidq)){
667 TAILQ_DEQ(&txq->axq_tidq, tid, tid_qelem);
670 tid->sched = AH_FALSE;
671 ath_tgt_tid_drain(sc,tid);
675 sc->sc_tx_draining = 0;
/*
 * Initialize the software state of every hardware TX queue and record
 * the WME access-category -> queue, UAPSD and CAB queue mappings.
 * sc_txqsetup ends up with one bit set per configured queue.
 */
678 static void ath_tgt_txq_setup(struct ath_softc_tgt *sc)
685 for (qnum=0;qnum<HAL_NUM_TX_QUEUES;qnum++) {
686 txq= &sc->sc_txq[qnum];
687 txq->axq_qnum = qnum;
688 txq->axq_link = NULL;
689 asf_tailq_init(&txq->axq_q);
691 txq->axq_linkbuf = NULL;
692 asf_tailq_init(&txq->axq_tidq);
693 sc->sc_txqsetup |= 1<<qnum;
/* Dedicated queues for UAPSD delivery and content-after-beacon. */
696 sc->sc_uapsdq = &sc->sc_txq[UAPSDQ_NUM];
697 sc->sc_cabq = &sc->sc_txq[CABQ_NUM];
/* WME access category to hardware queue mapping. */
699 sc->sc_ac2q[WME_AC_BE] = &sc->sc_txq[0];
700 sc->sc_ac2q[WME_AC_BK] = &sc->sc_txq[1];
701 sc->sc_ac2q[WME_AC_VI] = &sc->sc_txq[2];
702 sc->sc_ac2q[WME_AC_VO] = &sc->sc_txq[3];
708 static void tgt_HTCRecv_beaconhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
709 adf_nbuf_t buf, void *ServiceCtx)
711 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
713 ath_tgt_send_beacon(sc, hdr_buf, buf, EndPt);
/*
 * HTC receive handler for the UAPSD endpoint.
 * NOTE(review): the body is outside the visible span — confirm what it
 * does with the received buffer before relying on it.
 */
716 static void tgt_HTCRecv_uapsdhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
717 adf_nbuf_t buf, void *ServiceCtx)
721 static void tgt_HTCRecv_mgmthandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
722 adf_nbuf_t buf, void *ServiceCtx)
724 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
726 ath_tgt_send_mgt(sc,hdr_buf,buf,EndPt);
/*
 * HTC receive handler for data frames from the host.
 *
 * Parses the ath_data_hdr_t (either prepended to `buf` or carried in
 * `hdr_buf`), looks up the destination node/TID, builds a TX buffer and
 * dispatches it to the aggregation or normal TX scheduler.  On prepare
 * failure the frame is returned to the host via ath_free_tx_skb.
 */
729 static void tgt_HTCRecvMessageHandler(HTC_ENDPOINT_ID EndPt,
730 adf_nbuf_t hdr_buf, adf_nbuf_t buf,
733 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
734 struct ath_tx_buf *bf;
738 struct ath_node_target *an;
739 struct ath_atx_tid *tid;
/* Header embedded in the data buffer: strip it after peeking. */
742 adf_nbuf_peek_header(buf, &data, &len);
743 adf_nbuf_pull_head(buf, sizeof(ath_data_hdr_t));
/* Header supplied separately in hdr_buf. */
745 adf_nbuf_peek_header(hdr_buf, &data, &len);
748 adf_os_assert(len >= sizeof(ath_data_hdr_t));
749 dh = (ath_data_hdr_t *)data;
751 an = &sc->sc_sta[dh->ni_index];
752 tid = ATH_AN_2_TID(an, dh->tidno);
754 sc->sc_tx_stats.tx_tgt++;
756 bf = ath_tgt_tx_prepare(sc, buf, dh);
/* Prepare failed: give the buffer back to the host. */
758 ath_free_tx_skb(sc->tgt_htc_handle,EndPt,buf);
/* cookie identifies this frame in TX-completion events to the host. */
762 bf->bf_endpt = EndPt;
763 bf->bf_cookie = dh->cookie;
765 if (tid->flag & TID_AGGR_ENABLED)
766 ath_tgt_handle_aggr(sc, bf);
768 ath_tgt_handle_normal(sc, bf);
/*
 * HTC receive handler for content-after-beacon (CAB) traffic.
 *
 * CAB frames must go out shortly after the beacon.  If more than
 * ATH_CABQ_HANDLING_THRESHOLD TSF ticks have elapsed since the last
 * SWBA (recorded in sc_swba_tsf by ath_intr), the buffer is returned
 * unsent; otherwise it follows the normal data TX path.
 */
771 static void tgt_HTCRecv_cabhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
772 adf_nbuf_t buf, void *ServiceCtx)
774 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
775 struct ath_hal *ah = sc->sc_ah;
779 #ifdef ATH_ENABLE_CABQ
780 tsf = ah->ah_getTsf64(ah);
781 tmp = tsf - sc->sc_swba_tsf;
/* Too late in the beacon interval: give the buffer back to HTC. */
783 if ( tmp > ATH_CABQ_HANDLING_THRESHOLD ) {
784 HTC_ReturnBuffers(sc->tgt_htc_handle, EndPt, buf);
788 tgt_HTCRecvMessageHandler(EndPt, hdr_buf, buf, ServiceCtx);
792 /***********************/
793 /* Descriptor Handling */
794 /***********************/
/*
 * Allocate one DMA-coherent descriptor region (nbuf * ndesc descriptors
 * of descSize bytes) plus an array of nbuf software buffer structures
 * (bfSize bytes each), wire each buffer to its ndesc descriptors and
 * queue the buffers on `head`.  On failure the descriptor memory is
 * released and *dd zeroed.
 */
796 static a_int32_t ath_descdma_setup(struct ath_softc_tgt *sc,
797 struct ath_descdma *dd, ath_bufhead *head,
798 const char *name, a_int32_t nbuf, a_int32_t ndesc,
799 a_uint32_t bfSize, a_uint32_t descSize)
/* Virtual descriptor address -> physical (bus) address. */
801 #define DS2PHYS(_dd, _ds) \
802 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
806 a_int32_t i, bsize, error;
811 dd->dd_desc_len = descSize * nbuf * ndesc;
813 dd->dd_desc = adf_os_dmamem_alloc(sc->sc_dev,
814 dd->dd_desc_len, 1, &dd->dd_desc_dmamap);
815 dd->dd_desc_paddr = adf_os_dmamem_map2addr(dd->dd_desc_dmamap);
816 if (dd->dd_desc == NULL) {
/* Software buffer array, zeroed. */
822 bsize = bfSize * nbuf;
823 bf = adf_os_mem_alloc(bsize);
828 adf_os_mem_set(bf, 0, bsize);
/* Walk both arrays in parallel using byte cursors, since the element
 * sizes (bfSize/descSize) are caller-supplied. */
831 bf_addr = (a_uint8_t *)bf;
832 ds_addr = (a_uint8_t *)ds;
834 asf_tailq_init(head);
836 for (i = 0; i < nbuf; i++) {
839 if (adf_nbuf_dmamap_create( sc->sc_dev, &bf->bf_dmamap) != A_STATUS_OK) {
/* Record virtual and physical addresses of this buffer's descriptors. */
843 bf->bf_desc = bf->bf_descarr = bf->bf_lastds = ds;
844 for (j = 0; j < ndesc; j++)
845 ATH_BUF_SET_DESC_PHY_ADDR_WITH_IDX(bf, j, (ds_addr + (j*descSize)));
847 ATH_BUF_SET_DESC_PHY_ADDR(bf, ATH_BUF_GET_DESC_PHY_ADDR_WITH_IDX(bf, 0));
849 adf_nbuf_queue_init(&bf->bf_skbhead);
850 asf_tailq_insert_tail(head, bf, bf_list);
/* Advance to the next buffer and its descriptor group. */
853 ds_addr += (ndesc * descSize);
854 bf = (struct ath_buf *)bf_addr;
855 ds = (struct ath_desc *)ds_addr;
/* Error path: release descriptor DMA memory and reset the state. */
860 adf_os_dmamem_free(sc->sc_dev, dd->dd_desc_len,
861 1, dd->dd_desc, dd->dd_desc_dmamap);
863 adf_os_mem_set(dd, 0, sizeof(*dd));
/*
 * Undo ath_descdma_setup(): for every buffer on `head`, unmap and free
 * any attached nbufs (fragment queue or single bf_skb) and destroy its
 * DMA map, then release the descriptor DMA memory, the buffer array and
 * zero *dd.  `dir` is the DMA direction the buffers were mapped with.
 */
870 static void ath_descdma_cleanup(struct ath_softc_tgt *sc,
871 struct ath_descdma *dd,
872 ath_bufhead *head, a_int32_t dir)
875 struct ieee80211_node_target *ni;
877 asf_tailq_foreach(bf, head, bf_list) {
/* Multi-fragment buffer: drain and free every queued fragment. */
878 if (adf_nbuf_queue_len(&bf->bf_skbhead) != 0) {
879 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, dir);
880 while(adf_nbuf_queue_len(&bf->bf_skbhead) != 0) {
882 adf_nbuf_queue_remove(&bf->bf_skbhead));
/* Single-skb buffer (RX): unmap and return it to the RX pool. */
885 } else if (bf->bf_skb != NULL) {
886 adf_nbuf_unmap(sc->sc_dev,bf->bf_dmamap, dir);
887 ath_free_rx_skb(sc, bf->bf_skb);
891 adf_nbuf_dmamap_destroy(sc->sc_dev, bf->bf_dmamap);
897 adf_os_dmamem_free(sc->sc_dev, dd->dd_desc_len,
898 1, dd->dd_desc, dd->dd_desc_dmamap);
900 asf_tailq_init(head);
901 adf_os_mem_free(dd->dd_bufptr);
902 adf_os_mem_set(dd, 0, sizeof(*dd));
/*
 * Allocate all descriptor DMA areas: RX (self-chained ath_rx_desc list
 * plus idle list), TX (ATH_TXBUF + 1 buffers, one of which is parked in
 * sc_txbuf_held as an emergency reserve) and beacon buffers.  Earlier
 * allocations are cleaned up when a later one fails.
 */
905 static a_int32_t ath_desc_alloc(struct ath_softc_tgt *sc)
907 #define DS2PHYS(_dd, _ds) \
908 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
911 struct ath_tx_buf *bf;
/* Resolve the -1 "unset" tunables to their compile-time defaults. */
913 if(ath_numrxbufs == -1)
914 ath_numrxbufs = ATH_RXBUF;
916 if (ath_numrxdescs == -1)
917 ath_numrxdescs = ATH_RXDESC;
919 error = ath_descdma_setup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
920 "rx", ath_numrxdescs, 1,
921 sizeof(struct ath_rx_buf),
922 sizeof(struct ath_rx_desc));
/* Build the RX descriptor chain: each descriptor links to the next. */
927 struct ath_descdma *dd = &sc->sc_rxdma;
928 struct ath_rx_desc *ds = (struct ath_rx_desc *)dd->dd_desc;
929 struct ath_rx_desc *ds_prev = NULL;
931 asf_tailq_init(&sc->sc_rxdesc);
932 asf_tailq_init(&sc->sc_rxdesc_idle);
934 for (i = 0; i < ath_numrxdescs; i++, ds++) {
936 if (ds->ds_nbuf != ADF_NBUF_NULL) {
937 ds->ds_nbuf = ADF_NBUF_NULL;
940 if (adf_nbuf_dmamap_create(sc->sc_dev, &ds->ds_dmap) != A_STATUS_OK) {
944 ds->ds_daddr = DS2PHYS(&sc->sc_rxdma, ds);
947 ds_prev->ds_link = ds->ds_daddr;
953 asf_tailq_insert_tail(&sc->sc_rxdesc, ds, ds_list);
/* TX buffers: one extra so a "held" reserve can be split off below. */
956 error = ath_descdma_setup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
957 "tx", ATH_TXBUF + 1, ATH_TXDESC,
958 sizeof(struct ath_tx_buf),
959 sizeof(struct ath_tx_desc));
961 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
962 ADF_OS_DMA_FROM_DEVICE);
966 error = ath_descdma_setup(sc, &sc->sc_bdma, (ath_bufhead *)&sc->sc_bbuf,
967 "beacon", ATH_BCBUF, 1,
968 sizeof(struct ath_tx_buf),
969 sizeof(struct ath_tx_desc));
/* Beacon setup failed: unwind TX and RX allocations. */
971 ath_descdma_cleanup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
972 ADF_OS_DMA_TO_DEVICE);
973 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
974 ADF_OS_DMA_FROM_DEVICE);
/* Park one TX buffer as the held reserve (returned in ath_desc_free). */
978 bf = asf_tailq_first(&sc->sc_txbuf);
979 bf->bf_isaggr = bf->bf_isretried = bf->bf_retries = 0;
980 asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);
982 sc->sc_txbuf_held = bf;
989 static void ath_desc_free(struct ath_softc_tgt *sc)
991 asf_tailq_insert_tail(&sc->sc_txbuf, sc->sc_txbuf_held, bf_list);
993 sc->sc_txbuf_held = NULL;
995 if (sc->sc_txdma.dd_desc_len != 0)
996 ath_descdma_cleanup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
997 ADF_OS_DMA_TO_DEVICE);
998 if (sc->sc_rxdma.dd_desc_len != 0)
999 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
1000 ADF_OS_DMA_FROM_DEVICE);
1003 /**********************/
1004 /* Interrupt Handling */
1005 /**********************/
/*
 * Top-level interrupt handler.
 *
 * Reads and masks the pending HAL interrupt status, then dispatches:
 * FATAL -> disable interrupts + fatal tasklet; SWBA -> send a WMI SWBA
 * event to the host (which replies with the beacon), record the SWBA
 * TSF and drain queue 8; RX -> process descriptors inline, mask RX and
 * schedule the RX tasklet (which re-enables RX when caught up);
 * TXURN -> bump the HW trigger level; plus BMISS/GTT/CST accounting.
 */
1007 adf_os_irq_resp_t ath_intr(adf_drv_handle_t hdl)
1009 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)hdl;
1010 struct ath_hal *ah = sc->sc_ah;
/* NOTE(review): the guard condition for this first early-out is outside
 * the visible span — presumably a "device invalid/stopped" check. */
1014 return ADF_OS_IRQ_NONE;
1016 if (!ah->ah_isInterruptPending(ah))
1017 return ADF_OS_IRQ_NONE;
1019 ah->ah_getPendingInterrupts(ah, &status);
/* Only react to interrupt sources we have enabled. */
1021 status &= sc->sc_imask;
1023 if (status & HAL_INT_FATAL) {
1024 ah->ah_setInterrupts(ah, 0);
1025 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_fataltq);
1027 if (status & HAL_INT_SWBA) {
1028 WMI_SWBA_EVENT swbaEvt;
1029 struct ath_txq *txq = ATH_TXQ(sc, 8);
1031 swbaEvt.tsf = ah->ah_getTsf64(ah);
1032 swbaEvt.beaconPendingCount = ah->ah_numTxPending(ah, sc->sc_bhalq);
/* Remember beacon time: tgt_HTCRecv_cabhandler gates CAB on this. */
1033 sc->sc_swba_tsf = ah->ah_getTsf64(ah);
1035 wmi_event(sc->tgt_wmi_handle,
1038 sizeof(WMI_SWBA_EVENT));
1040 ath_tx_draintxq(sc, txq);
1043 if (status & HAL_INT_RXORN)
1044 sc->sc_int_stats.ast_rxorn++;
1046 if (status & HAL_INT_RXEOL)
1047 sc->sc_int_stats.ast_rxeol++;
1049 if (status & (HAL_INT_RX | HAL_INT_RXEOL | HAL_INT_RXORN)) {
1050 if (status & HAL_INT_RX)
1051 sc->sc_int_stats.ast_rx++;
1053 ath_uapsd_processtriggers(sc);
/* Mask RX until the RX tasklet drains the done buffers. */
1055 sc->sc_imask &= ~HAL_INT_RX;
1056 ah->ah_setInterrupts(ah, sc->sc_imask);
1058 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_rxtq);
1061 if (status & HAL_INT_TXURN) {
1062 sc->sc_int_stats.ast_txurn++;
/* Raise the TX FIFO trigger level to avoid repeated underruns. */
1063 ah->ah_updateTxTrigLevel(ah, AH_TRUE);
1066 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_txtq);
1068 if (status & HAL_INT_BMISS) {
1069 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_bmisstq);
1072 if (status & HAL_INT_GTT)
1073 sc->sc_int_stats.ast_txto++;
1075 if (status & HAL_INT_CST)
1076 sc->sc_int_stats.ast_cst++;
1079 return ADF_OS_IRQ_HANDLED;
1082 static void ath_fatal_tasklet(TQUEUE_ARG data )
1084 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
1086 wmi_event(sc->tgt_wmi_handle, WMI_FATAL_EVENTID, NULL, 0);
1089 static void ath_bmiss_tasklet(TQUEUE_ARG data)
1091 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
1093 wmi_event(sc->tgt_wmi_handle, WMI_BMISS_EVENTID, NULL, 0);
1100 static void ath_enable_intr_tgt(void *Context, A_UINT16 Command,
1101 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1103 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1104 struct ath_hal *ah = sc->sc_ah;
1108 intr = (*(a_uint32_t *)data);
1110 intr = adf_os_ntohl(intr);
1112 if (intr & HAL_INT_SWBA) {
1113 sc->sc_imask |= HAL_INT_SWBA;
1115 sc->sc_imask &= ~HAL_INT_SWBA;
1118 if (intr & HAL_INT_BMISS) {
1119 sc->sc_imask |= HAL_INT_BMISS;
1122 ah->ah_setInterrupts(ah, sc->sc_imask);
1123 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo,NULL, 0);
1126 static void ath_init_tgt(void *Context, A_UINT16 Command,
1127 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1129 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1130 struct ath_hal *ah = sc->sc_ah;
1132 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1133 | HAL_INT_RXEOL | HAL_INT_RXORN
1134 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1136 sc->sc_imask |= HAL_INT_GTT;
1138 if (ath_hal_getcapability(ah, HAL_CAP_HT))
1139 sc->sc_imask |= HAL_INT_CST;
1141 adf_os_setup_intr(sc->sc_dev, ath_intr);
1142 ah->ah_setInterrupts(ah, sc->sc_imask);
1144 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1147 static void ath_int_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1148 A_UINT8 *data, a_int32_t datalen)
1150 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1152 struct fusion_stats {
1154 a_uint32_t ast_rxorn;
1155 a_uint32_t ast_rxeol;
1156 a_uint32_t ast_txurn;
1157 a_uint32_t ast_txto;
1161 struct fusion_stats stats;
1163 stats.ast_rx = sc->sc_int_stats.ast_rx;
1164 stats.ast_rxorn = sc->sc_int_stats.ast_rxorn;
1165 stats.ast_rxeol = sc->sc_int_stats.ast_rxeol;
1166 stats.ast_txurn = sc->sc_int_stats.ast_txurn;
1167 stats.ast_txto = sc->sc_int_stats.ast_txto;
1168 stats.ast_cst = sc->sc_int_stats.ast_cst;
1170 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1173 static void ath_tx_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1174 A_UINT8 *data, a_int32_t datalen)
1176 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1178 struct fusion_stats {
1179 a_uint32_t ast_tx_xretries;
1180 a_uint32_t ast_tx_fifoerr;
1181 a_uint32_t ast_tx_filtered;
1182 a_uint32_t ast_tx_timer_exp;
1183 a_uint32_t ast_tx_shortretry;
1184 a_uint32_t ast_tx_longretry;
1186 a_uint32_t tx_qnull;
1187 a_uint32_t tx_noskbs;
1188 a_uint32_t tx_nobufs;
1191 struct fusion_stats stats;
1193 stats.ast_tx_xretries = sc->sc_tx_stats.ast_tx_xretries;
1194 stats.ast_tx_fifoerr = sc->sc_tx_stats.ast_tx_fifoerr;
1195 stats.ast_tx_filtered = sc->sc_tx_stats.ast_tx_filtered;
1196 stats.ast_tx_timer_exp = sc->sc_tx_stats.ast_tx_timer_exp;
1197 stats.ast_tx_shortretry = sc->sc_tx_stats.ast_tx_shortretry;
1198 stats.ast_tx_longretry = sc->sc_tx_stats.ast_tx_longretry;
1199 stats.tx_qnull = sc->sc_tx_stats.tx_qnull;
1200 stats.tx_noskbs = sc->sc_tx_stats.tx_noskbs;
1201 stats.tx_nobufs = sc->sc_tx_stats.tx_nobufs;
1203 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1206 static void ath_rx_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1207 A_UINT8 *data, a_int32_t datalen)
1209 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1211 struct fusion_stats {
1212 a_uint32_t ast_rx_nobuf;
1213 a_uint32_t ast_rx_send;
1214 a_uint32_t ast_rx_done;
1217 struct fusion_stats stats;
1219 stats.ast_rx_nobuf = sc->sc_rx_stats.ast_rx_nobuf;
1220 stats.ast_rx_send = sc->sc_rx_stats.ast_rx_send;
1221 stats.ast_rx_done = sc->sc_rx_stats.ast_rx_done;
1223 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1226 static void ath_get_tgt_version(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1227 A_UINT8 *data, a_int32_t datalen)
1229 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1230 struct wmi_fw_version ver;
1232 ver.major = ATH_VERSION_MAJOR;
1233 ver.minor = ATH_VERSION_MINOR;
1235 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &ver, sizeof(ver));
/*
 * WMI handler: enable or disable A-MPDU aggregation for one node/TID.
 * Validates the node index, node liveness and TID before flipping
 * TID_AGGR_ENABLED; disabling an active TID also flushes its
 * aggregation state via ath_tgt_tx_cleanup().  Always acks the command.
 */
1238 static void ath_enable_aggr_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1239 A_UINT8 *data, a_int32_t datalen)
1241 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1242 struct ath_aggr_info *aggr = (struct ath_aggr_info *)data;
1243 a_uint8_t nodeindex = aggr->nodeindex;
1244 a_uint8_t tidno = aggr->tidno;
1245 struct ath_node_target *an = NULL ;
1246 struct ath_atx_tid *tid = NULL;
/* Reject out-of-range node indices. */
1248 if (nodeindex >= TARGET_NODE_MAX) {
1252 an = &sc->sc_sta[nodeindex];
/* Reject nodes that were never created or already cleaned up. */
1253 if (!an->an_valid) {
1257 if (tidno >= WME_NUM_TID) {
1258 adf_os_print("[%s] enable_aggr with invalid tid %d(node = %d)\n",
1259 __FUNCTION__, tidno, nodeindex);
1263 tid = ATH_AN_2_TID(an, tidno);
1265 if (aggr->aggr_enable) {
1266 tid->flag |= TID_AGGR_ENABLED;
1267 } else if ( tid->flag & TID_AGGR_ENABLED ) {
/* Disabling a live aggregation session: tear down its TX state. */
1268 tid->flag &= ~TID_AGGR_ENABLED;
1269 ath_tgt_tx_cleanup(sc, an, tid, 1);
1272 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1275 static void ath_ic_update_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1276 A_UINT8 *data, a_int32_t datalen)
1278 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1279 struct ieee80211com_target *ic = (struct ieee80211com_target * )data;
1280 struct ieee80211com_target *ictgt = &sc->sc_ic ;
1282 adf_os_mem_copy(ictgt, ic, sizeof(struct ieee80211com_target));
1284 ictgt->ic_ampdu_limit = adf_os_ntohl(ic->ic_ampdu_limit);
1286 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1289 static void ath_vap_create_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1290 A_UINT8 *data, a_int32_t datalen)
1292 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1293 struct ieee80211vap_target *vap;
1294 a_uint8_t vap_index;
1296 vap = (struct ieee80211vap_target *)data;
1298 vap->iv_rtsthreshold = adf_os_ntohs(vap->iv_rtsthreshold);
1299 vap->iv_opmode = adf_os_ntohl(vap->iv_opmode);
1301 vap_index = vap->iv_vapindex;
1303 adf_os_assert(sc->sc_vap[vap_index].av_valid == 0);
1305 adf_os_mem_copy(&(sc->sc_vap[vap_index].av_vap), vap,
1308 sc->sc_vap[vap_index].av_bcbuf = asf_tailq_first(&(sc->sc_bbuf));
1309 sc->sc_vap[vap_index].av_valid = 1;
1311 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_NODE_CREATE_CMDID): create a target-side station
 * entry from a host-supplied ieee80211_node_target, link it to its
 * owning VAP, reset per-node crypto/sequence state, and run the
 * TX-path node init.
 *
 * NOTE(review): node_index is host-controlled and used to index
 * sc_sta[] without a bounds check (compare ath_enable_aggr_tgt,
 * which does validate against TARGET_NODE_MAX).
 */
1314 static void ath_node_create_tgt(void *Context, A_UINT16 Command,
1315 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1317 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1318 struct ieee80211_node_target *node;
1319 a_uint8_t vap_index;
1320 a_uint8_t node_index;
1322 node = (struct ieee80211_node_target *)data;
1324 node_index = node->ni_nodeindex;
/* Multi-byte fields arrive network-order; swap before copying. */
1326 node->ni_htcap = adf_os_ntohs(node->ni_htcap);
1327 node->ni_flags = adf_os_ntohs(node->ni_flags);
1328 node->ni_maxampdu = adf_os_ntohs(node->ni_maxampdu);
1330 adf_os_mem_copy(&(sc->sc_sta[node_index].ni), node,
/* Back-pointer from node to its VAP ... */
1333 vap_index = sc->sc_sta[node_index].ni.ni_vapindex;
1334 sc->sc_sta[node_index].ni.ni_vap = &(sc->sc_vap[vap_index].av_vap);
/* ... and, for the VAP's own (self) node, forward-pointer from VAP to node. */
1335 if(sc->sc_sta[node_index].ni.ni_is_vapnode == 1)
1336 sc->sc_vap[vap_index].av_vap.iv_nodeindex = node_index;
/* Fresh node: clear mgmt TX sequence and TKIP IV replay counters. */
1338 sc->sc_sta[node_index].an_valid = 1;
1339 sc->sc_sta[node_index].ni.ni_txseqmgmt = 0;
1340 sc->sc_sta[node_index].ni.ni_iv16 = 0;
1341 sc->sc_sta[node_index].ni.ni_iv32 = 0;
1343 owl_tgt_node_init(&sc->sc_sta[node_index]);
1345 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_NODE_REMOVE_CMDID): mark a station entry invalid.
 * The payload is a single byte holding the node index.
 *
 * NOTE(review): node_index is host-controlled and not range-checked.
 */
1348 static void ath_node_cleanup_tgt(void *Context, A_UINT16 Command,
1349 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1351 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1352 a_uint8_t node_index;
1353 a_uint8_t *nodedata;
1355 nodedata = (a_uint8_t *)data;
1356 node_index = *nodedata;
1357 sc->sc_sta[node_index].an_valid = 0;
1359 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_NODE_UPDATE_CMDID): refresh an existing station
 * entry from host data. Mirrors ath_node_create_tgt except that it
 * neither sets an_valid nor re-runs owl_tgt_node_init, and it does
 * not update the VAP's iv_nodeindex.
 *
 * NOTE(review): node_index is host-controlled and not range-checked.
 */
1362 static void ath_node_update_tgt(void *Context, A_UINT16 Command,
1363 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1365 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1366 struct ieee80211_node_target *node;
1367 a_uint8_t vap_index;
1368 a_uint8_t node_index;
1370 node = (struct ieee80211_node_target *)data;
1372 node_index = node->ni_nodeindex;
/* Multi-byte fields arrive network-order; swap before copying. */
1374 node->ni_htcap = adf_os_ntohs(node->ni_htcap);
1375 node->ni_flags = adf_os_ntohs(node->ni_flags);
1376 node->ni_maxampdu = adf_os_ntohs(node->ni_maxampdu);
1378 adf_os_mem_copy(&(sc->sc_sta[node_index].ni), node,
/* Re-link the node to its owning VAP after the copy. */
1381 vap_index = sc->sc_sta[node_index].ni.ni_vapindex;
1382 sc->sc_sta[node_index].ni.ni_vap = &(sc->sc_vap[vap_index].av_vap);
/* Reset mgmt TX sequence and TKIP IV replay counters on update. */
1384 sc->sc_sta[node_index].ni.ni_txseqmgmt = 0;
1385 sc->sc_sta[node_index].ni.ni_iv16 = 0;
1386 sc->sc_sta[node_index].ni.ni_iv32 = 0;
1388 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * Register read with address-dependent routing:
 *  - 0x2000..0x3fff: SEEPROM window — poll 0x407c for the ready bits
 *    (0x00030000 clear) and return the low 16 data bits;
 *  - addresses above 0xffff: plain SoC-space read;
 *  - everything else: MAC register read.
 */
1391 static a_int32_t ath_reg_read_filter(struct ath_hal *ah, a_int32_t addr)
1393 if ((addr & 0xffffe000) == 0x2000) {
1394 /* SEEPROM registers */
/* On timeout we still fall through and return whatever 0x407c holds. */
1396 if (!ath_hal_wait(ah, 0x407c, 0x00030000, 0))
1397 adf_os_print("SEEPROM Read fail: 0x%08x\n", addr);
1399 return ioread32_mac(0x407c) & 0x0000ffff;
1400 } else if (addr > 0xffff)
1402 return ioread32(addr);
1405 return ioread32_mac(addr);
/*
 * WMI handler (WMI_REG_READ_CMDID): the payload is an array of
 * network-order 32-bit register addresses; read each through
 * ath_reg_read_filter() and reply with an equal-length array of
 * network-order values.
 *
 * NOTE(review): datalen is host-controlled and not checked against
 * the capacity of val[] (declared in lines elided from this view) —
 * confirm an upper bound exists.
 */
1408 static void ath_hal_reg_read_tgt(void *Context, A_UINT16 Command,
1409 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1411 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1412 struct ath_hal *ah = sc->sc_ah;
1417 for (i = 0; i < datalen; i += sizeof(a_int32_t)) {
1418 addr = *(a_uint32_t *)(data + i);
1419 addr = adf_os_ntohl(addr);
/* Swap back to network order for the reply (same op as htonl). */
1421 val[i/sizeof(a_int32_t)] =
1422 adf_os_ntohl(ath_reg_read_filter(ah, addr));
1425 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &val[0], datalen);
/*
 * One-shot PLL reset, performed at most once per firmware lifetime
 * (guarded by the function-static reset_pll flag). The sequence is
 * platform specific: K2 toggles bits via the core power-down control
 * register and MAC register 0x786c; non-FPGA Magpie pulses bits in
 * MAC register 0x7890.
 */
1428 static void ath_pll_reset_ones(struct ath_hal *ah)
1430 static uint8_t reset_pll = 0;
1432 if(reset_pll == 0) {
1433 #if defined(PROJECT_K2)
1434 /* here we write to core register */
1435 iowrite32(MAGPIE_REG_RST_PWDN_CTRL_ADDR, 0x0);
1436 /* and here to mac register */
/* Pulse bits 25-26 of 0x786c: set then clear. */
1437 iowrite32_mac(0x786c,
1438 ioread32_mac(0x786c) | 0x6000000);
1439 iowrite32_mac(0x786c,
1440 ioread32_mac(0x786c) & (~0x6000000));
1442 iowrite32(MAGPIE_REG_RST_PWDN_CTRL_ADDR, 0x20);
1444 #elif defined(PROJECT_MAGPIE) && !defined (FPGA)
/* Pulse bits 23-24 of 0x7890: set then clear. */
1445 iowrite32_mac(0x7890,
1446 ioread32_mac(0x7890) | 0x1800000);
1447 iowrite32_mac(0x7890,
1448 ioread32_mac(0x7890) & (~0x1800000));
/*
 * Register write with platform-specific side handling before the
 * actual MAC write: on K2, a write to 0x50040 (clock control —
 * presumably; TODO confirm) triggers a UART re-init at the new core
 * clock, and the one-shot PLL reset is applied before the MAC write.
 */
1454 static void ath_hal_reg_write_filter(struct ath_hal *ah,
1455 a_uint32_t reg, a_uint32_t val)
1458 iowrite32(reg, val);
1459 #if defined(PROJECT_K2)
1460 if(reg == 0x50040) {
1461 static uint8_t flg=0;
1464 /* reinit clock and uart.
1465 * TODO: Independent on what host will
1466 * here set. We do our own decision. Why? */
/* 117 MHz core clock, 19200 baud. */
1468 A_UART_HWINIT(117*1000*1000, 19200);
1475 ath_pll_reset_ones(ah);
1477 iowrite32_mac(reg, val);
/*
 * WMI handler (WMI_REG_WRITE_CMDID): the payload is an array of
 * {reg, val} pairs; apply each through ath_hal_reg_write_filter()
 * and acknowledge with an empty response.
 */
1481 static void ath_hal_reg_write_tgt(void *Context, A_UINT16 Command,
1482 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1484 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1485 struct ath_hal *ah = sc->sc_ah;
/* On-the-wire layout of one write request. */
1487 struct registerWrite {
1492 for (i = 0; i < datalen; i += sizeof(struct registerWrite)) {
1493 t = (struct registerWrite *)(data+i);
1495 ath_hal_reg_write_filter(ah, t->reg, t->val);
1498 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_REG_RMW_CMDID): read-modify-write a batch of
 * registers. Each register_rmw entry is read via the read filter,
 * modified (set/clear step elided from this view), and written back
 * via the write filter.
 */
1501 static void ath_hal_reg_rmw_tgt(void *Context, A_UINT16 Command,
1502 A_UINT16 SeqNo, A_UINT8 *data,
1505 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1506 struct ath_hal *ah = sc->sc_ah;
1507 struct register_rmw *buf = (struct register_rmw *)data;
1510 for (i = 0; i < datalen;
1511 i += sizeof(struct register_rmw)) {
1513 buf = (struct register_rmw *)(data + i);
1515 val = ath_reg_read_filter(ah, buf->reg);
1518 ath_hal_reg_write_filter(ah, buf->reg, val);
1520 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_VAP_REMOVE_CMDID): invalidate the VAP slot named
 * by the single-byte payload, drop its beacon buffer reference, and
 * tear down all nodes attached to that VAP.
 *
 * NOTE(review): vap_index is host-controlled and not range-checked.
 */
1523 static void ath_vap_delete_tgt(void *Context, A_UINT16 Command,
1524 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1526 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1527 a_uint8_t vap_index;
1529 vap_index = *(a_uint8_t *)data;
1531 sc->sc_vap[vap_index].av_valid = 0;
1532 sc->sc_vap[vap_index].av_bcbuf = NULL;
1533 ath_node_vdelete_tgt(sc, vap_index);
1534 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_DISABLE_INTR_CMDID): mask all HAL interrupts
 * (interrupt mask 0) and acknowledge.
 */
1537 static void ath_disable_intr_tgt(void *Context, A_UINT16 Command,
1538 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1540 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1541 struct ath_hal *ah = sc->sc_ah;
1543 ah->ah_setInterrupts(ah, 0);
1544 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo,NULL, 0);
/*
 * WMI handler (WMI_FLUSH_RECV_CMDID): walk the RX buffer list,
 * unmap the DMA mapping of every buffer that still holds an skb,
 * and return the skb(s) to the RX free pool.
 */
1547 static void ath_flushrecv_tgt(void *Context, A_UINT16 Command,
1548 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1550 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1551 struct ath_rx_buf *bf;
1553 asf_tailq_foreach(bf, &sc->sc_rxbuf, bf_list)
1554 if (bf->bf_skb != NULL) {
/* Unmap before freeing so the device no longer owns the memory. */
1555 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap,
1556 ADF_OS_DMA_FROM_DEVICE);
1557 ath_free_rx_skb(sc, adf_nbuf_queue_remove(&bf->bf_skbhead));
1561 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_DRAIN_TXQ_CMDID): drain a single TX queue whose
 * number arrives as a network-order 32-bit payload.
 *
 * NOTE(review): q is host-controlled and not checked against
 * HAL_NUM_TX_QUEUES before indexing via ATH_TXQ().
 */
1564 static void ath_tx_draintxq_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1565 A_UINT8 *data, a_int32_t datalen)
1567 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1568 a_uint32_t q = *(a_uint32_t *)data;
1569 struct ath_txq *txq = NULL;
1571 q = adf_os_ntohl(q);
1572 txq = ATH_TXQ(sc, q);
1574 ath_tx_draintxq(sc, txq);
1575 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_DRAIN_TXQ_ALL_CMDID): drain every TX queue; the
 * 32-bit payload is interpreted as a HAL_BOOL flag passed through
 * to ath_draintxq() (no byte-swap applied — nonzero stays nonzero
 * either way, but TODO confirm intended endianness handling).
 */
1578 static void ath_draintxq_tgt(void *Context, A_UINT16 Command,
1579 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1581 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1582 HAL_BOOL b = (HAL_BOOL) *(a_int32_t *)data;
1584 ath_draintxq(Context, b);
1585 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_ABORT_TX_DMA_CMDID): abort all in-flight TX DMA
 * via the HAL and acknowledge.
 */
1588 static void ath_aborttx_dma_tgt(void *Context, A_UINT16 Command,
1589 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1591 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1592 struct ath_hal *ah = sc->sc_ah;
1594 ah->ah_abortTxDma(sc->sc_ah);
1595 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_ABORT_TXQ_CMDID): drain every TX queue that has
 * been set up, then acknowledge.
 */
1598 static void ath_aborttxq_tgt(void *Context, A_UINT16 Command,
1599 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1602 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1605 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
/* Only queues marked as set up are drained. */
1606 if (ATH_TXQ_SETUP(sc, i))
1607 ath_tx_draintxq(sc, ATH_TXQ(sc,i));
1610 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_STOP_TX_DMA_CMDID): stop TX DMA on the queue
 * number given by the network-order 32-bit payload.
 */
1613 static void ath_stop_tx_dma_tgt(void *Context, A_UINT16 Command,
1614 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1616 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1617 struct ath_hal *ah = sc->sc_ah;
1621 q = *(a_uint32_t *)data;
1623 q = adf_os_ntohl(q);
1624 ah->ah_stopTxDma(ah, q);
1625 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_START_RECV_CMDID): restart reception (the actual
 * start call is in lines elided from this view) and acknowledge.
 */
1628 static void ath_startrecv_tgt(void *Context, A_UINT16 Command,
1629 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1632 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1635 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_STOP_RECV_CMDID): stop reception in order —
 * PCU first, then clear the RX filter, then halt RX DMA — and
 * reset the RX descriptor link pointer.
 */
1638 static void ath_stoprecv_tgt(void *Context, A_UINT16 Command,
1639 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1641 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1642 struct ath_hal *ah = sc->sc_ah;
1644 ah->ah_stopPcuReceive(ah);
1645 ah->ah_setRxFilter(ah, 0);
1646 ah->ah_stopDmaReceive(ah);
/* No RX descriptors are chained anymore. */
1648 sc->sc_rxlink = NULL;
1649 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_SET_MODE_CMDID): switch the current PHY mode;
 * the mode arrives as a network-order 16-bit payload.
 */
1652 static void ath_setcurmode_tgt(void *Context, A_UINT16 Command,
1653 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1655 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1658 mode= *((a_uint16_t *)data);
1659 mode = adf_os_ntohs(mode);
1661 ath_setcurmode(sc, mode);
1663 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_TGT_DETACH_CMDID): tear down the target softc.
 * The WMI response is deliberately sent BEFORE freeing sc, since
 * the response path still dereferences sc->tgt_wmi_handle.
 */
1666 static void ath_detach_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1667 A_UINT8 *data, a_int32_t datalen)
1669 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1670 struct ath_hal *ah = sc->sc_ah;
1674 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1675 adf_os_mem_free(sc);
/*
 * WMI handler (WMI_ECHO_CMDID): echo the payload straight back to
 * the host. Note pContext here is the WMI handle itself, not the
 * softc, and the command ID is hard-coded rather than taken from
 * the Command argument.
 */
1678 static void handle_echo_command(void *pContext, A_UINT16 Command,
1679 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1681 wmi_cmd_rsp(pContext, WMI_ECHO_CMDID, SeqNo, buffer, Length);
/*
 * WMI handler (WMI_RC_STATE_CHANGE_CMDID): forward a rate-control
 * state change for a VAP to ath_rate_newstate(), byte-swapping the
 * 32-bit capability flags first.
 */
1684 static void handle_rc_state_change_cmd(void *Context, A_UINT16 Command,
1685 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1688 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1689 struct wmi_rc_state_change_cmd *wmi_data = (struct wmi_rc_state_change_cmd *)buffer;
1691 a_uint32_t capflag = adf_os_ntohl(wmi_data->capflag);
1693 ath_rate_newstate(sc, &sc->sc_vap[wmi_data->vap_index].av_vap,
1694 wmi_data->vap_state,
1698 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_RC_RATE_UPDATE_CMDID): forward a per-node rate
 * update to ath_rate_node_update(), byte-swapping the 32-bit
 * capability flags first.
 */
1701 static void handle_rc_rate_update_cmd(void *Context, A_UINT16 Command,
1702 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1704 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1705 struct wmi_rc_rate_update_cmd *wmi_data = (struct wmi_rc_rate_update_cmd *)buffer;
1707 a_uint32_t capflag = adf_os_ntohl(wmi_data->capflag);
1709 ath_rate_node_update(sc, &sc->sc_sta[wmi_data->node_index],
1714 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler (WMI_ACCESS_MEMORY_CMDID): placeholder — no body is
 * visible in this view; presumably a stub. TODO confirm.
 */
1717 static void dispatch_magpie_sys_cmds(void *pContext, A_UINT16 Command,
1718 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
/*
 * WMI handler (WMI_BITRATE_MASK_CMDID): install a per-VAP, per-band
 * rate bitmask and cache the index of the lowest enabled rate
 * (0 when the mask is empty).
 *
 * NOTE(review): vap_index and band are host-controlled and not
 * range-checked before indexing.
 */
1723 static void ath_rc_mask_tgt(void *Context, A_UINT16 Command,
1724 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1726 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1727 struct wmi_rc_rate_mask_cmd *wmi_data = (struct wmi_rc_rate_mask_cmd *)buffer;
1730 idx = wmi_data->vap_index;
1731 band = wmi_data->band;
/* Mask arrives in network byte order. */
1733 sc->sc_vap[idx].av_rate_mask[band] = adf_os_ntohl(wmi_data->mask);
1735 if (sc->sc_vap[idx].av_rate_mask[band]) {
/* Scan from bit 0 upward; first set bit is the minimum rate index. */
1736 for (i = 0; i < RATE_TABLE_SIZE; i++) {
1737 if ((1 << i) & sc->sc_vap[idx].av_rate_mask[band]) {
1738 sc->sc_vap[idx].av_minrateidx[band] = i;
/* Empty mask: fall back to rate index 0. */
1743 sc->sc_vap[idx].av_minrateidx[band] = 0;
1746 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI dispatch table: maps each WMI command ID to its handler.
 * Registered with the WMI layer in tgt_hif_htc_wmi_init(); the
 * third field of each entry (flags/reserved — TODO confirm) is 0.
 */
1749 static WMI_DISPATCH_ENTRY Magpie_Sys_DispatchEntries[] =
1751 {handle_echo_command, WMI_ECHO_CMDID, 0},
1752 {dispatch_magpie_sys_cmds, WMI_ACCESS_MEMORY_CMDID, 0},
1753 {ath_get_tgt_version, WMI_GET_FW_VERSION, 0},
1754 {ath_disable_intr_tgt, WMI_DISABLE_INTR_CMDID, 0},
1755 {ath_enable_intr_tgt, WMI_ENABLE_INTR_CMDID, 0},
1756 {ath_init_tgt, WMI_ATH_INIT_CMDID, 0},
1757 {ath_aborttxq_tgt, WMI_ABORT_TXQ_CMDID, 0},
1758 {ath_stop_tx_dma_tgt, WMI_STOP_TX_DMA_CMDID, 0},
1759 {ath_aborttx_dma_tgt, WMI_ABORT_TX_DMA_CMDID, 0},
1760 {ath_tx_draintxq_tgt, WMI_DRAIN_TXQ_CMDID, 0},
1761 {ath_draintxq_tgt, WMI_DRAIN_TXQ_ALL_CMDID, 0},
1762 {ath_startrecv_tgt, WMI_START_RECV_CMDID, 0},
1763 {ath_stoprecv_tgt, WMI_STOP_RECV_CMDID, 0},
1764 {ath_flushrecv_tgt, WMI_FLUSH_RECV_CMDID, 0},
1765 {ath_setcurmode_tgt, WMI_SET_MODE_CMDID, 0},
1766 {ath_node_create_tgt, WMI_NODE_CREATE_CMDID, 0},
1767 {ath_node_cleanup_tgt, WMI_NODE_REMOVE_CMDID, 0},
1768 {ath_vap_delete_tgt, WMI_VAP_REMOVE_CMDID, 0},
1769 {ath_vap_create_tgt, WMI_VAP_CREATE_CMDID, 0},
1770 {ath_hal_reg_read_tgt, WMI_REG_READ_CMDID, 0},
1771 {ath_hal_reg_write_tgt, WMI_REG_WRITE_CMDID, 0},
1772 {handle_rc_state_change_cmd, WMI_RC_STATE_CHANGE_CMDID, 0},
1773 {handle_rc_rate_update_cmd, WMI_RC_RATE_UPDATE_CMDID, 0},
1774 {ath_ic_update_tgt, WMI_TARGET_IC_UPDATE_CMDID, 0},
1775 {ath_enable_aggr_tgt, WMI_TX_AGGR_ENABLE_CMDID, 0},
1776 {ath_detach_tgt, WMI_TGT_DETACH_CMDID, 0},
1777 {ath_node_update_tgt, WMI_NODE_UPDATE_CMDID, 0},
1778 {ath_int_stats_tgt, WMI_INT_STATS_CMDID, 0},
1779 {ath_tx_stats_tgt, WMI_TX_STATS_CMDID, 0},
1780 {ath_rx_stats_tgt, WMI_RX_STATS_CMDID, 0},
1781 {ath_rc_mask_tgt, WMI_BITRATE_MASK_CMDID, 0},
1782 {ath_hal_reg_rmw_tgt, WMI_REG_RMW_CMDID, 0},
/*
 * HTC setup-complete callback passed to HTC_init(); body is elided
 * from this view (presumably empty — TODO confirm).
 */
1789 static void htc_setup_comp(void)
/*
 * HTC service-connect callback: record which endpoint ID the host
 * connected for each service so later traffic can be routed to the
 * correct endpoint. Always reports success.
 */
1793 static A_UINT8 tgt_ServiceConnect(HTC_SERVICE *pService,
1794 HTC_ENDPOINT_ID eid,
1798 a_int32_t *pLengthOut)
1800 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)pService->ServiceCtx;
1802 switch(pService->ServiceID) {
1803 case WMI_CONTROL_SVC:
1804 sc->wmi_command_ep= eid;
1806 case WMI_BEACON_SVC:
/* Per-AC data endpoints (voice/video/best-effort/background). */
1818 case WMI_DATA_VO_SVC:
1819 sc->data_VO_ep = eid;
1821 case WMI_DATA_VI_SVC:
1822 sc->data_VI_ep = eid;
1824 case WMI_DATA_BE_SVC:
1825 sc->data_BE_ep = eid;
1827 case WMI_DATA_BK_SVC:
1828 sc->data_BK_ep = eid;
1834 return HTC_SERVICE_SUCCESS;
/*
 * Helper: fill in one HTC_SERVICE descriptor with common defaults
 * (shared send-complete and connect callbacks, 1600-byte max message)
 * plus the caller's service ID and receive handler, then register it
 * with the HTC layer.
 */
1837 static void tgt_reg_service(struct ath_softc_tgt *sc, HTC_SERVICE *svc,
1838 int svcId, HTC_SERVICE_ProcessRecvMsg recvMsg)
1840 svc->ProcessRecvMsg = recvMsg;
1841 svc->ProcessSendBufferComplete = tgt_HTCSendCompleteHandler;
1842 svc->ProcessConnect = tgt_ServiceConnect;
1843 svc->MaxSvcMsgSize = 1600;
1844 svc->TrailerSpcCheckLimit = 0;
1845 svc->ServiceID = svcId;
1846 svc->ServiceCtx = sc;
1847 HTC_RegisterService(sc->tgt_htc_handle, svc);
/*
 * Bring up the target-side transport stack in dependency order:
 * buffer pool -> HIF -> HTC -> per-service registration -> WMI with
 * its dispatch table -> notify/ready handshake with the host.
 */
1850 static void tgt_hif_htc_wmi_init(struct ath_softc_tgt *sc)
1852 HTC_CONFIG htc_conf;
1853 WMI_SVC_CONFIG wmiConfig;
1854 WMI_DISPATCH_TABLE *Magpie_Sys_Commands_Tbl;
1856 /* Init dynamic buf pool */
1857 sc->pool_handle = BUF_Pool_init(sc->sc_hdl);
1859 /* Init target-side HIF */
1860 sc->tgt_hif_handle = HIF_init(0);
1862 /* Init target-side HTC */
1863 htc_conf.HIFHandle = sc->tgt_hif_handle;
/* Credit-based flow control: ATH_TXBUF credits of 320 bytes each. */
1864 htc_conf.CreditSize = 320;
1865 htc_conf.CreditNumber = ATH_TXBUF;
1866 htc_conf.OSHandle = sc->sc_hdl;
1867 htc_conf.PoolHandle = sc->pool_handle;
1868 sc->tgt_htc_handle = HTC_init(htc_setup_comp, &htc_conf);
1869 #if defined(PROJECT_MAGPIE)
1870 init_htc_handle = sc->tgt_htc_handle;
/* Register every HTC service: beacon/CAB/UAPSD/mgmt each have a
 * dedicated receive handler; the four data ACs share one. */
1873 tgt_reg_service(sc, &sc->htc_beacon_service, WMI_BEACON_SVC, tgt_HTCRecv_beaconhandler);
1874 tgt_reg_service(sc, &sc->htc_cab_service, WMI_CAB_SVC, tgt_HTCRecv_cabhandler);
1875 tgt_reg_service(sc, &sc->htc_uapsd_service, WMI_UAPSD_SVC, tgt_HTCRecv_uapsdhandler);
1876 tgt_reg_service(sc, &sc->htc_mgmt_service, WMI_MGMT_SVC, tgt_HTCRecv_mgmthandler);
1877 tgt_reg_service(sc, &sc->htc_data_BE_service, WMI_DATA_BE_SVC, tgt_HTCRecvMessageHandler);
1878 tgt_reg_service(sc, &sc->htc_data_BK_service, WMI_DATA_BK_SVC, tgt_HTCRecvMessageHandler);
1879 tgt_reg_service(sc, &sc->htc_data_VI_service, WMI_DATA_VI_SVC, tgt_HTCRecvMessageHandler);
1880 tgt_reg_service(sc, &sc->htc_data_VO_service, WMI_DATA_VO_SVC, tgt_HTCRecvMessageHandler);
1882 /* Init target-side WMI */
/* NOTE(review): allocation result is used without a NULL check. */
1883 Magpie_Sys_Commands_Tbl = (WMI_DISPATCH_TABLE *)adf_os_mem_alloc(sizeof(WMI_DISPATCH_TABLE));
1884 adf_os_mem_zero(Magpie_Sys_Commands_Tbl, sizeof(WMI_DISPATCH_TABLE));
1885 Magpie_Sys_Commands_Tbl->NumberOfEntries = WMI_DISPATCH_ENTRY_COUNT(Magpie_Sys_DispatchEntries);
1886 Magpie_Sys_Commands_Tbl->pTable = Magpie_Sys_DispatchEntries;
1888 adf_os_mem_zero(&wmiConfig, sizeof(WMI_SVC_CONFIG));
1889 wmiConfig.HtcHandle = sc->tgt_htc_handle;
1890 wmiConfig.PoolHandle = sc->pool_handle;
1891 wmiConfig.MaxCmdReplyEvts = ATH_WMI_MAX_CMD_REPLY;
1892 wmiConfig.MaxEventEvts = ATH_WMI_MAX_EVENTS;
1894 sc->tgt_wmi_handle = WMI_Init(&wmiConfig);
/* Handlers receive sc as their Context argument. */
1895 Magpie_Sys_Commands_Tbl->pContext = sc;
1896 WMI_RegisterDispatchTable(sc->tgt_wmi_handle, Magpie_Sys_Commands_Tbl);
1898 HTC_NotifyTargetInserted(sc->tgt_htc_handle);
1900 /* Start HTC messages exchange */
1901 HTC_Ready(sc->tgt_htc_handle);
/*
 * Main target attach: probe the PCI cache line size, set up deferred
 * tasklets, attach the HAL, bring up HIF/HTC/WMI, configure rate
 * control and per-node rate state, allocate descriptors and the RX
 * buffer pool, set up TX queues, and leave interrupts masked.
 * Returns 0 on success (error paths are in lines elided here).
 */
1904 a_int32_t ath_tgt_attach(a_uint32_t devid, struct ath_softc_tgt *sc, adf_os_device_t osdev)
1908 a_int32_t error = 0, i, flags = 0;
1911 adf_os_pci_config_read8(osdev, ATH_PCI_CACHE_LINE_SIZE, &csz);
/* PCI cache line size register is in 32-bit words; convert to bytes. */
1915 sc->sc_cachelsz = csz << 2;
/* Deferred-work contexts for RX, TX, beacon-miss and fatal events. */
1920 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_rxtq, ath_tgt_rx_tasklet, sc);
1921 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_txtq, owl_tgt_tx_tasklet, sc);
1922 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_bmisstq, ath_bmiss_tasklet, sc);
1923 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_fataltq, ath_fatal_tasklet, sc);
1925 flags |= AH_USE_EEPROM;
1926 ah = _ath_hal_attach_tgt(devid, sc, sc->sc_dev, flags, &status);
1933 tgt_hif_htc_wmi_init(sc);
/* Highest-numbered HW queue is reserved for beacons. */
1935 sc->sc_bhalq = HAL_NUM_TX_QUEUES - 1;
1937 ath_rate_setup(sc, IEEE80211_MODE_11NA);
1938 ath_rate_setup(sc, IEEE80211_MODE_11NG);
1940 sc->sc_rc = ath_rate_attach(sc);
1941 if (sc->sc_rc == NULL) {
/* Per-node rate-control scratch space for every station slot. */
1946 for (i=0; i < TARGET_NODE_MAX; i++) {
1947 sc->sc_sta[i].an_rcnode = adf_os_mem_alloc(sc->sc_rc->arc_space);
1950 error = ath_desc_alloc(sc);
/* RX buffer pool: ath_numrxdescs buffers of 1664 bytes. */
1955 BUF_Pool_create_pool(sc->pool_handle, POOL_ID_WLAN_RX_BUF, ath_numrxdescs, 1664);
1957 ath_tgt_txq_setup(sc);
/* Interrupts stay masked until the host enables them via WMI. */
1959 ah->ah_setInterrupts(ah, 0);
/*
 * Tear down the transport stack in reverse order of
 * tgt_hif_htc_wmi_init(): detach notification, then
 * WMI -> HTC -> HIF -> buffer pool.
 */
1969 static void tgt_hif_htc_wmi_shutdown(struct ath_softc_tgt *sc)
1971 HTC_NotifyTargetDetached(sc->tgt_htc_handle);
1973 WMI_Shutdown(sc->tgt_wmi_handle);
1974 HTC_Shutdown(sc->tgt_htc_handle);
1975 HIF_shutdown(sc->tgt_hif_handle);
1976 BUF_Pool_shutdown(sc->pool_handle);
1979 a_int32_t ath_detach(struct ath_softc_tgt *sc)
1981 tgt_hif_htc_wmi_shutdown(sc);