2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted (subject to the limitations in the
7 * disclaimer below) provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
17 * * Neither the name of Qualcomm Atheros nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
22 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
23 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
33 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <adf_os_types.h>
37 #include <adf_os_pci.h>
38 #include <adf_os_dma.h>
39 #include <adf_os_timer.h>
40 #include <adf_os_lock.h>
41 #include <adf_os_io.h>
42 #include <adf_os_mem.h>
43 #include <adf_os_util.h>
44 #include <adf_os_stdtypes.h>
45 #include <adf_os_defer.h>
46 #include <adf_os_atomic.h>
49 #include <adf_net_wcmd.h>
50 #include <adf_os_irq.h>
52 #include <if_ath_pci.h>
53 #include "if_ethersubr.h"
55 #include "ieee80211_var.h"
56 #include "ieee80211_proto.h"
57 #include "if_athrate.h"
58 #include "if_athvar.h"
/* RX buffer/descriptor counts; -1 means "use the compile-time defaults"
 * (resolved to ATH_RXBUF/ATH_RXDESC in ath_desc_alloc). */
61 static a_int32_t ath_numrxbufs = -1;
62 static a_int32_t ath_numrxdescs = -1;
64 #if defined(PROJECT_MAGPIE)
/* HTC handle storage on Magpie-based targets. */
65 uint32_t *init_htc_handle = 0;
/* HTC endpoint on which received frames are sent up to the host. */
68 #define RX_ENDPOINT_ID 3
/* TSF delta (presumably usec — TODO confirm) after an SWBA beyond which
 * CAB traffic is dropped instead of transmitted; see the cab handler. */
69 #define ATH_CABQ_HANDLING_THRESHOLD 9000
/* Forward declarations and externs used by this file; the extern
 * implementations live in other translation units of the firmware. */
73 void owl_tgt_tx_tasklet(TQUEUE_ARG data);
74 static void ath_tgt_send_beacon(struct ath_softc_tgt *sc,adf_nbuf_t bc_hdr,adf_nbuf_t nbuf,HTC_ENDPOINT_ID EndPt);
75 static void ath_hal_reg_write_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen);
76 extern struct ath_tx_buf* ath_tgt_tx_prepare(struct ath_softc_tgt *sc, adf_nbuf_t skb, ath_data_hdr_t *dh);
77 extern void ath_tgt_send_mgt(struct ath_softc_tgt *sc,adf_nbuf_t mgt_hdr, adf_nbuf_t skb,HTC_ENDPOINT_ID EndPt);
78 extern HAL_BOOL ath_hal_wait(struct ath_hal *ah, a_uint32_t reg, a_uint32_t mask, a_uint32_t val);
79 extern void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq, owl_txq_state_t txqstate);
80 void owl_tgt_node_init(struct ath_node_target * an);
81 void ath_tgt_tx_sched_normal(struct ath_softc_tgt *sc, struct ath_buf *bf);
82 void ath_tgt_tx_sched_nonaggr(struct ath_softc_tgt *sc,struct ath_buf * bf_host);
88 #undef adf_os_cpu_to_le16
90 static a_uint16_t adf_os_cpu_to_le16(a_uint16_t x)
92 return ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8));
96 * Extend a 32 bit TSF to 64 bit, taking wrapping into account.
98 static u_int64_t ath_extend_tsf(struct ath_softc_tgt *sc, u_int32_t rstamp)
104 tsf = ath_hal_gettsf64(sc->sc_ah);
105 tsf_low = tsf & 0xffffffff;
106 tsf64 = (tsf & ~0xffffffffULL) | rstamp;
108 if (rstamp > tsf_low && (rstamp - tsf_low > 0x10000000))
109 tsf64 -= 0x100000000ULL;
111 if (rstamp < tsf_low && (tsf_low - rstamp > 0x10000000))
112 tsf64 += 0x100000000ULL;
117 static a_int32_t ath_rate_setup(struct ath_softc_tgt *sc, a_uint32_t mode)
119 struct ath_hal *ah = sc->sc_ah;
120 const HAL_RATE_TABLE *rt;
123 case IEEE80211_MODE_11NA:
124 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11NA);
126 case IEEE80211_MODE_11NG:
127 sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11NG);
132 rt = sc->sc_rates[mode];
139 static void ath_setcurmode(struct ath_softc_tgt *sc,
140 enum ieee80211_phymode mode)
142 const HAL_RATE_TABLE *rt;
145 adf_os_mem_set(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
147 rt = sc->sc_rates[mode];
148 adf_os_assert(rt != NULL);
150 for (i = 0; i < rt->rateCount; i++) {
151 sc->sc_rixmap[rt->info[i].rateCode] = i;
154 sc->sc_currates = rt;
155 sc->sc_curmode = mode;
156 sc->sc_protrix = ((mode == IEEE80211_MODE_11NG) ? 3 : 0);
160 void wmi_event(wmi_handle_t handle, WMI_EVENT_ID evt_id,
161 void *buffer, a_int32_t Length)
163 adf_nbuf_t netbuf = ADF_NBUF_NULL;
166 netbuf = WMI_AllocEvent(handle, WMI_EVT_CLASS_CMD_EVENT,
167 sizeof(WMI_CMD_HDR) + Length);
169 if (netbuf == ADF_NBUF_NULL) {
170 adf_os_print("Buf null\n");
174 if (buffer != NULL && Length != 0 && Length < WMI_SVC_MAX_BUFFERED_EVENT_SIZE) {
175 pData = adf_nbuf_put_tail(netbuf, Length);
176 adf_os_mem_copy(pData, buffer, Length);
179 WMI_SendEvent(handle, netbuf, evt_id, 0, Length);
182 void wmi_cmd_rsp(void *pContext, WMI_COMMAND_ID cmd_id, A_UINT16 SeqNo,
183 void *buffer, a_int32_t Length)
185 adf_nbuf_t netbuf = ADF_NBUF_NULL;
188 netbuf = WMI_AllocEvent(pContext, WMI_EVT_CLASS_CMD_REPLY,
189 sizeof(WMI_CMD_HDR) + Length);
191 if (netbuf == ADF_NBUF_NULL) {
196 if (Length != 0 && buffer != NULL) {
197 pData = (A_UINT8 *)adf_nbuf_put_tail(netbuf, Length);
198 adf_os_mem_copy(pData, buffer, Length);
201 WMI_SendEvent(pContext, netbuf, cmd_id, SeqNo, Length);
204 static void ath_node_vdelete_tgt(struct ath_softc_tgt *sc, a_uint8_t vap_index)
208 for (i = 0; i < TARGET_NODE_MAX; i++) {
209 if(sc->sc_sta[i].ni.ni_vapindex == vap_index)
210 sc->sc_sta[i].an_valid = 0;
214 a_uint8_t ath_get_minrateidx(struct ath_softc_tgt *sc, struct ath_vap_target *avp)
216 if (sc->sc_curmode == IEEE80211_MODE_11NG)
217 return avp->av_minrateidx[0];
218 else if (sc->sc_curmode == IEEE80211_MODE_11NA)
219 return avp->av_minrateidx[1];
228 static adf_nbuf_t ath_alloc_skb_align(struct ath_softc_tgt *sc,
229 a_uint32_t size, a_uint32_t align)
233 skb = BUF_Pool_alloc_buf_align(sc->pool_handle, POOL_ID_WLAN_RX_BUF,
234 RX_HEADER_SPACE, align);
/*
 * (Re)arm one RX descriptor: attach a fresh pool buffer if needed,
 * DMA-map it, program the HAL RX descriptor and link it onto the
 * hardware RX chain.  One descriptor is always kept back in
 * sc_rxdesc_held so the hardware list is never fully consumed.
 * NOTE(review): this listing has elided lines (braces/locals/returns);
 * comments describe only the visible statements.
 */
238 static a_int32_t ath_rxdesc_init(struct ath_softc_tgt *sc, struct ath_rx_desc *ds)
240 struct ath_hal *ah = sc->sc_ah;
241 struct ath_rx_desc *ds_held;
/* First call: nothing held yet — just park this descriptor. */
245 if (!sc->sc_rxdesc_held) {
246 sc->sc_rxdesc_held = ds;
/* Swap: work on the previously held descriptor, hold this one. */
250 ds_held = sc->sc_rxdesc_held;
251 sc->sc_rxdesc_held = ds;
/* Attach a buffer if the descriptor has none; on pool exhaustion,
 * count it and keep the descriptor held. */
254 if (ds->ds_nbuf == ADF_NBUF_NULL) {
255 ds->ds_nbuf = ath_alloc_skb_align(sc, sc->sc_rxbufsize, sc->sc_cachelsz);
256 if (ds->ds_nbuf == ADF_NBUF_NULL) {
257 sc->sc_rxdesc_held = ds;
258 sc->sc_rx_stats.ast_rx_nobuf++;
/* Map for device->CPU DMA and record the buffer's physical address. */
261 adf_nbuf_map(sc->sc_dev, ds->ds_dmap, ds->ds_nbuf, ADF_OS_DMA_FROM_DEVICE);
262 adf_nbuf_dmamap_info(ds->ds_dmap, &ds->ds_dmap_info);
263 ds->ds_data = ds->ds_dmap_info.dma_segs[0].paddr;
267 adf_nbuf_peek_header(ds->ds_nbuf, &anbdata, &anblen);
/* Program the HAL descriptor with the buffer's available space. */
269 ath_hal_setuprxdesc(ah, ds,
270 adf_nbuf_tailroom(ds->ds_nbuf),
/* First descriptor goes straight to the hardware; later ones are
 * chained via the previous descriptor's link word. */
273 if (sc->sc_rxlink == NULL) {
274 ath_hal_putrxbuf(ah, ds->ds_daddr);
277 *sc->sc_rxlink = ds->ds_daddr;
279 sc->sc_rxlink = &ds->ds_link;
/*
 * Completion for an RX frame sent to the host: split the sent buffer
 * back into its fragment buffers, return each to the RX pool, and
 * re-initialize the matching idle descriptors back onto the active
 * RX descriptor list.  NOTE(review): loop structure is partially
 * elided in this listing.
 */
285 static void ath_rx_complete(struct ath_softc_tgt *sc, adf_nbuf_t buf)
287 struct ath_rx_desc *ds;
289 adf_nbuf_queue_t nbuf_head;
/* Break the chained frame back into its per-descriptor fragments. */
291 adf_nbuf_split_to_frag(buf, &nbuf_head);
292 ds = asf_tailq_first(&sc->sc_rxdesc_idle);
295 struct ath_rx_desc *ds_tmp;
296 buf_tmp = adf_nbuf_queue_remove(&nbuf_head);
298 if (buf_tmp == NULL) {
/* Each fragment buffer goes back to the RX buffer pool. */
302 BUF_Pool_free_buf(sc->pool_handle, POOL_ID_WLAN_RX_BUF, buf_tmp);
305 ds = asf_tailq_next(ds, ds_list);
/* Re-arm the descriptor and move it from the idle to the active list. */
307 ath_rxdesc_init(sc, ds_tmp);
309 asf_tailq_remove(&sc->sc_rxdesc_idle, ds_tmp, ds_list);
310 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_tmp, ds_list);
314 static void tgt_HTCSendCompleteHandler(HTC_ENDPOINT_ID Endpt, adf_nbuf_t buf, void *ServiceCtx)
316 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
318 if (Endpt == RX_ENDPOINT_ID) {
319 sc->sc_rx_stats.ast_rx_done++;
320 ath_rx_complete(sc, buf);
/*
 * Walk the RX descriptor chain from interrupt context: process
 * completed descriptors, reassemble multi-descriptor ("more" flagged)
 * frames into one fragment chain, recycle the consumed descriptors,
 * and mark the corresponding rx buffer DONE for the RX tasklet.
 * NOTE(review): this listing has many elided lines (loop headers,
 * braces, locals); comments describe only the visible statements.
 */
324 static void ath_uapsd_processtriggers(struct ath_softc_tgt *sc)
326 struct ath_hal *ah = sc->sc_ah;
327 struct ath_rx_buf *bf = NULL;
328 struct ath_rx_desc *ds, *ds_head, *ds_tail, *ds_tmp;
331 a_uint16_t frame_len = 0;
/* Map a descriptor physical address back to its virtual address. */
334 #define PA2DESC(_sc, _pa) \
335 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
336 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
338 tsf = ath_hal_gettsf64(ah);
339 bf = asf_tailq_first(&sc->sc_rxbuf);
341 ds = asf_tailq_first(&sc->sc_rxdesc);
/* Pathological frame spanning (almost) every descriptor: drop it and
 * recycle all of its descriptors. */
347 if (cnt == ath_numrxbufs - 1) {
348 adf_os_print("VERY LONG PACKET!!!!!\n");
352 struct ath_rx_desc *ds_rmv;
353 adf_nbuf_unmap(sc->sc_dev, ds_tmp->ds_dmap, ADF_OS_DMA_FROM_DEVICE);
355 ds_tmp = asf_tailq_next(ds_tmp, ds_list);
357 if (ds_tmp == NULL) {
358 adf_os_print("ds_tmp is NULL\n");
/* Free the dropped fragment's buffer back to the pool. */
362 BUF_Pool_free_buf(sc->pool_handle, POOL_ID_WLAN_RX_BUF, ds_rmv->ds_nbuf);
363 ds_rmv->ds_nbuf = ADF_NBUF_NULL;
/* Re-arm: success puts the descriptor back on the active list,
 * failure (no buffer) parks it on the idle list. */
365 if (ath_rxdesc_init(sc, ds_rmv) == 0) {
366 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
367 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_rmv, ds_list);
370 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
371 asf_tailq_insert_tail(&sc->sc_rxdesc_idle, ds_rmv, ds_list);
374 if (ds_rmv == ds_tail) {
/* End of the hardware chain. */
381 if (ds->ds_link == 0) {
385 if (bf->bf_status & ATH_BUFSTATUS_DONE) {
/* Ask the HAL whether this descriptor has completed. */
389 retval = ath_hal_rxprocdescfast(ah, ds, ds->ds_daddr,
390 PA2DESC(sc, ds->ds_link), &bf->bf_rx_status);
391 if (HAL_EINPROGRESS == retval) {
/* Grow the nbuf to cover the bytes the hardware wrote. */
395 if (adf_nbuf_len(ds->ds_nbuf) == 0) {
396 adf_nbuf_put_tail(ds->ds_nbuf, bf->bf_rx_status.rs_datalen);
399 frame_len += bf->bf_rx_status.rs_datalen;
/* rs_more == 0: this descriptor ends the frame — collect all of its
 * fragments into nbuf_head and recycle the consumed descriptors. */
401 if (bf->bf_rx_status.rs_more == 0) {
402 adf_nbuf_queue_t nbuf_head;
403 adf_nbuf_queue_init(&nbuf_head);
408 ds = asf_tailq_next(ds, ds_list);
411 ds_head = asf_tailq_next(ds_tail, ds_list);
414 struct ath_rx_desc *ds_rmv;
416 adf_nbuf_unmap(sc->sc_dev, ds_tmp->ds_dmap, ADF_OS_DMA_FROM_DEVICE);
417 adf_nbuf_queue_add(&nbuf_head, ds_tmp->ds_nbuf);
418 ds_tmp->ds_nbuf = ADF_NBUF_NULL;
421 ds_tmp = asf_tailq_next(ds_tmp, ds_list);
422 if (ds_tmp == NULL) {
/* Same re-arm policy as above: active list on success, idle on failure. */
426 if (ath_rxdesc_init(sc, ds_rmv) == 0) {
427 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
428 asf_tailq_insert_tail(&sc->sc_rxdesc, ds_rmv, ds_list);
430 asf_tailq_remove(&sc->sc_rxdesc, ds_rmv, ds_list);
431 asf_tailq_insert_tail(&sc->sc_rxdesc_idle, ds_rmv, ds_list);
434 if (ds_rmv == ds_tail) {
/* Total length across all fragments, single nbuf chain for the frame. */
440 bf->bf_rx_status.rs_datalen = frame_len;
443 bf->bf_skb = adf_nbuf_create_frm_frag(&nbuf_head);
/* Hand off to ath_tgt_rx_tasklet. */
445 bf->bf_status |= ATH_BUFSTATUS_DONE;
447 bf = (struct ath_rx_buf *)asf_tailq_next(bf, bf_list);
450 ds = asf_tailq_next(ds, ds_list);
/*
 * Prepare and start reception: size the RX buffers, reset the chain
 * state, arm every descriptor on the active list and point the
 * hardware at the first one.  NOTE(review): error-path and tail lines
 * are elided in this listing.
 */
457 static a_int32_t ath_startrecv(struct ath_softc_tgt *sc)
459 struct ath_hal *ah = sc->sc_ah;
460 struct ath_rx_desc *ds;
/* 1664 bytes per RX buffer. */
462 sc->sc_rxbufsize = 1024+512+128;
463 sc->sc_rxlink = NULL;
465 sc->sc_rxdesc_held = NULL;
467 asf_tailq_foreach(ds, &sc->sc_rxdesc, ds_list) {
468 a_int32_t error = ath_rxdesc_init(sc, ds);
/* Hand the head of the armed chain to the hardware. */
474 ds = asf_tailq_first(&sc->sc_rxdesc);
475 ath_hal_putrxbuf(ah, ds->ds_daddr);
/*
 * RX tasklet: for each rx buffer marked DONE by
 * ath_uapsd_processtriggers, prepend the RX status header, extend the
 * 32-bit timestamp to 64 bits, and ship the frame to the host over
 * the RX HTC endpoint; then re-enable the RX interrupt.
 * NOTE(review): loop structure and some lines are elided here.
 */
480 static void ath_tgt_rx_tasklet(TQUEUE_ARG data)
482 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
483 struct ath_rx_buf *bf = NULL;
484 struct ath_hal *ah = sc->sc_ah;
485 struct rx_frame_header *rxhdr;
486 struct ath_rx_status *rxstats;
487 adf_nbuf_t skb = ADF_NBUF_NULL;
490 bf = asf_tailq_first(&sc->sc_rxbuf);
/* Stop at the first buffer the ISR has not completed yet. */
495 if (!(bf->bf_status & ATH_BUFSTATUS_DONE)) {
504 asf_tailq_remove(&sc->sc_rxbuf, bf, bf_list);
/* Prepend the on-the-wire RX header and copy the RX status into it. */
508 rxhdr = (struct rx_frame_header *)adf_nbuf_push_head(skb,
509 sizeof(struct rx_frame_header));
510 rxstats = (struct ath_rx_status *)(&rxhdr->rx_stats[0]);
511 adf_os_mem_copy(rxstats, &(bf->bf_rx_status),
512 sizeof(struct ath_rx_status));
/* Host expects a full 64-bit TSF timestamp. */
514 rxstats->rs_tstamp = ath_extend_tsf(sc, (u_int32_t)rxstats->rs_tstamp);
516 HTC_SendMsg(sc->tgt_htc_handle, RX_ENDPOINT_ID, skb);
517 sc->sc_rx_stats.ast_rx_send++;
/* Recycle the buffer onto the tail of the rx buffer list. */
519 bf->bf_status &= ~ATH_BUFSTATUS_DONE;
520 asf_tailq_insert_tail(&sc->sc_rxbuf, bf, bf_list);
/* Re-enable RX interrupts (masked off in ath_intr). */
524 sc->sc_imask |= HAL_INT_RX;
525 ath_hal_intrset(ah, sc->sc_imask);
528 /*******************/
529 /* Beacon Handling */
530 /*******************/
533 * Setup the beacon frame for transmit.
534 * FIXME: Short Preamble.
/*
 * Program the TX descriptor for a beacon frame: no-ACK, beacon packet
 * type, minimum VAP rate, single rate series.  NOTE(review): several
 * ath_hal_setuptxdesc/filltxdesc argument lines are elided in this
 * listing.
 */
536 static void ath_beacon_setup(struct ath_softc_tgt *sc,
537 struct ath_tx_buf *bf,
538 struct ath_vap_target *avp)
540 adf_nbuf_t skb = bf->bf_skb;
541 struct ath_hal *ah = sc->sc_ah;
544 const HAL_RATE_TABLE *rt;
546 HAL_11N_RATE_SERIES series[4] = {{ 0 }};
/* Beacons are never ACKed. */
548 flags = HAL_TXDESC_NOACK;
552 ds->ds_data = bf->bf_dmamap_info.dma_segs[0].paddr;
/* Send at the VAP's configured minimum rate. */
554 rix = ath_get_minrateidx(sc, avp);
555 rt = sc->sc_currates;
556 rate = rt->info[rix].rateCode;
558 ath_hal_setuptxdesc(ah, ds
559 , adf_nbuf_len(skb) + IEEE80211_CRC_LEN
560 , sizeof(struct ieee80211_frame)
561 , HAL_PKT_TYPE_BEACON
564 , HAL_TXKEYIX_INVALID
571 , ATH_COMP_PROC_NO_COMP_NO_CCS);
573 ath_hal_filltxdesc(ah, ds
574 , asf_roundup(adf_nbuf_len(skb), 4)
/* Single-series rate scenario on the configured TX chainmask. */
580 series[0].Rate = rate;
581 series[0].ChSel = sc->sc_ic.ic_tx_chainmask;
582 series[0].RateFlags = 0;
583 ath_hal_set11n_ratescenario(ah, ds, 0, 0, 0, series, 4, 0);
/*
 * Transmit a beacon delivered by the host: pick the VAP from the
 * beacon header, release the previous beacon buffer back to the host,
 * map and program the new one, and kick the beacon hardware queue.
 * NOTE(review): some lines (braces, a few statements) are elided here.
 */
586 static void ath_tgt_send_beacon(struct ath_softc_tgt *sc, adf_nbuf_t bc_hdr,
587 adf_nbuf_t nbuf, HTC_ENDPOINT_ID EndPt)
589 struct ath_hal *ah = sc->sc_ah;
590 struct ath_tx_buf *bf;
591 a_uint8_t vap_index, *anbdata;
592 ath_beacon_hdr_t *bhdr;
593 struct ieee80211vap_target *vap;
595 struct ieee80211_frame *wh;
598 adf_nbuf_peek_header(nbuf, &anbdata, &anblen);
599 bhdr = (ath_beacon_hdr_t *)anbdata;
/* Unexpected separate header buffer — diagnostic only. */
601 adf_os_print("found bc_hdr! 0x%x\n", bc_hdr);
604 vap_index = bhdr->vap_index;
605 adf_os_assert(vap_index < TARGET_VAP_MAX);
606 vap = &sc->sc_vap[vap_index].av_vap;
/* Strip the target beacon header to expose the 802.11 frame. */
608 wh = (struct ieee80211_frame *)adf_nbuf_pull_head(nbuf,
609 sizeof(ath_beacon_hdr_t));
611 bf = sc->sc_vap[vap_index].av_bcbuf;
613 bf->bf_endpt = EndPt;
/* Return the previous beacon skb to the host (header restored first). */
616 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, ADF_OS_DMA_TO_DEVICE);
617 adf_nbuf_push_head(bf->bf_skb, sizeof(ath_beacon_hdr_t));
618 ath_free_tx_skb(sc->tgt_htc_handle, bf->bf_endpt, bf->bf_skb);
/* Map the new beacon for DMA and set up its descriptor. */
623 adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, nbuf, ADF_OS_DMA_TO_DEVICE);
624 adf_nbuf_dmamap_info(bf->bf_dmamap,&bf->bf_dmamap_info);
626 ath_beacon_setup(sc, bf, &sc->sc_vap[vap_index]);
/* Restart the beacon queue with the new frame. */
627 ath_hal_stoptxdma(ah, sc->sc_bhalq);
628 ath_hal_puttxbuf(ah, sc->sc_bhalq, ATH_BUF_GET_DESC_PHY_ADDR(bf));
629 ath_hal_txstart(ah, sc->sc_bhalq);
636 static void ath_tx_stopdma(struct ath_softc_tgt *sc, struct ath_txq *txq)
638 struct ath_hal *ah = sc->sc_ah;
640 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
643 static void owltgt_txq_drain(struct ath_softc_tgt *sc, struct ath_txq *txq)
645 owltgt_tx_processq(sc, txq, OWL_TXQ_STOPPED);
/* Thin wrapper kept for the ath_* naming used elsewhere in the file. */
static void ath_tx_draintxq(struct ath_softc_tgt *sc, struct ath_txq *txq)
{
	owltgt_txq_drain(sc, txq);
}
/*
 * Drain all transmit queues: stop beacon and data DMA, process every
 * configured queue as STOPPED, and drain any TIDs still queued on
 * them.  sc_tx_draining guards the TX path while this runs.
 * NOTE(review): loop/brace lines are partially elided in this listing.
 */
653 static void ath_draintxq(struct ath_softc_tgt *sc, HAL_BOOL drain_softq)
655 struct ath_hal *ah = sc->sc_ah;
657 struct ath_txq *txq = NULL;
658 struct ath_atx_tid *tid = NULL;
660 ath_tx_status_clear(sc);
661 sc->sc_tx_draining = 1;
/* Beacon queue first, then every configured data queue. */
663 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
665 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
666 if (ATH_TXQ_SETUP(sc, i))
667 ath_tx_stopdma(sc, ATH_TXQ(sc, i));
/* Second pass: reap completed/queued frames from each queue. */
669 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
670 if (ATH_TXQ_SETUP(sc, i)) {
671 owltgt_tx_processq(sc, ATH_TXQ(sc,i), OWL_TXQ_STOPPED);
/* Unschedule and drain every TID still attached to the queue. */
674 while (!asf_tailq_empty(&txq->axq_tidq)){
675 TAILQ_DEQ(&txq->axq_tidq, tid, tid_qelem);
678 tid->sched = AH_FALSE;
679 ath_tgt_tid_drain(sc,tid);
683 sc->sc_tx_draining = 0;
/*
 * Initialize the software state for every hardware TX queue and set up
 * the special-purpose (UAPSD, CAB) and WME access-category aliases.
 * NOTE(review): local declarations and one per-queue statement are
 * elided in this listing.
 */
686 static void ath_tgt_txq_setup(struct ath_softc_tgt *sc)
693 for (qnum=0;qnum<HAL_NUM_TX_QUEUES;qnum++) {
694 txq= &sc->sc_txq[qnum];
695 txq->axq_qnum = qnum;
696 txq->axq_link = NULL;
697 asf_tailq_init(&txq->axq_q);
699 txq->axq_linkbuf = NULL;
700 asf_tailq_init(&txq->axq_tidq);
/* Bitmap consulted by ATH_TXQ_SETUP(). */
701 sc->sc_txqsetup |= 1<<qnum;
704 sc->sc_uapsdq = &sc->sc_txq[UAPSDQ_NUM];
705 sc->sc_cabq = &sc->sc_txq[CABQ_NUM];
/* WME access category -> hardware queue mapping. */
707 sc->sc_ac2q[WME_AC_BE] = &sc->sc_txq[0];
708 sc->sc_ac2q[WME_AC_BK] = &sc->sc_txq[1];
709 sc->sc_ac2q[WME_AC_VI] = &sc->sc_txq[2];
710 sc->sc_ac2q[WME_AC_VO] = &sc->sc_txq[3];
716 static void tgt_HTCRecv_beaconhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
717 adf_nbuf_t buf, void *ServiceCtx)
719 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
721 ath_tgt_send_beacon(sc, hdr_buf, buf, EndPt);
/* HTC receive callback for the UAPSD endpoint.  NOTE(review): the
 * function body is entirely elided in this listing — cannot document
 * its behavior from here. */
724 static void tgt_HTCRecv_uapsdhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
725 adf_nbuf_t buf, void *ServiceCtx)
729 static void tgt_HTCRecv_mgmthandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
730 adf_nbuf_t buf, void *ServiceCtx)
732 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
734 ath_tgt_send_mgt(sc,hdr_buf,buf,EndPt);
/*
 * HTC receive callback for data frames from the host: parse the
 * ath_data_hdr, look up the destination node/TID, build a TX buffer
 * and dispatch it to the aggregate or normal TX path.
 * NOTE(review): some lines (braces, a return, locals) are elided.
 */
737 static void tgt_HTCRecvMessageHandler(HTC_ENDPOINT_ID EndPt,
738 adf_nbuf_t hdr_buf, adf_nbuf_t buf,
741 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
742 struct ath_tx_buf *bf;
746 struct ath_node_target *an;
747 struct ath_atx_tid *tid;
/* Header may be inline in 'buf' or in a separate 'hdr_buf'. */
750 adf_nbuf_peek_header(buf, &data, &len);
751 adf_nbuf_pull_head(buf, sizeof(ath_data_hdr_t));
753 adf_nbuf_peek_header(hdr_buf, &data, &len);
756 adf_os_assert(len >= sizeof(ath_data_hdr_t));
757 dh = (ath_data_hdr_t *)data;
759 an = &sc->sc_sta[dh->ni_index];
760 tid = ATH_AN_2_TID(an, dh->tidno);
762 sc->sc_tx_stats.tx_tgt++;
/* On prepare failure the skb is returned to the host. */
764 bf = ath_tgt_tx_prepare(sc, buf, dh);
766 ath_free_tx_skb(sc->tgt_htc_handle,EndPt,buf);
/* Endpoint and host cookie ride along for the TX completion. */
770 bf->bf_endpt = EndPt;
771 bf->bf_cookie = dh->cookie;
773 if (tid->flag & TID_AGGR_ENABLED)
774 ath_tgt_handle_aggr(sc, bf);
776 ath_tgt_handle_normal(sc, bf);
/*
 * HTC receive callback for CAB (content-after-beacon) traffic: if too
 * much TSF time has passed since the last SWBA, return the buffers to
 * the host instead of transmitting stale CAB frames; otherwise forward
 * to the normal data handler.  NOTE(review): locals and the #endif/
 * return lines are elided in this listing.
 */
779 static void tgt_HTCRecv_cabhandler(HTC_ENDPOINT_ID EndPt, adf_nbuf_t hdr_buf,
780 adf_nbuf_t buf, void *ServiceCtx)
782 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)ServiceCtx;
783 struct ath_hal *ah = sc->sc_ah;
787 #ifdef ATH_ENABLE_CABQ
/* Age of the current beacon interval. */
788 tsf = ath_hal_gettsf64(ah);
789 tmp = tsf - sc->sc_swba_tsf;
791 if ( tmp > ATH_CABQ_HANDLING_THRESHOLD ) {
792 HTC_ReturnBuffers(sc->tgt_htc_handle, EndPt, buf);
796 tgt_HTCRecvMessageHandler(EndPt, hdr_buf, buf, ServiceCtx);
800 /***********************/
801 /* Descriptor Handling */
802 /***********************/
/*
 * Allocate a DMA-coherent descriptor area plus an array of driver
 * buffer structures ('nbuf' buffers of 'bfSize' bytes, each owning
 * 'ndesc' descriptors of 'descSize' bytes), wire every buffer to its
 * descriptors and queue it on 'head'.  The tail lines free everything
 * on the error path.  NOTE(review): several lines (locals, error
 * checks, returns) are elided in this listing.
 */
804 static a_int32_t ath_descdma_setup(struct ath_softc_tgt *sc,
805 struct ath_descdma *dd, ath_bufhead *head,
806 const char *name, a_int32_t nbuf, a_int32_t ndesc,
807 a_uint32_t bfSize, a_uint32_t descSize)
/* Virtual descriptor address -> physical (bus) address. */
809 #define DS2PHYS(_dd, _ds) \
810 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
814 a_int32_t i, bsize, error;
819 dd->dd_desc_len = descSize * nbuf * ndesc;
821 dd->dd_desc = adf_os_dmamem_alloc(sc->sc_dev,
822 dd->dd_desc_len, 1, &dd->dd_desc_dmamap);
823 dd->dd_desc_paddr = adf_os_dmamem_map2addr(dd->dd_desc_dmamap);
824 if (dd->dd_desc == NULL) {
/* Ordinary (non-DMA) memory for the buffer structure array. */
830 bsize = bfSize * nbuf;
831 bf = adf_os_mem_alloc(bsize);
836 adf_os_mem_set(bf, 0, bsize);
839 bf_addr = (a_uint8_t *)bf;
840 ds_addr = (a_uint8_t *)ds;
842 asf_tailq_init(head);
844 for (i = 0; i < nbuf; i++) {
847 if (adf_nbuf_dmamap_create( sc->sc_dev, &bf->bf_dmamap) != A_STATUS_OK) {
/* Attach this buffer's slice of the descriptor area. */
851 bf->bf_desc = bf->bf_descarr = bf->bf_lastds = ds;
852 for (j = 0; j < ndesc; j++)
853 ATH_BUF_SET_DESC_PHY_ADDR_WITH_IDX(bf, j, (ds_addr + (j*descSize)));
855 ATH_BUF_SET_DESC_PHY_ADDR(bf, ATH_BUF_GET_DESC_PHY_ADDR_WITH_IDX(bf, 0));
857 adf_nbuf_queue_init(&bf->bf_skbhead);
858 asf_tailq_insert_tail(head, bf, bf_list);
/* Advance by raw byte strides: bfSize / ndesc*descSize per iteration. */
861 ds_addr += (ndesc * descSize);
862 bf = (struct ath_buf *)bf_addr;
863 ds = (struct ath_desc *)ds_addr;
/* Error path: release the DMA area and wipe the descdma record. */
868 adf_os_dmamem_free(sc->sc_dev, dd->dd_desc_len,
869 1, dd->dd_desc, dd->dd_desc_dmamap);
871 adf_os_mem_set(dd, 0, sizeof(*dd));
/*
 * Tear down a descriptor/buffer set created by ath_descdma_setup:
 * unmap and free any attached skbs, destroy each buffer's DMA map,
 * release the DMA descriptor area and the buffer array, then wipe the
 * descdma record.  NOTE(review): some lines are elided; line 890 shows
 * an unbalanced ')' consistent with a split multi-line call.
 */
878 static void ath_descdma_cleanup(struct ath_softc_tgt *sc,
879 struct ath_descdma *dd,
880 ath_bufhead *head, a_int32_t dir)
883 struct ieee80211_node_target *ni;
885 asf_tailq_foreach(bf, head, bf_list) {
/* Fragmented TX buffer: free every queued skb. */
886 if (adf_nbuf_queue_len(&bf->bf_skbhead) != 0) {
887 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, dir);
888 while(adf_nbuf_queue_len(&bf->bf_skbhead) != 0) {
890 adf_nbuf_queue_remove(&bf->bf_skbhead));
/* Single-skb (RX) buffer. */
893 } else if (bf->bf_skb != NULL) {
894 adf_nbuf_unmap(sc->sc_dev,bf->bf_dmamap, dir);
895 ath_free_rx_skb(sc, bf->bf_skb);
899 adf_nbuf_dmamap_destroy(sc->sc_dev, bf->bf_dmamap);
905 adf_os_dmamem_free(sc->sc_dev, dd->dd_desc_len,
906 1, dd->dd_desc, dd->dd_desc_dmamap);
908 asf_tailq_init(head);
909 adf_os_mem_free(dd->dd_bufptr);
910 adf_os_mem_set(dd, 0, sizeof(*dd));
/*
 * Allocate all descriptor sets (RX, TX, beacon), build the RX
 * descriptor lists with chained link pointers, and reserve one TX
 * buffer as the held buffer (sc_txbuf_held).  Later error paths clean
 * up the earlier allocations in reverse order.  NOTE(review): error
 * checks, braces and some locals are elided in this listing.
 */
913 static a_int32_t ath_desc_alloc(struct ath_softc_tgt *sc)
915 #define DS2PHYS(_dd, _ds) \
916 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
919 struct ath_tx_buf *bf;
/* Resolve the module defaults (-1 sentinels set at file scope). */
921 if(ath_numrxbufs == -1)
922 ath_numrxbufs = ATH_RXBUF;
924 if (ath_numrxdescs == -1)
925 ath_numrxdescs = ATH_RXDESC;
927 error = ath_descdma_setup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
928 "rx", ath_numrxdescs, 1,
929 sizeof(struct ath_rx_buf),
930 sizeof(struct ath_rx_desc));
935 struct ath_descdma *dd = &sc->sc_rxdma;
936 struct ath_rx_desc *ds = dd->dd_desc;
937 struct ath_rx_desc *ds_prev = NULL;
939 asf_tailq_init(&sc->sc_rxdesc);
940 asf_tailq_init(&sc->sc_rxdesc_idle);
/* Chain every RX descriptor: clear buffers, create DMA maps, record
 * physical addresses and link each to its predecessor. */
942 for (i = 0; i < ath_numrxdescs; i++, ds++) {
944 if (ds->ds_nbuf != ADF_NBUF_NULL) {
945 ds->ds_nbuf = ADF_NBUF_NULL;
948 if (adf_nbuf_dmamap_create(sc->sc_dev, &ds->ds_dmap) != A_STATUS_OK) {
952 ds->ds_daddr = DS2PHYS(&sc->sc_rxdma, ds);
955 ds_prev->ds_link = ds->ds_daddr;
961 asf_tailq_insert_tail(&sc->sc_rxdesc, ds, ds_list);
/* TX buffers: one extra so a held buffer can be reserved below. */
964 error = ath_descdma_setup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
965 "tx", ATH_TXBUF + 1, ATH_TXDESC,
966 sizeof(struct ath_tx_buf),
967 sizeof(struct ath_tx_desc));
969 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
970 ADF_OS_DMA_FROM_DEVICE);
974 error = ath_descdma_setup(sc, &sc->sc_bdma, (ath_bufhead *)&sc->sc_bbuf,
975 "beacon", ATH_BCBUF, 1,
976 sizeof(struct ath_tx_buf),
977 sizeof(struct ath_tx_desc));
/* Beacon setup failed: unwind TX then RX allocations. */
979 ath_descdma_cleanup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
980 ADF_OS_DMA_TO_DEVICE);
981 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
982 ADF_OS_DMA_FROM_DEVICE);
/* Reserve the first TX buffer as the held buffer. */
986 bf = asf_tailq_first(&sc->sc_txbuf);
987 bf->bf_isaggr = bf->bf_isretried = bf->bf_retries = 0;
988 asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);
990 sc->sc_txbuf_held = bf;
997 static void ath_desc_free(struct ath_softc_tgt *sc)
999 asf_tailq_insert_tail(&sc->sc_txbuf, sc->sc_txbuf_held, bf_list);
1001 sc->sc_txbuf_held = NULL;
1003 if (sc->sc_txdma.dd_desc_len != 0)
1004 ath_descdma_cleanup(sc, &sc->sc_txdma, (ath_bufhead *)&sc->sc_txbuf,
1005 ADF_OS_DMA_TO_DEVICE);
1006 if (sc->sc_rxdma.dd_desc_len != 0)
1007 ath_descdma_cleanup(sc, &sc->sc_rxdma, (ath_bufhead *)&sc->sc_rxbuf,
1008 ADF_OS_DMA_FROM_DEVICE);
1011 /**********************/
1012 /* Interrupt Handling */
1013 /**********************/
/*
 * Top-level interrupt handler.  Reads and masks the ISR status, then
 * dispatches: FATAL -> fatal tasklet with interrupts disabled; SWBA ->
 * WMI beacon event to the host plus a drain of queue 8; RX family ->
 * inline trigger processing, RX interrupt masked until the RX tasklet
 * re-enables it; TXURN -> raise the TX trigger level; TX/BMISS ->
 * their tasklets; GTT/CST -> counters.  NOTE(review): several lines
 * (early-out condition, locals, some wmi_event arguments) are elided.
 */
1015 adf_os_irq_resp_t ath_intr(adf_drv_handle_t hdl)
1017 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)hdl;
1018 struct ath_hal *ah = sc->sc_ah;
1022 return ADF_OS_IRQ_NONE;
/* Not our interrupt. */
1024 if (!ath_hal_intrpend(ah))
1025 return ADF_OS_IRQ_NONE;
1027 ath_hal_getisr(ah, &status);
/* Only causes we asked for. */
1029 status &= sc->sc_imask;
1031 if (status & HAL_INT_FATAL) {
1032 ath_hal_intrset(ah, 0);
1033 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_fataltq);
1035 if (status & HAL_INT_SWBA) {
1036 WMI_SWBA_EVENT swbaEvt;
1037 struct ath_txq *txq = ATH_TXQ(sc, 8);
1039 swbaEvt.tsf = ath_hal_gettsf64(ah);
1040 swbaEvt.beaconPendingCount = ath_hal_numtxpending(ah, sc->sc_bhalq);
/* Timestamp used by the CAB handler's staleness check. */
1041 sc->sc_swba_tsf = ath_hal_gettsf64(ah);
1043 wmi_event(sc->tgt_wmi_handle,
1046 sizeof(WMI_SWBA_EVENT));
1048 ath_tx_draintxq(sc, txq);
1051 if (status & HAL_INT_RXORN)
1052 sc->sc_int_stats.ast_rxorn++;
1054 if (status & HAL_INT_RXEOL)
1055 sc->sc_int_stats.ast_rxeol++;
1057 if (status & (HAL_INT_RX | HAL_INT_RXEOL | HAL_INT_RXORN)) {
1058 if (status & HAL_INT_RX)
1059 sc->sc_int_stats.ast_rx++;
1061 ath_uapsd_processtriggers(sc);
/* Mask RX until ath_tgt_rx_tasklet has drained the DONE buffers. */
1063 sc->sc_imask &= ~HAL_INT_RX;
1064 ath_hal_intrset(ah, sc->sc_imask);
1066 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_rxtq);
1069 if (status & HAL_INT_TXURN) {
1070 sc->sc_int_stats.ast_txurn++;
1071 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1074 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_txtq);
1076 if (status & HAL_INT_BMISS) {
1077 ATH_SCHEDULE_TQUEUE(sc->sc_dev, &sc->sc_bmisstq);
1080 if (status & HAL_INT_GTT)
1081 sc->sc_int_stats.ast_txto++;
1083 if (status & HAL_INT_CST)
1084 sc->sc_int_stats.ast_cst++;
1087 return ADF_OS_IRQ_HANDLED;
1090 static void ath_fatal_tasklet(TQUEUE_ARG data )
1092 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
1094 wmi_event(sc->tgt_wmi_handle, WMI_FATAL_EVENTID, NULL, 0);
1097 static void ath_bmiss_tasklet(TQUEUE_ARG data)
1099 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
1101 wmi_event(sc->tgt_wmi_handle, WMI_BMISS_EVENTID, NULL, 0);
/*
 * WMI handler: host updates the interrupt mask.  The payload is a
 * network-order 32-bit mask; SWBA is set or cleared from it, BMISS is
 * set when requested.  NOTE(review): the local declaration and the
 * else/closing lines are elided in this listing.
 */
1108 static void ath_enable_intr_tgt(void *Context, A_UINT16 Command,
1109 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1111 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1112 struct ath_hal *ah = sc->sc_ah;
1116 intr = (*(a_uint32_t *)data);
/* Payload arrives in network byte order. */
1118 intr = adf_os_ntohl(intr);
1120 if (intr & HAL_INT_SWBA) {
1121 sc->sc_imask |= HAL_INT_SWBA;
1123 sc->sc_imask &= ~HAL_INT_SWBA;
1126 if (intr & HAL_INT_BMISS) {
1127 sc->sc_imask |= HAL_INT_BMISS;
1130 ath_hal_intrset(ah, sc->sc_imask);
1131 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo,NULL, 0);
/*
 * WMI handler: bring the target up — build the base interrupt mask,
 * query STBC capabilities (Merlin builds), hook the interrupt handler
 * and program the mask into the HAL.  NOTE(review): the #endif for
 * MAGPIE_MERLIN and some blank lines are elided in this listing.
 */
1134 static void ath_init_tgt(void *Context, A_UINT16 Command,
1135 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1137 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1138 struct ath_hal *ah = sc->sc_ah;
1139 a_uint32_t stbcsupport;
1141 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1142 | HAL_INT_RXEOL | HAL_INT_RXORN
1143 | HAL_INT_FATAL | HAL_INT_GLOBAL;
/* Global TX timeout. */
1145 sc->sc_imask |= HAL_INT_GTT;
/* Carrier-sense timeout only matters on HT hardware. */
1147 if (ath_hal_htsupported(ah))
1148 sc->sc_imask |= HAL_INT_CST;
1150 #ifdef MAGPIE_MERLIN
1151 if (ath_hal_txstbcsupport(ah, &stbcsupport))
1152 sc->sc_txstbcsupport = stbcsupport;
1154 if (ath_hal_rxstbcsupport(ah, &stbcsupport))
1155 sc->sc_rxstbcsupport = stbcsupport;
1157 adf_os_setup_intr(sc->sc_dev, ath_intr);
1158 ath_hal_intrset(ah, sc->sc_imask);
1160 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1163 static void ath_int_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1164 A_UINT8 *data, a_int32_t datalen)
1166 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1168 struct fusion_stats {
1170 a_uint32_t ast_rxorn;
1171 a_uint32_t ast_rxeol;
1172 a_uint32_t ast_txurn;
1173 a_uint32_t ast_txto;
1177 struct fusion_stats stats;
1179 stats.ast_rx = sc->sc_int_stats.ast_rx;
1180 stats.ast_rxorn = sc->sc_int_stats.ast_rxorn;
1181 stats.ast_rxeol = sc->sc_int_stats.ast_rxeol;
1182 stats.ast_txurn = sc->sc_int_stats.ast_txurn;
1183 stats.ast_txto = sc->sc_int_stats.ast_txto;
1184 stats.ast_cst = sc->sc_int_stats.ast_cst;
1186 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1189 static void ath_tx_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1190 A_UINT8 *data, a_int32_t datalen)
1192 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1194 struct fusion_stats {
1195 a_uint32_t ast_tx_xretries;
1196 a_uint32_t ast_tx_fifoerr;
1197 a_uint32_t ast_tx_filtered;
1198 a_uint32_t ast_tx_timer_exp;
1199 a_uint32_t ast_tx_shortretry;
1200 a_uint32_t ast_tx_longretry;
1202 a_uint32_t tx_qnull;
1203 a_uint32_t tx_noskbs;
1204 a_uint32_t tx_nobufs;
1207 struct fusion_stats stats;
1209 stats.ast_tx_xretries = sc->sc_tx_stats.ast_tx_xretries;
1210 stats.ast_tx_fifoerr = sc->sc_tx_stats.ast_tx_fifoerr;
1211 stats.ast_tx_filtered = sc->sc_tx_stats.ast_tx_filtered;
1212 stats.ast_tx_timer_exp = sc->sc_tx_stats.ast_tx_timer_exp;
1213 stats.ast_tx_shortretry = sc->sc_tx_stats.ast_tx_shortretry;
1214 stats.ast_tx_longretry = sc->sc_tx_stats.ast_tx_longretry;
1215 stats.tx_qnull = sc->sc_tx_stats.tx_qnull;
1216 stats.tx_noskbs = sc->sc_tx_stats.tx_noskbs;
1217 stats.tx_nobufs = sc->sc_tx_stats.tx_nobufs;
1219 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1222 static void ath_rx_stats_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1223 A_UINT8 *data, a_int32_t datalen)
1225 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1227 struct fusion_stats {
1228 a_uint32_t ast_rx_nobuf;
1229 a_uint32_t ast_rx_send;
1230 a_uint32_t ast_rx_done;
1233 struct fusion_stats stats;
1235 stats.ast_rx_nobuf = sc->sc_rx_stats.ast_rx_nobuf;
1236 stats.ast_rx_send = sc->sc_rx_stats.ast_rx_send;
1237 stats.ast_rx_done = sc->sc_rx_stats.ast_rx_done;
1239 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &stats, sizeof(stats));
1242 static void ath_get_tgt_version(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1243 A_UINT8 *data, a_int32_t datalen)
1245 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1246 struct wmi_fw_version ver;
1248 ver.major = ATH_VERSION_MAJOR;
1249 ver.minor = ATH_VERSION_MINOR;
1251 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &ver, sizeof(ver));
/*
 * WMI handler: enable or disable A-MPDU aggregation on one node/TID.
 * Validates the node index, node liveness and TID number; on disable
 * of an aggregating TID, tears the TID's aggregation state down.
 * NOTE(review): the bodies of the first two validation branches are
 * elided in this listing.
 */
1254 static void ath_enable_aggr_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1255 A_UINT8 *data, a_int32_t datalen)
1257 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1258 struct ath_aggr_info *aggr = (struct ath_aggr_info *)data;
1259 a_uint8_t nodeindex = aggr->nodeindex;
1260 a_uint8_t tidno = aggr->tidno;
1261 struct ath_node_target *an = NULL ;
1262 struct ath_atx_tid *tid = NULL;
/* Reject out-of-range node indices. */
1264 if (nodeindex >= TARGET_NODE_MAX) {
1268 an = &sc->sc_sta[nodeindex];
/* Reject nodes that are not currently valid. */
1269 if (!an->an_valid) {
1273 if (tidno >= WME_NUM_TID) {
1274 adf_os_print("[%s] enable_aggr with invalid tid %d(node = %d)\n",
1275 __FUNCTION__, tidno, nodeindex);
1279 tid = ATH_AN_2_TID(an, tidno);
1281 if (aggr->aggr_enable) {
1282 tid->flag |= TID_AGGR_ENABLED;
/* Disable only if currently enabled, and clean up pending state. */
1283 } else if ( tid->flag & TID_AGGR_ENABLED ) {
1284 tid->flag &= ~TID_AGGR_ENABLED;
1285 ath_tgt_tx_cleanup(sc, an, tid, 1);
1288 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1291 static void ath_ic_update_tgt(void *Context,A_UINT16 Command, A_UINT16 SeqNo,
1292 A_UINT8 *data, a_int32_t datalen)
1294 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1295 struct ieee80211com_target *ic = (struct ieee80211com_target * )data;
1296 struct ieee80211com_target *ictgt = &sc->sc_ic ;
1298 adf_os_mem_copy(ictgt, ic, sizeof(struct ieee80211com_target));
1300 ictgt->ic_ampdu_limit = adf_os_ntohl(ic->ic_ampdu_limit);
1302 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler: create a VAP from the host's descriptor — byteswap the
 * multi-byte fields, copy the descriptor into the indexed slot, attach
 * a beacon buffer and mark the slot valid.  NOTE(review): the size
 * argument line of the adf_os_mem_copy call is elided in this listing.
 */
1305 static void ath_vap_create_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1306 A_UINT8 *data, a_int32_t datalen)
1308 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1309 struct ieee80211vap_target *vap;
1310 a_uint8_t vap_index;
1312 vap = (struct ieee80211vap_target *)data;
/* Fields arrive in network byte order. */
1314 vap->iv_rtsthreshold = adf_os_ntohs(vap->iv_rtsthreshold);
1315 vap->iv_opmode = adf_os_ntohl(vap->iv_opmode);
1317 vap_index = vap->iv_vapindex;
/* Slot must be free — double-create is a host-side bug. */
1319 adf_os_assert(sc->sc_vap[vap_index].av_valid == 0);
1321 adf_os_mem_copy(&(sc->sc_vap[vap_index].av_vap), vap,
1324 sc->sc_vap[vap_index].av_bcbuf = asf_tailq_first(&(sc->sc_bbuf));
1325 sc->sc_vap[vap_index].av_valid = 1;
1327 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI handler: create a station node from the host's descriptor —
 * byteswap its fields, copy it into the indexed slot, wire it to its
 * VAP (and vice versa for the VAP's own node), reset the per-node
 * sequence/IV state and run target-side node init.  NOTE(review): the
 * size argument line of the adf_os_mem_copy call is elided here.
 */
1330 static void ath_node_create_tgt(void *Context, A_UINT16 Command,
1331 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1333 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1334 struct ieee80211_node_target *node;
1335 a_uint8_t vap_index;
1336 a_uint8_t node_index;
1338 node = (struct ieee80211_node_target *)data;
1340 node_index = node->ni_nodeindex;
/* Fields arrive in network byte order. */
1342 node->ni_htcap = adf_os_ntohs(node->ni_htcap);
1343 node->ni_flags = adf_os_ntohs(node->ni_flags);
1344 node->ni_maxampdu = adf_os_ntohs(node->ni_maxampdu);
1346 adf_os_mem_copy(&(sc->sc_sta[node_index].ni), node,
/* Back-pointer to the owning VAP; a VAP's self-node also registers
 * itself on the VAP. */
1349 vap_index = sc->sc_sta[node_index].ni.ni_vapindex;
1350 sc->sc_sta[node_index].ni.ni_vap = &(sc->sc_vap[vap_index].av_vap);
1351 if(sc->sc_sta[node_index].ni.ni_is_vapnode == 1)
1352 sc->sc_vap[vap_index].av_vap.iv_nodeindex = node_index;
1354 sc->sc_sta[node_index].an_valid = 1;
/* Fresh sequence and TKIP IV state for the new association. */
1355 sc->sc_sta[node_index].ni.ni_txseqmgmt = 0;
1356 sc->sc_sta[node_index].ni.ni_iv16 = 0;
1357 sc->sc_sta[node_index].ni.ni_iv32 = 0;
1359 owl_tgt_node_init(&sc->sc_sta[node_index]);
1361 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1364 static void ath_node_cleanup_tgt(void *Context, A_UINT16 Command,
1365 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1367 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1368 a_uint8_t node_index;
1369 a_uint8_t *nodedata;
1371 nodedata = (a_uint8_t *)data;
1372 node_index = *nodedata;
1373 sc->sc_sta[node_index].an_valid = 0;
1375 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_NODE_UPDATE_CMDID handler: overwrite an existing station entry
 * with a fresh host-supplied descriptor. Unlike ath_node_create_tgt this
 * does not touch an_valid and does not re-run owl_tgt_node_init().
 */
1378 static void ath_node_update_tgt(void *Context, A_UINT16 Command,
1379 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1381 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1382 struct ieee80211_node_target *node;
1383 a_uint8_t vap_index;
1384 a_uint8_t node_index;
1386 node = (struct ieee80211_node_target *)data;
1388 node_index = node->ni_nodeindex;
/* 16-bit fields arrive in network byte order; convert before copying. */
1390 node->ni_htcap = adf_os_ntohs(node->ni_htcap);
1391 node->ni_flags = adf_os_ntohs(node->ni_flags);
1392 node->ni_maxampdu = adf_os_ntohs(node->ni_maxampdu);
1394 adf_os_mem_copy(&(sc->sc_sta[node_index].ni), node,
/* Re-establish the VAP back-link after the wholesale copy. */
1397 vap_index = sc->sc_sta[node_index].ni.ni_vapindex;
1398 sc->sc_sta[node_index].ni.ni_vap = &(sc->sc_vap[vap_index].av_vap);
/* Reset management TX sequence and TKIP IV counters. */
1400 sc->sc_sta[node_index].ni.ni_txseqmgmt = 0;
1401 sc->sc_sta[node_index].ni.ni_iv16 = 0;
1402 sc->sc_sta[node_index].ni.ni_iv32 = 0;
1404 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_REG_READ_CMDID handler: the payload is an array of 32-bit register
 * addresses (network byte order); reply with one 32-bit value per address.
 * NOTE: the declarations of `val` and `i` fall outside this excerpt.
 */
1407 static void ath_hal_reg_read_tgt(void *Context, A_UINT16 Command,
1408 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1410 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1411 struct ath_hal *ah = sc->sc_ah;
1416 for (i = 0; i < datalen; i += sizeof(a_int32_t)) {
1417 addr = *(a_uint32_t *)(data + i);
1418 addr = adf_os_ntohl(addr);
/* Addresses in the 0x2000-0x3fff window are indirect SEEPROM reads:
 * trigger the access, poll 0x407c for completion, low 16 bits hold data. */
1420 if ((addr & 0xffffe000) == 0x2000) {
1422 ath_hal_reg_read_target(ah, addr);
1423 if (!ath_hal_wait(ah, 0x407c, 0x00030000, 0)) {
1424 adf_os_print("SEEPROM Read fail: 0x%08x\n", addr);
1426 val[i/sizeof(a_int32_t)] = (ath_hal_reg_read_target(ah, 0x407c) & 0x0000ffff);
/* Addresses above 0xffff are treated as direct target memory reads. */
1427 } else if (addr > 0xffff) {
1428 val[i/sizeof(a_int32_t)] = *(a_uint32_t *)addr;
1430 val[i/sizeof(a_int32_t)] = ath_hal_reg_read_target(ah, addr);
/* Convert each result back to network byte order for the reply. */
1432 val[i/sizeof(a_int32_t)] = adf_os_ntohl(val[i/sizeof(a_int32_t)]);
1435 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, &val[0], datalen);
/*
 * WMI_REG_WRITE_CMDID handler: the payload is an array of
 * (register, value) pairs. Contains board-specific workarounds:
 * a K2 UART re-init when the host touches 0x50040, and a one-shot
 * PLL reset sequence when 0x7014 is written (K2 vs Magpie differ).
 * Several lines of this function fall outside this excerpt.
 */
1438 static void ath_hal_reg_write_tgt(void *Context, A_UINT16 Command,
1439 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1441 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1442 struct ath_hal *ah = sc->sc_ah;
1444 struct registerWrite {
1449 for (i = 0; i < datalen; i += sizeof(struct registerWrite)) {
1450 t = (struct registerWrite *)(data+i);
/* Registers above 0xffff are written as direct target memory. */
1452 if( t->reg > 0xffff ) {
1453 a_uint32_t *pReg = (a_uint32_t *)t->reg;
1457 #if defined(PROJECT_K2)
/* K2: writing the clock register forces a UART baud re-init
 * (presumably because the ref clock just changed — TODO confirm). */
1458 if( t->reg == 0x50040 ) {
1459 static uint8_t flg=0;
1463 A_UART_HWINIT(117*1000*1000, 19200);
1469 #if defined(PROJECT_K2)
/* K2: first write to 0x7014 pulses bits 25-26 of 0x786c to reset the PLL. */
1470 if( t->reg == 0x7014 ) {
1471 static uint8_t resetPLL = 0;
1474 if( resetPLL == 0 ) {
1476 pReg = (a_uint32_t *)t->reg;
1478 ath_hal_reg_write_target(ah, 0x786c,
1479 ath_hal_reg_read_target(ah,0x786c) | 0x6000000);
1480 ath_hal_reg_write_target(ah, 0x786c,
1481 ath_hal_reg_read_target(ah,0x786c) & (~0x6000000));
1487 #elif defined(PROJECT_MAGPIE) && !defined (FPGA)
/* Magpie (non-FPGA): equivalent one-shot PLL reset via 0x7890. */
1488 if( t->reg == 0x7014 ){
1489 static uint8_t resetPLL = 0;
1491 if( resetPLL == 0 ) {
1492 ath_hal_reg_write_target(ah, 0x7890,
1493 ath_hal_reg_read_target(ah,0x7890) | 0x1800000);
1494 ath_hal_reg_write_target(ah, 0x7890,
1495 ath_hal_reg_read_target(ah,0x7890) & (~0x1800000));
/* Ordinary case: plain HAL register write. */
1500 ath_hal_reg_write_target(ah,t->reg,t->val);
1504 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1507 static void ath_vap_delete_tgt(void *Context, A_UINT16 Command,
1508 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1510 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1511 a_uint8_t vap_index;
1513 vap_index = *(a_uint8_t *)data;
1515 sc->sc_vap[vap_index].av_valid = 0;
1516 sc->sc_vap[vap_index].av_bcbuf = NULL;
1517 ath_node_vdelete_tgt(sc, vap_index);
1518 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1521 static void ath_disable_intr_tgt(void *Context, A_UINT16 Command,
1522 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1524 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1525 struct ath_hal *ah = sc->sc_ah;
1527 ath_hal_intrset(ah, 0);
1528 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo,NULL, 0);
/*
 * WMI_FLUSH_RECV_CMDID handler: walk the RX buffer list, unmap every
 * buffer that still holds a frame and release its skb back to the pool.
 * NOTE: the declaration of `bf` falls outside this excerpt.
 */
1531 static void ath_flushrecv_tgt(void *Context, A_UINT16 Command,
1532 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1534 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1537 asf_tailq_foreach(bf, &sc->sc_rxbuf, bf_list)
1538 if (bf->bf_skb != NULL) {
/* Undo the RX DMA mapping before handing the skb back. */
1539 adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap,
1540 ADF_OS_DMA_FROM_DEVICE);
1541 ath_free_rx_skb(sc, adf_nbuf_queue_remove(&bf->bf_skbhead));
1545 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1548 static void ath_tx_draintxq_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1549 A_UINT8 *data, a_int32_t datalen)
1551 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1552 a_uint32_t q = *(a_uint32_t *)data;
1553 struct ath_txq *txq = NULL;
1555 q = adf_os_ntohl(q);
1556 txq = ATH_TXQ(sc, q);
1558 ath_tx_draintxq(sc, txq);
1559 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1562 static void ath_draintxq_tgt(void *Context, A_UINT16 Command,
1563 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1565 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1566 HAL_BOOL b = (HAL_BOOL) *(a_int32_t *)data;
1568 ath_draintxq(Context, b);
1569 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1572 static void ath_aborttx_dma_tgt(void *Context, A_UINT16 Command,
1573 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1575 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1577 ath_hal_aborttxdma(sc->sc_ah);
1578 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1581 static void ath_aborttxq_tgt(void *Context, A_UINT16 Command,
1582 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1585 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1588 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1589 if (ATH_TXQ_SETUP(sc, i))
1590 ath_tx_draintxq(sc, ATH_TXQ(sc,i));
1593 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1596 static void ath_stop_tx_dma_tgt(void *Context, A_UINT16 Command,
1597 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1599 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1600 struct ath_hal *ah = sc->sc_ah;
1604 q = *(a_uint32_t *)data;
1606 q = adf_os_ntohl(q);
1607 ath_hal_stoptxdma(ah, q);
1608 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_START_RECV_CMDID handler: (re)start frame reception.
 * NOTE: the body line(s) between the declaration and the reply fall
 * outside this excerpt — presumably a call into the RX start path.
 */
1611 static void ath_startrecv_tgt(void *Context, A_UINT16 Command,
1612 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1615 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1618 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1621 static void ath_stoprecv_tgt(void *Context, A_UINT16 Command,
1622 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1624 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1625 struct ath_hal *ah = sc->sc_ah;
1627 ath_hal_stoppcurecv(ah);
1628 ath_hal_setrxfilter(ah, 0);
1629 ath_hal_stopdmarecv(ah);
1631 sc->sc_rxlink = NULL;
1632 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1635 static void ath_setcurmode_tgt(void *Context, A_UINT16 Command,
1636 A_UINT16 SeqNo, A_UINT8 *data, a_int32_t datalen)
1638 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1641 mode= *((a_uint16_t *)data);
1642 mode = adf_os_ntohs(mode);
1644 ath_setcurmode(sc, mode);
1646 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_TGT_DETACH_CMDID handler: tear down the target-side driver state.
 * The reply is deliberately sent BEFORE adf_os_mem_free(sc) — the WMI
 * handle lives inside sc, so the order must not be swapped.
 * NOTE: lines between `ah` and the reply fall outside this excerpt.
 */
1649 static void ath_detach_tgt(void *Context, A_UINT16 Command, A_UINT16 SeqNo,
1650 A_UINT8 *data, a_int32_t datalen)
1652 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1653 struct ath_hal *ah = sc->sc_ah;
1657 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
1658 adf_os_mem_free(sc);
1661 static void handle_echo_command(void *pContext, A_UINT16 Command,
1662 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1664 wmi_cmd_rsp(pContext, WMI_ECHO_CMDID, SeqNo, buffer, Length);
/*
 * WMI_RC_STATE_CHANGE_CMDID handler: forward a host rate-control state
 * change to the target rate module for the addressed VAP.
 * NOTE: the trailing arguments of ath_rate_newstate() fall outside this
 * excerpt (presumably including the converted capflag).
 */
1667 static void handle_rc_state_change_cmd(void *Context, A_UINT16 Command,
1668 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1671 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1672 struct wmi_rc_state_change_cmd *wmi_data = (struct wmi_rc_state_change_cmd *)buffer;
/* capflag travels in network byte order. */
1674 a_uint32_t capflag = adf_os_ntohl(wmi_data->capflag);
1676 ath_rate_newstate(sc, &sc->sc_vap[wmi_data->vap_index].av_vap,
1677 wmi_data->vap_state,
1681 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_RC_RATE_UPDATE_CMDID handler: forward a host rate-control update
 * for a single node to the target rate module.
 * NOTE: the trailing arguments of ath_rate_node_update() fall outside
 * this excerpt (presumably including the converted capflag).
 */
1684 static void handle_rc_rate_update_cmd(void *Context, A_UINT16 Command,
1685 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1687 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1688 struct wmi_rc_rate_update_cmd *wmi_data = (struct wmi_rc_rate_update_cmd *)buffer;
/* capflag travels in network byte order. */
1690 a_uint32_t capflag = adf_os_ntohl(wmi_data->capflag);
1692 ath_rate_node_update(sc, &sc->sc_sta[wmi_data->node_index],
1697 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI_ACCESS_MEMORY_CMDID handler: intentionally a no-op stub on this
 * target (no visible body in this excerpt).
 */
1700 static void dispatch_magpie_sys_cmds(void *pContext, A_UINT16 Command,
1701 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
/*
 * WMI_BITRATE_MASK_CMDID handler: store the host's per-band rate mask
 * for a VAP and cache the index of the lowest enabled rate.
 * NOTE: the declarations of i/idx/band and the loop's break / else
 * branch fall outside this excerpt.
 */
1706 static void ath_rc_mask_tgt(void *Context, A_UINT16 Command,
1707 A_UINT16 SeqNo, A_UINT8 *buffer, a_int32_t Length)
1709 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)Context;
1710 struct wmi_rc_rate_mask_cmd *wmi_data = (struct wmi_rc_rate_mask_cmd *)buffer;
1713 idx = wmi_data->vap_index;
1714 band = wmi_data->band;
/* Mask travels in network byte order. */
1716 sc->sc_vap[idx].av_rate_mask[band] = adf_os_ntohl(wmi_data->mask);
/* Find the lowest set bit = minimum allowed rate index for this band. */
1718 if (sc->sc_vap[idx].av_rate_mask[band]) {
1719 for (i = 0; i < RATE_TABLE_SIZE; i++) {
1720 if ((1 << i) & sc->sc_vap[idx].av_rate_mask[band]) {
1721 sc->sc_vap[idx].av_minrateidx[band] = i;
/* Empty mask: fall back to rate index 0. */
1726 sc->sc_vap[idx].av_minrateidx[band] = 0;
1729 wmi_cmd_rsp(sc->tgt_wmi_handle, Command, SeqNo, NULL, 0);
/*
 * WMI command dispatch table: maps each WMI command ID to its target-side
 * handler. Registered with the WMI layer in tgt_hif_htc_wmi_init().
 * The third field (flags) is unused here and left 0 for every entry.
 * NOTE: the closing of this initializer falls outside this excerpt.
 */
1732 static WMI_DISPATCH_ENTRY Magpie_Sys_DispatchEntries[] =
1734 {handle_echo_command, WMI_ECHO_CMDID, 0},
1735 {dispatch_magpie_sys_cmds, WMI_ACCESS_MEMORY_CMDID, 0},
1736 {ath_get_tgt_version, WMI_GET_FW_VERSION, 0},
1737 {ath_disable_intr_tgt, WMI_DISABLE_INTR_CMDID, 0},
1738 {ath_enable_intr_tgt, WMI_ENABLE_INTR_CMDID, 0},
1739 {ath_init_tgt, WMI_ATH_INIT_CMDID, 0},
1740 {ath_aborttxq_tgt, WMI_ABORT_TXQ_CMDID, 0},
1741 {ath_stop_tx_dma_tgt, WMI_STOP_TX_DMA_CMDID, 0},
1742 {ath_aborttx_dma_tgt, WMI_ABORT_TX_DMA_CMDID, 0},
1743 {ath_tx_draintxq_tgt, WMI_DRAIN_TXQ_CMDID, 0},
1744 {ath_draintxq_tgt, WMI_DRAIN_TXQ_ALL_CMDID, 0},
1745 {ath_startrecv_tgt, WMI_START_RECV_CMDID, 0},
1746 {ath_stoprecv_tgt, WMI_STOP_RECV_CMDID, 0},
1747 {ath_flushrecv_tgt, WMI_FLUSH_RECV_CMDID, 0},
1748 {ath_setcurmode_tgt, WMI_SET_MODE_CMDID, 0},
1749 {ath_node_create_tgt, WMI_NODE_CREATE_CMDID, 0},
1750 {ath_node_cleanup_tgt, WMI_NODE_REMOVE_CMDID, 0},
1751 {ath_vap_delete_tgt, WMI_VAP_REMOVE_CMDID, 0},
1752 {ath_vap_create_tgt, WMI_VAP_CREATE_CMDID, 0},
1753 {ath_hal_reg_read_tgt, WMI_REG_READ_CMDID, 0},
1754 {ath_hal_reg_write_tgt, WMI_REG_WRITE_CMDID, 0},
1755 {handle_rc_state_change_cmd, WMI_RC_STATE_CHANGE_CMDID, 0},
1756 {handle_rc_rate_update_cmd, WMI_RC_RATE_UPDATE_CMDID, 0},
1757 {ath_ic_update_tgt, WMI_TARGET_IC_UPDATE_CMDID, 0},
1758 {ath_enable_aggr_tgt, WMI_TX_AGGR_ENABLE_CMDID, 0},
1759 {ath_detach_tgt, WMI_TGT_DETACH_CMDID, 0},
1760 {ath_node_update_tgt, WMI_NODE_UPDATE_CMDID, 0},
1761 {ath_int_stats_tgt, WMI_INT_STATS_CMDID, 0},
1762 {ath_tx_stats_tgt, WMI_TX_STATS_CMDID, 0},
1763 {ath_rx_stats_tgt, WMI_RX_STATS_CMDID, 0},
1764 {ath_rc_mask_tgt, WMI_BITRATE_MASK_CMDID, 0},
/* HTC setup-complete callback passed to HTC_init(); body not visible in
 * this excerpt (presumably a no-op). */
1771 static void htc_setup_comp(void)
/*
 * HTC service-connect callback: when the host connects a service, record
 * which endpoint ID was assigned for it so the data path can address the
 * right endpoint later. Always reports success.
 * NOTE: several case bodies (beacon/CAB/UAPSD/mgmt endpoints) and the
 * break statements fall outside this excerpt.
 */
1775 static A_UINT8 tgt_ServiceConnect(HTC_SERVICE *pService,
1776 HTC_ENDPOINT_ID eid,
1780 a_int32_t *pLengthOut)
1782 struct ath_softc_tgt *sc = (struct ath_softc_tgt *)pService->ServiceCtx;
1784 switch(pService->ServiceID) {
1785 case WMI_CONTROL_SVC:
1786 sc->wmi_command_ep= eid;
1788 case WMI_BEACON_SVC:
/* Per-AC data services: remember the endpoint for each access class. */
1800 case WMI_DATA_VO_SVC:
1801 sc->data_VO_ep = eid;
1803 case WMI_DATA_VI_SVC:
1804 sc->data_VI_ep = eid;
1806 case WMI_DATA_BE_SVC:
1807 sc->data_BE_ep = eid;
1809 case WMI_DATA_BK_SVC:
1810 sc->data_BK_ep = eid;
1816 return HTC_SERVICE_SUCCESS;
1819 static void tgt_reg_service(struct ath_softc_tgt *sc, HTC_SERVICE *svc,
1820 int svcId, HTC_SERVICE_ProcessRecvMsg recvMsg)
1822 svc->ProcessRecvMsg = recvMsg;
1823 svc->ProcessSendBufferComplete = tgt_HTCSendCompleteHandler;
1824 svc->ProcessConnect = tgt_ServiceConnect;
1825 svc->MaxSvcMsgSize = 1600;
1826 svc->TrailerSpcCheckLimit = 0;
1827 svc->ServiceID = svcId;
1828 svc->ServiceCtx = sc;
1829 HTC_RegisterService(sc->tgt_htc_handle, svc);
/*
 * Bring up the target-side communication stack in order:
 * buffer pool -> HIF -> HTC -> per-service registration -> WMI,
 * then signal the host that HTC is ready.
 * NOTE: a few lines (e.g. the #endif for PROJECT_MAGPIE) fall outside
 * this excerpt.
 */
1832 static void tgt_hif_htc_wmi_init(struct ath_softc_tgt *sc)
1834 HTC_CONFIG htc_conf;
1835 WMI_SVC_CONFIG wmiConfig;
1836 WMI_DISPATCH_TABLE *Magpie_Sys_Commands_Tbl;
1838 /* Init dynamic buf pool */
1839 sc->pool_handle = BUF_Pool_init(sc->sc_hdl);
1841 /* Init target-side HIF */
1842 sc->tgt_hif_handle = HIF_init(0);
1844 /* Init target-side HTC */
1845 htc_conf.HIFHandle = sc->tgt_hif_handle;
1846 htc_conf.CreditSize = 320;
1847 htc_conf.CreditNumber = ATH_TXBUF;
1848 htc_conf.OSHandle = sc->sc_hdl;
1849 htc_conf.PoolHandle = sc->pool_handle;
1850 sc->tgt_htc_handle = HTC_init(htc_setup_comp, &htc_conf);
1851 #if defined(PROJECT_MAGPIE)
1852 init_htc_handle = sc->tgt_htc_handle;
/* Register every HTC service: beacon/CAB/UAPSD/mgmt each get a dedicated
 * receive handler; the four data ACs share tgt_HTCRecvMessageHandler. */
1855 tgt_reg_service(sc, &sc->htc_beacon_service, WMI_BEACON_SVC, tgt_HTCRecv_beaconhandler);
1856 tgt_reg_service(sc, &sc->htc_cab_service, WMI_CAB_SVC, tgt_HTCRecv_cabhandler);
1857 tgt_reg_service(sc, &sc->htc_uapsd_service, WMI_UAPSD_SVC, tgt_HTCRecv_uapsdhandler);
1858 tgt_reg_service(sc, &sc->htc_mgmt_service, WMI_MGMT_SVC, tgt_HTCRecv_mgmthandler);
1859 tgt_reg_service(sc, &sc->htc_data_BE_service, WMI_DATA_BE_SVC, tgt_HTCRecvMessageHandler);
1860 tgt_reg_service(sc, &sc->htc_data_BK_service, WMI_DATA_BK_SVC, tgt_HTCRecvMessageHandler);
1861 tgt_reg_service(sc, &sc->htc_data_VI_service, WMI_DATA_VI_SVC, tgt_HTCRecvMessageHandler);
1862 tgt_reg_service(sc, &sc->htc_data_VO_service, WMI_DATA_VO_SVC, tgt_HTCRecvMessageHandler);
1864 /* Init target-side WMI */
/* NOTE(review): allocation result is used without a NULL check. */
1865 Magpie_Sys_Commands_Tbl = (WMI_DISPATCH_TABLE *)adf_os_mem_alloc(sizeof(WMI_DISPATCH_TABLE));
1866 adf_os_mem_zero(Magpie_Sys_Commands_Tbl, sizeof(WMI_DISPATCH_TABLE));
1867 Magpie_Sys_Commands_Tbl->NumberOfEntries = WMI_DISPATCH_ENTRY_COUNT(Magpie_Sys_DispatchEntries);
1868 Magpie_Sys_Commands_Tbl->pTable = Magpie_Sys_DispatchEntries;
1870 adf_os_mem_zero(&wmiConfig, sizeof(WMI_SVC_CONFIG));
1871 wmiConfig.HtcHandle = sc->tgt_htc_handle;
1872 wmiConfig.PoolHandle = sc->pool_handle;
1873 wmiConfig.MaxCmdReplyEvts = ATH_WMI_MAX_CMD_REPLY;
1874 wmiConfig.MaxEventEvts = ATH_WMI_MAX_EVENTS;
1876 sc->tgt_wmi_handle = WMI_Init(&wmiConfig);
1877 Magpie_Sys_Commands_Tbl->pContext = sc;
1878 WMI_RegisterDispatchTable(sc->tgt_wmi_handle, Magpie_Sys_Commands_Tbl);
1880 HTC_NotifyTargetInserted(sc->tgt_htc_handle);
1882 /* Start HTC messages exchange */
1883 HTC_Ready(sc->tgt_htc_handle);
/*
 * Target-side driver attach: read the PCI cache line size, set up the
 * deferred-work queues, attach the HAL, bring up HIF/HTC/WMI, initialise
 * rate control and descriptors, and leave interrupts masked.
 * Returns 0 on success (error paths fall outside this excerpt).
 * NOTE: several declarations (ah, csz, status) and error-handling lines
 * are not visible in this excerpt.
 */
1886 a_int32_t ath_tgt_attach(a_uint32_t devid,a_uint32_t mem_start,
1887 struct ath_softc_tgt *sc, adf_os_device_t osdev)
1891 a_int32_t error = 0, i, flags = 0;
1894 adf_os_pci_config_read8(osdev, ATH_PCI_CACHE_LINE_SIZE, &csz);
/* Cache line size register is in 32-bit words; convert to bytes. */
1898 sc->sc_cachelsz = csz << 2;
/* Deferred-work queues for RX, TX, beacon-miss and fatal interrupts. */
1903 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_rxtq, ath_tgt_rx_tasklet, sc);
1904 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_txtq, owl_tgt_tx_tasklet, sc);
1905 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_bmisstq, ath_bmiss_tasklet, sc);
1906 ATH_INIT_TQUEUE(sc->sc_dev, &sc->sc_fataltq, ath_fatal_tasklet, sc);
1908 flags |= AH_USE_EEPROM;
1909 ah = _ath_hal_attach_tgt(devid,sc,sc->sc_dev,mem_start, flags, &status);
1916 tgt_hif_htc_wmi_init(sc);
/* Highest-numbered HW queue is reserved for beacons. */
1918 sc->sc_bhalq = HAL_NUM_TX_QUEUES - 1;
1920 ath_rate_setup(sc, IEEE80211_MODE_11NA);
1921 ath_rate_setup(sc, IEEE80211_MODE_11NG);
1923 sc->sc_rc = ath_rate_attach(sc);
1924 if (sc->sc_rc == NULL) {
/* Pre-allocate per-node rate-control state for every station slot. */
1929 for (i=0; i < TARGET_NODE_MAX; i++) {
1930 sc->sc_sta[i].an_rcnode = adf_os_mem_alloc(sc->sc_rc->arc_space);
1933 error = ath_desc_alloc(sc);
1938 BUF_Pool_create_pool(sc->pool_handle, POOL_ID_WLAN_RX_BUF, ath_numrxdescs, 1664);
1940 ath_tgt_txq_setup(sc);
/* Leave interrupts masked until the host enables them via WMI. */
1942 ath_hal_intrset(ah,0);
1952 static void tgt_hif_htc_wmi_shutdown(struct ath_softc_tgt *sc)
1954 HTC_NotifyTargetDetached(sc->tgt_htc_handle);
1956 WMI_Shutdown(sc->tgt_wmi_handle);
1957 HTC_Shutdown(sc->tgt_htc_handle);
1958 HIF_shutdown(sc->tgt_hif_handle);
1959 BUF_Pool_shutdown(sc->pool_handle);
1962 a_int32_t ath_detach(struct ath_softc_tgt *sc)
1964 tgt_hif_htc_wmi_shutdown(sc);