// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

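/* Tx status tracking
 *
 * Drivers bracket status processing with mt76_tx_status_lock()/unlock().
 * Completed frames are collected on a caller-provided list under
 * dev->status_lock and only reported to mac80211 once the lock has been
 * dropped, so ieee80211_tx_status_ext() never runs under the spinlock.
 *
 * Illustrative use from a driver TXS event handler (a simplified sketch,
 * not copied from any one driver; the surrounding variable names are
 * hypothetical):
 *
 *	mt76_tx_status_lock(mdev, &list);
 *	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(mdev, skb, &list);
 *	mt76_tx_status_unlock(mdev, &list);
 */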
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta)
				status.rate = &wcid->rate;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

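/* A frame is ready for reporting once both MT_TX_CB_DMA_DONE and
 * MT_TX_CB_TXS_DONE have been set in cb->flags; whichever completion
 * arrives second moves the skb onto the caller's list.
 */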
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

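/* Allocate a packet ID used to match hardware tx status reports back to
 * their skb. Frames that cannot or need not be tracked get one of the
 * reserved MT_PACKET_ID_NO_ACK / MT_PACKET_ID_NO_SKB values instead of
 * an idr entry.
 */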
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

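/* Look up the skb matching a tx status report and opportunistically reap
 * entries whose DMA completion is older than MT_TX_STATUS_SKB_TIMEOUT.
 * A negative pktid (see mt76_tx_status_check() with flush set) times out
 * every entry unconditionally.
 */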
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

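/* Periodic (or flush-time) sweep over all tracked stations; with flush
 * set, every pending status entry is completed immediately.
 */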
void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

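/* Bookkeeping for frames that bypass mac80211's airtime queue limits
 * (AQL): anything without a tx_time_est is counted per station so the
 * scheduler can cap it at MT_MAX_NON_AQL_PKT in-flight frames.
 */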
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

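/* Queue one skb on a hardware queue and, for non-AQL frames, bump the
 * per-station in-flight counter; *stop is set once the cap is reached so
 * burst transmission stops dequeueing. All callers hold q->lock.
 */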
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

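/* Pull the next frame for a txq from mac80211, tagging it for the
 * secondary phy's hardware queues when needed.
 */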
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

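/* Power-save delivery path: frames released in response to a PS-Poll or
 * U-APSD trigger go out on the MT_TXQ_PSD queue, with more-data and EOSP
 * signalling handled below.
 */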
static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

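/* A queue is considered unusable for scheduling while it is stopped,
 * blocked by token exhaustion, or has fewer than MT_TXQ_FREE_THR free
 * descriptors.
 */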
static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

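/* Transmit a burst of frames from one txq: bail out early for stations
 * in powersave or already at the non-AQL cap, then keep dequeueing until
 * the queue fills up, the cap is hit, or the txq runs dry. Returns the
 * number of queued frames, or a negative error (e.g. -EBUSY during
 * reset).
 */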
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

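/* Round-robin over mac80211's txq scheduler: reclaim completed
 * descriptors when the queue is nearly full, emit a pending BlockAck
 * request before resuming an aggregation session, then hand the txq to
 * mt76_txq_send_burst(). Runs under rcu_read_lock() from
 * mt76_txq_schedule().
 */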
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

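/* Entry point for the tx worker thread: service both phys and any
 * pending testmode frames.
 */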
void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

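/* Append 'pad' bytes of tailroom to an skb (or to the last fragment of
 * an A-MSDU), growing the buffer if necessary.
 */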
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length; update its length and that of the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

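/* Complete one queue entry: hand the skb back to the driver's
 * tx_complete_skb() hook, then advance the hardware queue tail.
 */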
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

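/* Token bookkeeping: every DMA'd frame consumes a token from a fixed
 * pool. When fewer than MT76_TOKEN_FREE_THR tokens remain, tx is blocked
 * until enough completions release tokens again; mt76_token_release()
 * reports via *wake when unblocking is due.
 */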
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);