/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
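/*
 * Hardware TX descriptor (txwi) cache: each entry wraps a DMA-mapped txwi
 * buffer. Released entries are kept on dev->txwi_cache for reuse instead
 * of being unmapped and freed on every frame.
 */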
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	/* Round the allocation up to a full cache line */
	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}
struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
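/*
 * Drain the txwi cache and drop the DMA mappings. The entries themselves
 * are devm-allocated, so they are freed together with the device.
 */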
void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}
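/* Map a mac80211 txq to a hardware queue index; non-station traffic uses BE */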
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
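/*
 * Direct transmit path: queue a single frame on the hardware queue that
 * matches its skb queue mapping, bypassing the per-txq scheduler.
 */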
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	/* Stop the mac80211 queue before the hardware ring fills up */
	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
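/*
 * Pull the next frame for a txq: frames on the local retry queue take
 * precedence over the mac80211 txq. When draining the last retried frame
 * during a powersave service period, clear the buffered-frames indication
 * for the TID.
 */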
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);

	return skb;
}
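/*
 * Track the sequence number that follows the last queued QoS data frame,
 * so that a BAR with the right SSN can be sent when the queue is stopped.
 */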
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
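/*
 * Queue one powersave response frame on the PSD queue: set the more-data
 * bit on all but the last frame and request EOSP status on the last one.
 */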
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
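/*
 * mac80211 callback for releasing frames buffered for a station in
 * powersave: dequeue up to nframes frames from the requested TIDs and
 * push them out on the PSD hardware queue.
 */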
void mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

		if (!(tids & 1))
			continue;

		do {
			struct sk_buff *skb;

			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
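/*
 * Transmit a burst of frames from one txq. The first frame decides the
 * rate and the burst limit (16 for A-MPDU traffic, 3 otherwise); the burst
 * ends early on a rate-probing frame or when the A-MPDU state changes, in
 * which case the frame is pushed back onto the retry queue.
 */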
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		/* Reuse the rate selection of the first frame in the burst */
		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
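/*
 * Service the pending txqs on a hardware queue round-robin. A txq that
 * needs a BAR gets it sent first, with the queue lock dropped around the
 * call into mac80211; txqs that still hold frames are requeued at the tail.
 */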
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}
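/*
 * Run the scheduler until no txq has frames left or enough bursts
 * (hwq->swq_queued >= 4) are pending completion on this hardware queue.
 */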
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
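/* Kick the scheduler on all four WMM data queues (VO..BK) */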
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
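/*
 * Detach a station's txqs from the scheduler, optionally flagging active
 * aggregation sessions so that a BAR is sent once scheduling resumes.
 */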
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
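/*
 * mac80211 wake_tx_queue callback: put the txq on its hardware queue's
 * software scheduling list and run the scheduler.
 */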
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
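/*
 * Remove a txq from scheduling and free any frames left on its retry queue.
 */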
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);