/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for the ICV data as tailroom.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);
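	/*
	 * Example (illustrative): if skb->data sits at an address whose two
	 * low bits are 0b10, ALIGN_SIZE() evaluates to 2 and the frame below
	 * is moved 2 bytes towards the start of the buffer, restoring 4-byte
	 * alignment of the payload.
	 */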
	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
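	/*
	 * Example (illustrative): a 26-byte QoS data header gives
	 * L2PAD_SIZE(26) == 2, so 2 pad bytes are inserted between the
	 * header and the payload to keep the payload 4-byte aligned.
	 */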
	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem, generate
		 * the seqno in software, except for beacons, which are
		 * transmitted periodically by the H/W and hence must have
		 * their seqno assigned by hardware.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
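	/*
	 * Note: the 802.11 seq_ctrl field stores the 4-bit fragment number
	 * in its low bits and the 12-bit sequence number above them, which
	 * is why the counter below is advanced in steps of 0x10 (one full
	 * sequence number).
	 */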
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup.
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;
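		/*
		 * Illustrative example: a 1024-byte frame at 11 Mbps
		 * (hwrate->bitrate == 110, in 100 kbit/s units) gives
		 * duration = 1024 * 8 * 10 / 110 = 744 us with residual 80,
		 * so a LENGTH value of 745 us is programmed above.
		 */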
		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings at zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}
static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
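	/*
	 * Note (added for clarity, an assumption about the flag's meaning):
	 * ENTRY_TXD_RETRY_MODE is understood to make the device use its
	 * long retry count for this frame instead of the short one.
	 */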
	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
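	/*
	 * Note (assumption about the surrounding driver setup): this
	 * headroom is advertised to mac80211 via hw->extra_tx_headroom,
	 * so for frames handed to us by mac80211 the skb_push() above
	 * operates on space that was already reserved.
	 */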
	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}
static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which in some way is related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out, but just don't track
	 * it in our bar list. As a result we will report it back to mac80211
	 * as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * so we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);
	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
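	/*
	 * Note: the queue is a ring buffer, so index_start may lie beyond
	 * index_end; in that case the loop is split in two below, first
	 * running up to queue->limit and then from entry 0 onwards.
	 */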
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
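/*
 * Note: Q_INDEX points at the next entry that will be handed to the
 * device, while Q_INDEX_DONE points at the next entry the device is
 * expected to complete; queue->length is the distance between the two.
 */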
void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if the driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
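	/*
	 * Note: the allocation above lays out all queue_entry structures
	 * first, followed by one priv_size-sized private data block per
	 * entry; the macro below computes the offset of entry __index's
	 * private data within that single allocation.
	 */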
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00queue_free_skb(&queue->entries[i]);
}
static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);
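	/*
	 * Note: rt2x00queue_threshold() compares the number of free entries
	 * against the value set below, so a queue counts as "almost full"
	 * once less than roughly 10% of its entries are still available.
	 */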
	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
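	/*
	 * Illustrative layout for a device with 4 TX queues and an ATIM
	 * queue: queue[0] = RX, queue[1]..queue[4] = TX (VO/VI/BE/BK),
	 * queue[5] = beacon, queue[6] = ATIM.
	 */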
	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}