// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"
/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};
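
/* Items live in a fixed-size pool allocated at queue init time. Free
 * slots sit on free_pool, queued frames on queue, in-flight frames on
 * pending; every transition is a list_move(), which keeps put/get O(1).
 */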
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}
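
/* A packet ID packs four cursors into 32 bits, so a completed frame can
 * be traced back to its pool slot in O(1):
 *
 *	bits 31..24	queue generation (bumped by cw1200_queue_clear())
 *	bits 23..16	queue ID
 *	bits 15..8	item generation (bumped on requeue)
 *	bits  7..0	item index within queue->pool
 *
 * Mismatched generations expose stale IDs from before a clear/requeue.
 */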
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}
static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}
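
/* Example: cw1200_queue_mk_packet_id(1, 2, 0, 5) == 0x01020005. */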
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			  GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}
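
/* GC runs in two phases: under the queue lock, expired items are
 * duplicated onto a private gc_list (kmemdup above) and their pool slots
 * recycled; cw1200_queue_post_gc() then runs the skb destructors on the
 * copies and frees them, keeping the destructor calls out of the
 * queue-lock critical section.
 */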
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *iter, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
		if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
			item = iter;
			break;
		}
		--queue->num_queued;
		--queue->link_map_cache[iter->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[iter->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, iter);
		iter->skb = NULL;
		list_move_tail(&iter->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}
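
/* Timer callback: collect expired frames under the queue lock, then
 * destroy the collected copies with the lock released.
 */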
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id, size_t capacity, unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}
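
/* A minimal bring-up sketch, assuming one queue per mac80211 AC; the
 * capacity and TTL values here are illustrative, not necessarily what
 * the driver passes:
 *
 *	cw1200_queue_stats_init(&priv->tx_queue_stats, map_capacity,
 *				cw1200_skb_dtor, priv);
 *	for (i = 0; i < 4; ++i)
 *		cw1200_queue_init(&priv->tx_queue[i], &priv->tx_queue_stats,
 *				  i, 16, msecs_to_jiffies(1000));
 */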
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}
void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
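
/* Fetch the first queued frame whose link ID is in link_id_map and move
 * it to the pending list; the frame stays there until it is removed or
 * requeued by packet ID.
 */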
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
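
/* Return every pending frame to the head of the TX queue. Iterating the
 * pending list in reverse preserves the original queueing order.
 */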
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Do not use list_move_tail here, but list_move:
		 * try to utilize cache row.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}
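
/* Peek at an in-flight frame's skb and txpriv by packet ID without
 * dequeuing it.
 */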
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}
void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}
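
/* Report whether any frames are pending and, if so, lower *timestamp to
 * the oldest xmit timestamp among them, skipping pending_frame_id; the
 * caller can use this e.g. to detect stuck frames.
 */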
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}