2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
45 #define WL1271_BOOT_RETRIES 3
46 #define WL1271_SUSPEND_SLEEP 100
47 #define WL1271_WAKEUP_TIMEOUT 500
49 static char *fwlog_param;
50 static int fwlog_mem_blocks = -1;
51 static int bug_on_recovery = -1;
52 static int no_recovery = -1;
54 static void __wl1271_op_remove_interface(struct wl1271 *wl,
55 struct ieee80211_vif *vif,
56 bool reset_tx_queues);
57 static void wlcore_op_stop_locked(struct wl1271 *wl);
58 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
64 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
67 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
70 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
73 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
77 wl1271_info("Association completed.");
81 static void wl1271_reg_notify(struct wiphy *wiphy,
82 struct regulatory_request *request)
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
87 /* copy the current dfs region */
89 wl->dfs_region = request->dfs_region;
91 wlcore_regdomain_config(wl);
94 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
99 /* we should hold wl->mutex */
100 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
105 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
107 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 * this function is being called when the rx_streaming interval
114 * has beed changed or rx_streaming should be disabled
116 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
119 int period = wl->conf.rx_streaming.interval;
121 /* don't reconfigure if rx_streaming is disabled */
122 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
125 /* reconfigure/disable according to new streaming_period */
127 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
128 (wl->conf.rx_streaming.always ||
129 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
130 ret = wl1271_set_rx_streaming(wl, wlvif, true);
132 ret = wl1271_set_rx_streaming(wl, wlvif, false);
133 /* don't cancel_work_sync since we might deadlock */
134 del_timer_sync(&wlvif->rx_streaming_timer);
140 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
143 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
144 rx_streaming_enable_work);
145 struct wl1271 *wl = wlvif->wl;
147 mutex_lock(&wl->mutex);
149 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
150 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
151 (!wl->conf.rx_streaming.always &&
152 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
155 if (!wl->conf.rx_streaming.interval)
158 ret = pm_runtime_get_sync(wl->dev);
160 pm_runtime_put_noidle(wl->dev);
164 ret = wl1271_set_rx_streaming(wl, wlvif, true);
168 /* stop it after some time of inactivity */
169 mod_timer(&wlvif->rx_streaming_timer,
170 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
173 pm_runtime_mark_last_busy(wl->dev);
174 pm_runtime_put_autosuspend(wl->dev);
176 mutex_unlock(&wl->mutex);
179 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
182 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
183 rx_streaming_disable_work);
184 struct wl1271 *wl = wlvif->wl;
186 mutex_lock(&wl->mutex);
188 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
191 ret = pm_runtime_get_sync(wl->dev);
193 pm_runtime_put_noidle(wl->dev);
197 ret = wl1271_set_rx_streaming(wl, wlvif, false);
202 pm_runtime_mark_last_busy(wl->dev);
203 pm_runtime_put_autosuspend(wl->dev);
205 mutex_unlock(&wl->mutex);
208 static void wl1271_rx_streaming_timer(struct timer_list *t)
210 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
211 struct wl1271 *wl = wlvif->wl;
212 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 /* wl->mutex must be taken */
216 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 /* if the watchdog is not armed, don't do anything */
219 if (wl->tx_allocated_blocks == 0)
222 cancel_delayed_work(&wl->tx_watchdog_work);
223 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
224 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 static void wlcore_rc_update_work(struct work_struct *work)
230 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
232 struct wl1271 *wl = wlvif->wl;
233 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
235 mutex_lock(&wl->mutex);
237 if (unlikely(wl->state != WLCORE_STATE_ON))
240 ret = pm_runtime_get_sync(wl->dev);
242 pm_runtime_put_noidle(wl->dev);
246 if (ieee80211_vif_is_mesh(vif)) {
247 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
248 true, wlvif->sta.hlid);
252 wlcore_hw_sta_rc_update(wl, wlvif);
256 pm_runtime_mark_last_busy(wl->dev);
257 pm_runtime_put_autosuspend(wl->dev);
259 mutex_unlock(&wl->mutex);
262 static void wl12xx_tx_watchdog_work(struct work_struct *work)
264 struct delayed_work *dwork;
267 dwork = to_delayed_work(work);
268 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
270 mutex_lock(&wl->mutex);
272 if (unlikely(wl->state != WLCORE_STATE_ON))
275 /* Tx went out in the meantime - everything is ok */
276 if (unlikely(wl->tx_allocated_blocks == 0))
280 * if a ROC is in progress, we might not have any Tx for a long
281 * time (e.g. pending Tx on the non-ROC channels)
283 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
284 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
285 wl->conf.tx.tx_watchdog_timeout);
286 wl12xx_rearm_tx_watchdog_locked(wl);
291 * if a scan is in progress, we might not have any Tx for a long
294 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
295 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_rearm_tx_watchdog_locked(wl);
302 * AP might cache a frame for a long time for a sleeping station,
303 * so rearm the timer if there's an AP interface with stations. If
304 * Tx is genuinely stuck we will most hopefully discover it when all
305 * stations are removed due to inactivity.
307 if (wl->active_sta_count) {
308 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
310 wl->conf.tx.tx_watchdog_timeout,
311 wl->active_sta_count);
312 wl12xx_rearm_tx_watchdog_locked(wl);
316 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
317 wl->conf.tx.tx_watchdog_timeout);
318 wl12xx_queue_recovery_work(wl);
321 mutex_unlock(&wl->mutex);
324 static void wlcore_adjust_conf(struct wl1271 *wl)
328 if (!strcmp(fwlog_param, "continuous")) {
329 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
331 } else if (!strcmp(fwlog_param, "dbgpins")) {
332 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
333 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
334 } else if (!strcmp(fwlog_param, "disable")) {
335 wl->conf.fwlog.mem_blocks = 0;
336 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
338 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
342 if (bug_on_recovery != -1)
343 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
345 if (no_recovery != -1)
346 wl->conf.recovery.no_recovery = (u8) no_recovery;
349 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
350 struct wl12xx_vif *wlvif,
355 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
358 * Wake up from high level PS if the STA is asleep with too little
359 * packets in FW or if the STA is awake.
361 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
362 wl12xx_ps_link_end(wl, wlvif, hlid);
365 * Start high-level PS if the STA is asleep with enough blocks in FW.
366 * Make an exception if this is the only connected link. In this
367 * case FW-memory congestion is less of a problem.
368 * Note that a single connected STA means 2*ap_count + 1 active links,
369 * since we must account for the global and broadcast AP links
370 * for each AP. The "fw_ps" check assures us the other link is a STA
371 * connected to the AP. Otherwise the FW would not set the PSM bit.
373 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
374 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
375 wl12xx_ps_link_start(wl, wlvif, hlid, true);
378 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
379 struct wl12xx_vif *wlvif,
380 struct wl_fw_status *status)
382 unsigned long cur_fw_ps_map;
385 cur_fw_ps_map = status->link_ps_bitmap;
386 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
387 wl1271_debug(DEBUG_PSM,
388 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
389 wl->ap_fw_ps_map, cur_fw_ps_map,
390 wl->ap_fw_ps_map ^ cur_fw_ps_map);
392 wl->ap_fw_ps_map = cur_fw_ps_map;
395 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
396 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
397 wl->links[hlid].allocated_pkts);
/*
 * Read the raw FW status block from the chip, convert it to host
 * layout, and refresh the driver's TX accounting: freed packets per AC
 * queue and per link, total freed/available blocks, AP link PS status
 * and the host<->chipset time offset.  Caller holds wl->mutex.
 *
 * NOTE(review): this extracted listing has lines elided (declarations
 * of ret/i/diff, several braces and error-return paths); the surviving
 * code text below is preserved unmodified.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
	struct wl12xx_vif *wlvif;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	struct wl1271_link *lnk;

	/* bulk-read the FW status area, then chip-specific conversion */
	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->fw_status_len, false);

	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];

	/* per-link accounting: lnk points at wl->links[i] (elided) */
	for_each_set_bit(i, wl->links_map, wl->num_links) {

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
		/* else branch: 32-bit counter wrapped, account for wrap */
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (wl->tx_allocated_blocks)
		wl12xx_rearm_tx_watchdog_locked(wl);
	/* else: */
		cancel_delayed_work(&wl->tx_watchdog_work);

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
	/* (second max() argument — avail — elided in this listing) */

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);

	/* update the host-chipset time offset */
	wl->time_offset = (ktime_get_boot_ns() >> 10) -
			  (s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;
507 static void wl1271_flush_deferred_work(struct wl1271 *wl)
511 /* Pass all received frames to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
513 ieee80211_rx_ni(wl->hw, skb);
515 /* Return sent skbs to the network stack */
516 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
517 ieee80211_tx_status_ni(wl->hw, skb);
520 static void wl1271_netstack_work(struct work_struct *work)
523 container_of(work, struct wl1271, netstack_work);
526 wl1271_flush_deferred_work(wl);
527 } while (skb_queue_len(&wl->deferred_rx_queue));
530 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Threaded-IRQ worker body: repeatedly (bounded by WL1271_IRQ_MAX_LOOPS)
 * read the FW status and dispatch watchdog, RX-data and event
 * interrupts, bracketed by runtime PM.  Caller holds wl->mutex.
 *
 * NOTE(review): declarations (ret, done, intr, flags), braces and
 * early-exit paths are elided in this extracted listing; the surviving
 * code text is preserved unmodified.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	unsigned int defer_count;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))

	ret = pm_runtime_get_sync(wl->dev);
		pm_runtime_put_noidle(wl->dev);

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_atomic();

		ret = wlcore_fw_status(wl, wl->fw_status);

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;

			/* restarting the chip. ignore any other interrupt. */

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;

			/* restarting the chip. ignore any other interrupt. */

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
			/* else branch (brace elided): */
				spin_unlock_irqrestore(&wl->wl_lock, flags);

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
/*
 * Threaded IRQ handler: completes a pending ELP wakeup, defers all
 * work while the device is suspended (marking the IRQ pending and
 * disabling it), otherwise runs wlcore_irq_locked() under wl->mutex
 * and queues recovery / TX work as needed.
 *
 * NOTE(review): declarations (ret, flags), the `if (wl->elp_compl)`
 * guard, some braces and the return statements are elided in this
 * extracted listing; the surviving code text is preserved unmodified.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	/* (guarded by `if (wl->elp_compl)` — elided in this listing) */
		complete(wl->elp_compl);
		wl->elp_compl = NULL;

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	/* a negative ret means the FW is wedged — kick recovery */
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);
706 mutex_unlock(&wl->mutex);
/*
 * Accumulator passed to wl12xx_vif_count_iter() while iterating the
 * active mac80211 interfaces; records whether cur_vif is among them.
 * NOTE(review): the running-interface count field and the closing
 * brace are elided in this extracted listing.
 */
struct vif_counter_data {
	struct ieee80211_vif *cur_vif;	/* the vif being added/changed */
	bool cur_vif_running;		/* true if cur_vif is already active */
718 static void wl12xx_vif_count_iter(void *data, u8 *mac,
719 struct ieee80211_vif *vif)
721 struct vif_counter_data *counter = data;
724 if (counter->cur_vif == vif)
725 counter->cur_vif_running = true;
728 /* caller must not hold wl->mutex, as it might deadlock */
729 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
730 struct ieee80211_vif *cur_vif,
731 struct vif_counter_data *data)
733 memset(data, 0, sizeof(*data));
734 data->cur_vif = cur_vif;
736 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
737 wl12xx_vif_count_iter, data);
/*
 * Select and load the firmware image for the current mode: PLT fw when
 * @plt, multi-role fw when more than one vif was last active and an MR
 * image exists, single-role fw otherwise.  On success the image is
 * copied into a vmalloc'ed buffer (wl->fw) and wl->fw_type is set.
 *
 * NOTE(review): this tree uses the linux-libre `reject_firmware` stub
 * in place of request_firmware.  Declarations (fw_name, ret), if/else
 * braces, the size%4 guard body and the exit path are elided in this
 * extracted listing; the surviving code text is preserved unmodified.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
	const struct firmware *fw;
	enum wl12xx_fw_type fw_type;

	/* PLT branch (condition elided): */
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		/* else: fall back to the single-role image */
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;

	/* requested image already loaded — nothing to do */
	if (wl->fw_type == fw_type)

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = reject_firmware(&fw, fw_name, wl->dev);

		wl1271_error("could not get firmware %s: %d", fw_name, ret);

		/* guarded by `if (fw->size % 4)` (elided) */
		wl1271_error("firmware size is not multiple of 32 bits: %zu",

	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

		wl1271_error("could not allocate memory for the firmware");

	memcpy(wl->fw, fw->data, wl->fw_len);

	wl->fw_type = fw_type;

	release_firmware(fw);
803 void wl12xx_queue_recovery_work(struct wl1271 *wl)
805 /* Avoid a recursive recovery */
806 if (wl->state == WLCORE_STATE_ON) {
807 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
810 wl->state = WLCORE_STATE_RESTARTING;
811 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
812 ieee80211_queue_work(wl->hw, &wl->recovery_work);
816 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
820 /* Make sure we have enough room */
821 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
823 /* Fill the FW log file, consumed by the sysfs fwlog entry */
824 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
825 wl->fwlog_size += len;
830 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
835 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
838 wl1271_info("Reading FW panic log");
841 * Make sure the chip is awake and the logger isn't active.
842 * Do not send a stop fwlog command if the fw is hanged or if
843 * dbgpins are used (due to some fw bug).
845 error = pm_runtime_get_sync(wl->dev);
847 pm_runtime_put_noidle(wl->dev);
850 if (!wl->watchdog_recovery &&
851 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
852 wl12xx_cmd_stop_fwlog(wl);
854 /* Traverse the memory blocks linked list */
856 end_of_log = wlcore_event_fw_logger(wl);
857 if (end_of_log == 0) {
859 end_of_log = wlcore_event_fw_logger(wl);
861 } while (end_of_log != 0);
864 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
865 u8 hlid, struct ieee80211_sta *sta)
867 struct wl1271_station *wl_sta;
868 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
870 wl_sta = (void *)sta->drv_priv;
871 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
874 * increment the initial seq number on recovery to account for
875 * transmitted packets that we haven't yet got in the FW status
877 if (wlvif->encryption_type == KEY_GEM)
878 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
880 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
881 wl_sta->total_freed_pkts += sqn_recovery_padding;
884 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
885 struct wl12xx_vif *wlvif,
886 u8 hlid, const u8 *addr)
888 struct ieee80211_sta *sta;
889 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
891 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
892 is_zero_ether_addr(addr)))
896 sta = ieee80211_find_sta(vif, addr);
898 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
902 static void wlcore_print_recovery(struct wl1271 *wl)
908 wl1271_info("Hardware recovery in progress. FW ver: %s",
909 wl->chip.fw_ver_str);
911 /* change partitions momentarily so we can read the FW pc */
912 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
916 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
920 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
924 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
925 pc, hint_sts, ++wl->recovery_count);
927 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Full FW recovery: optionally dump the panic log and diagnostics,
 * honor the no_recovery/bug_on_recovery policy knobs, save TX sequence
 * counters for associated STA vifs, tear down every interface, stop
 * the chip and ask mac80211 to restart the hardware.
 *
 * NOTE(review): several lines (the `struct wl1271 *wl =` declaration,
 * the `error` declaration, braces and goto/out labels) are elided in
 * this extracted listing; the surviving code text is preserved
 * unmodified.
 */
static void wl1271_recovery_work(struct work_struct *work)
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	/* nothing to recover if the chip is off or in PLT mode */
	if (wl->state == WLCORE_STATE_OFF || wl->plt)

	error = pm_runtime_get_sync(wl->dev);
		wl1271_warning("Enable for recovery failed");
		pm_runtime_put_noidle(wl->dev);

	wlcore_disable_interrupts_nosync(wl);

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);

	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
					 struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);

		/* preserve the TX seq counter across the restart */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
						    vif->bss_conf.bssid);

		__wl1271_op_remove_interface(wl, vif, false);

	wlcore_op_stop_locked(wl);
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
1003 static int wlcore_fw_wakeup(struct wl1271 *wl)
1005 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1008 static int wl1271_setup(struct wl1271 *wl)
1010 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1011 if (!wl->raw_fw_status)
1014 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1018 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1024 kfree(wl->fw_status);
1025 kfree(wl->raw_fw_status);
1029 static int wl12xx_set_power_on(struct wl1271 *wl)
1033 msleep(WL1271_PRE_POWER_ON_SLEEP);
1034 ret = wl1271_power_on(wl);
1037 msleep(WL1271_POWER_ON_SLEEP);
1038 wl1271_io_reset(wl);
1041 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1045 /* ELP module wake up */
1046 ret = wlcore_fw_wakeup(wl);
1054 wl1271_power_off(wl);
/*
 * Power up the chip, configure the bus block size, allocate the status
 * buffers and fetch the appropriate firmware image.
 *
 * NOTE(review): the `int ret` declaration, error-check lines, braces
 * and the exit path are elided in this extracted listing; the
 * surviving code text is preserved unmodified.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
	ret = wl12xx_set_power_on(wl);

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);

	ret = wl12xx_fetch_firmware(wl, plt);
	/* on fetch failure the status buffers are released again: */
		kfree(wl->fw_status);
		kfree(wl->raw_fw_status);
		kfree(wl->tx_res_if);
/*
 * Boot the chip into PLT (Production Line Testing) mode.  Only legal
 * from the OFF state; retries the boot up to WL1271_BOOT_RETRIES
 * times, skipping plt_init when only PLT_CHIP_AWAKE was requested.
 *
 * NOTE(review): the retry loop structure, the PLT_MODE[] string
 * entries, `int ret`, braces and goto labels are elided in this
 * extracted listing; the surviving code text is preserved unmodified.
 */
int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
	int retries = WL1271_BOOT_RETRIES;
	struct wiphy *wiphy = wl->hw->wiphy;

	static const char* const PLT_MODE[] = {

	mutex_lock(&wl->mutex);

	wl1271_notice("power up");

	if (wl->state != WLCORE_STATE_OFF) {
		wl1271_error("cannot go into PLT state because not "
			     "in off state: %d", wl->state);

	/* Indicate to lower levels that we are now in PLT mode */
	wl->plt_mode = plt_mode;

	/* retry loop body (while (retries) { retries--; ...): */
		ret = wl12xx_chip_wakeup(wl, true);

		if (plt_mode != PLT_CHIP_AWAKE) {
			ret = wl->ops->plt_init(wl);

		wl->state = WLCORE_STATE_ON;
		wl1271_notice("firmware booted in PLT mode %s (%s)",
			      wl->chip.fw_ver_str);

		/* update hw/fw version info in wiphy struct */
		wiphy->hw_version = wl->chip.id;
		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
			sizeof(wiphy->fw_version));

		/* on boot failure within the loop: */
		wl1271_power_off(wl);

	wl->plt_mode = PLT_OFF;

	wl1271_error("firmware boot in PLT mode failed despite %d retries",
		     WL1271_BOOT_RETRIES);

	mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down.  Interrupts are disabled
 * before the state transition so the IRQ handler cannot race with it;
 * pending work is then flushed/cancelled and the chip powered off.
 *
 * NOTE(review): `int ret`, the not-in-PLT guard condition, braces and
 * some state-reset assignments are elided in this extracted listing;
 * the surviving code text is preserved unmodified.
 */
int wl1271_plt_stop(struct wl1271 *wl)
	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	/* error path (not in PLT state): */
	mutex_unlock(&wl->mutex);

	/*
	 * This will not necessarily enable interrupts as interrupts
	 * may have been disabled when op_stop was called. It will,
	 * however, balance the above call to disable_interrupts().
	 */
	wlcore_enable_interrupts(wl);

	wl1271_error("cannot power down because not in PLT "
		     "state: %d", wl->state);

	/* normal path continues here: */
	mutex_unlock(&wl->mutex);

	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt_mode = PLT_OFF;
	mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point.  Maps the skb to an AC queue and FW link
 * (hlid), drops it if the link is invalid or the queue hard-stopped,
 * otherwise enqueues it on the per-link queue, applies the high
 * watermark back-pressure and kicks the TX work if needed.
 *
 * NOTE(review): declarations (q, mapping, hlid), the no-vif guard
 * condition, braces and goto labels are elided in this extracted
 * listing; the surviving code text is preserved unmodified.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;

	/* frames without a vif cannot be mapped to a role — drop */
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",

	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
1288 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1290 unsigned long flags;
1293 /* no need to queue a new dummy packet if one is already pending */
1294 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1297 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1299 spin_lock_irqsave(&wl->wl_lock, flags);
1300 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1301 wl->tx_queue_count[q]++;
1302 spin_unlock_irqrestore(&wl->wl_lock, flags);
1304 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1305 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1306 return wlcore_tx_work_locked(wl);
1309 * If the FW TX is busy, TX work will be scheduled by the threaded
1310 * interrupt handler function
1316 * The size of the dummy packet should be at least 1400 bytes. However, in
1317 * order to minimize the number of bus transactions, aligning it to 512 bytes
1318 * boundaries could be beneficial, performance wise
1320 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1322 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1324 struct sk_buff *skb;
1325 struct ieee80211_hdr_3addr *hdr;
1326 unsigned int dummy_packet_size;
1328 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1329 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1331 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1333 wl1271_warning("Failed to allocate a dummy packet skb");
1337 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1339 hdr = skb_put_zero(skb, sizeof(*hdr));
1340 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1341 IEEE80211_STYPE_NULLFUNC |
1342 IEEE80211_FCTL_TODS);
1344 skb_put_zero(skb, dummy_packet_size);
1346 /* Dummy packets require the TID to be management */
1347 skb->priority = WL1271_TID_MGMT;
1349 /* Initialize all fields that might be used */
1350 skb_set_queue_mapping(skb, 0);
1351 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a WoWLAN packet pattern against the FW RX-filter limits:
 * a mask must be present, the number of contiguous segments ("fields")
 * must not exceed WL1271_RX_FILTER_MAX_FIELDS and their combined size
 * must fit in the FW fields buffer.
 *
 * NOTE(review): the return type line, the mask-presence guard, several
 * braces and the in_field state transitions are elided in this
 * extracted listing; the surviving code text is preserved unmodified.
 */
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	/* guarded by a !p->mask check (elided): */
		wl1271_warning("No mask in WoWLAN pattern");

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {

			/* a field crossing the ETH header boundary splits: */
			if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {

				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;

			/* closing a field when the mask bit run ends: */
			fields_size += pattern_len +
				RX_FILTER_FIELD_OVERHEAD;

	/* final open field, if any: */
	fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees with wl1271_rx_filter_free(). */
1419 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1421 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including each field's allocated pattern buffer. */
1424 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1431 for (i = 0; i < filter->num_fields; i++)
1432 kfree(filter->fields[i].pattern);
/*
 * Append one field to an RX filter.
 *
 * @offset:  byte offset of the field (stored little-endian for the FW)
 * @flags:   field flags (e.g. Ethernet vs IP header relative)
 * @pattern: bytes to match; copied into a freshly allocated buffer
 * @len:     pattern length in bytes
 *
 * Fails (with a warning) when the filter already holds
 * WL1271_RX_FILTER_MAX_FIELDS fields or the pattern copy cannot be
 * allocated.
 */
1437 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1438 u16 offset, u8 flags,
1439 const u8 *pattern, u8 len)
1441 struct wl12xx_rx_filter_field *field;
1443 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1444 wl1271_warning("Max fields per RX filter. can't alloc another");
1448 field = &filter->fields[filter->num_fields];
1450 field->pattern = kzalloc(len, GFP_KERNEL);
1451 if (!field->pattern) {
1452 wl1271_warning("Failed to allocate RX filter pattern");
1456 filter->num_fields++;
1458 field->offset = cpu_to_le16(offset);
1459 field->flags = flags;
1461 memcpy(field->pattern, pattern, len);
/*
 * Compute the flattened (FW wire-format) size of all fields: per field,
 * the struct size minus the pattern pointer, plus the pattern bytes.
 */
1466 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1468 int i, fields_size = 0;
1470 for (i = 0; i < filter->num_fields; i++)
1471 fields_size += filter->fields[i].len +
1472 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a flat buffer in the FW wire
 * format: each field header is followed inline by its pattern bytes
 * (the in-memory pattern pointer is replaced by the pattern itself).
 * The buffer must be at least wl1271_rx_filter_get_fields_size() bytes.
 */
1478 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1482 struct wl12xx_rx_filter_field *field;
1484 for (i = 0; i < filter->num_fields; i++) {
1485 field = (struct wl12xx_rx_filter_field *)buf;
1487 field->offset = filter->fields[i].offset;
1488 field->flags = filter->fields[i].flags;
1489 field->len = filter->fields[i].len;
1491 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
/* advance past the header (minus the pointer) and the inlined pattern */
1492 buf += sizeof(struct wl12xx_rx_filter_field) -
1493 sizeof(u8 *) + field->len;
1498 * Allocates an RX filter returned through f
1499 * which needs to be freed using rx_filter_free()
1502 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1503 struct wl12xx_rx_filter **f)
1506 struct wl12xx_rx_filter *filter;
1510 filter = wl1271_rx_filter_alloc();
1512 wl1271_warning("Failed to alloc rx filter");
/* walk the mask, emitting one filter field per contiguous masked run */
1518 while (i < p->pattern_len) {
1519 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current run; stop at the Eth/IP boundary */
1524 for (j = i; j < p->pattern_len; j++) {
1525 if (!test_bit(j, (unsigned long *)p->mask))
1528 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1529 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* offsets below the boundary are Ethernet-header relative,
 * above it IP-header relative */
1533 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1535 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1537 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1538 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1543 ret = wl1271_rx_filter_alloc_field(filter,
1546 &p->pattern[i], len);
1553 filter->action = FILTER_SIGNAL;
/* error path: release the partially built filter */
1559 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from a cfg80211 WoWLAN configuration.
 *
 * With no usable patterns (NULL wow, "any" trigger, or zero patterns)
 * all filters are cleared and the default filter passes traffic up.
 * Otherwise: validate every pattern first, clear current FW filter
 * state, install one RX filter per pattern, and finally set the
 * default action to DROP so only matching packets wake the host.
 */
1565 static int wl1271_configure_wowlan(struct wl1271 *wl,
1566 struct cfg80211_wowlan *wow)
1570 if (!wow || wow->any || !wow->n_patterns) {
1571 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1576 ret = wl1271_rx_filter_clear_all(wl);
1583 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1586 /* Validate all incoming patterns before clearing current FW state */
1587 for (i = 0; i < wow->n_patterns; i++) {
1588 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1590 wl1271_warning("Bad wowlan pattern %d", i);
1595 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1599 ret = wl1271_rx_filter_clear_all(wl);
1603 /* Translate WoWLAN patterns into filters */
1604 for (i = 0; i < wow->n_patterns; i++) {
1605 struct cfg80211_pkt_pattern *p;
1606 struct wl12xx_rx_filter *filter = NULL;
1608 p = &wow->patterns[i];
1610 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1612 wl1271_warning("Failed to create an RX filter from "
1613 "wowlan pattern %d", i);
1617 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the local filter is always freed */
1619 wl1271_rx_filter_free(filter);
1624 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA interface for suspend: program WoWLAN
 * filters and, if the suspend wake-up conditions differ from the
 * runtime ones, switch the FW to the suspend wake-up event /
 * listen interval. Not-associated interfaces are left untouched.
 */
1630 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1631 struct wl12xx_vif *wlvif,
1632 struct cfg80211_wowlan *wow)
1636 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1639 ret = wl1271_configure_wowlan(wl, wow)
1643 if ((wl->conf.conn.suspend_wake_up_event ==
1644 wl->conf.conn.wake_up_event) &&
1645 (wl->conf.conn.suspend_listen_interval ==
1646 wl->conf.conn.listen_interval))
1649 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1650 wl->conf.conn.suspend_wake_up_event,
1651 wl->conf.conn.suspend_listen_interval);
1654 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Prepare a started AP interface for suspend: enable beacon filtering
 * and program WoWLAN filters. Not-started APs are left untouched.
 */
1660 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1661 struct wl12xx_vif *wlvif,
1662 struct cfg80211_wowlan *wow)
1666 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1669 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1673 ret = wl1271_configure_wowlan(wl, wow);
/* Dispatch per-vif suspend configuration by BSS type (STA or AP). */
1682 static int wl1271_configure_suspend(struct wl1271 *wl,
1683 struct wl12xx_vif *wlvif,
1684 struct cfg80211_wowlan *wow)
1686 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1687 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1688 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1689 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration for one vif on resume:
 * clear WoWLAN filters, restore the runtime wake-up conditions for an
 * associated STA, and disable beacon filtering for a started AP.
 * Interfaces that are neither (or not associated/started) are skipped.
 */
1693 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1696 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1697 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1699 if ((!is_ap) && (!is_sta))
1702 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1703 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow clears all RX filters and re-enables the default filter */
1706 wl1271_configure_wowlan(wl, NULL);
1709 if ((wl->conf.conn.suspend_wake_up_event ==
1710 wl->conf.conn.wake_up_event) &&
1711 (wl->conf.conn.suspend_listen_interval ==
1712 wl->conf.conn.listen_interval))
1715 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1716 wl->conf.conn.wake_up_event,
1717 wl->conf.conn.listen_interval);
1720 wl1271_error("resume: wake up conditions failed: %d",
1724 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback.
 *
 * Postpones suspend while a recovery is in progress, flushes TX, wakes
 * the device via runtime PM, configures each vif for suspend (WoWLAN,
 * wake-up conditions), disables fast-link notifications and optionally
 * RX BA sessions, flushes remaining work, cancels the TX watchdog, sets
 * the SUSPENDED flag so no new threaded IRQ work is queued, and finally
 * forces runtime suspend.
 */
1728 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1729 struct cfg80211_wowlan *wow)
1731 struct wl1271 *wl = hw->priv;
1732 struct wl12xx_vif *wlvif;
1733 unsigned long flags;
1736 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1739 /* we want to perform the recovery before suspending */
1740 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1741 wl1271_warning("postponing suspend to perform recovery");
1745 wl1271_tx_flush(wl);
1747 mutex_lock(&wl->mutex);
1749 ret = pm_runtime_get_sync(wl->dev);
1751 pm_runtime_put_noidle(wl->dev);
1752 mutex_unlock(&wl->mutex);
1756 wl->wow_enabled = true;
1757 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs carry no data; nothing to configure */
1758 if (wlcore_is_p2p_mgmt(wlvif))
1761 ret = wl1271_configure_suspend(wl, wlvif, wow);
1763 mutex_unlock(&wl->mutex);
1764 wl1271_warning("couldn't prepare device to suspend");
1769 /* disable fast link flow control notifications from FW */
1770 ret = wlcore_hw_interrupt_notify(wl, false);
1774 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1775 ret = wlcore_hw_rx_ba_filter(wl,
1776 !!wl->conf.conn.suspend_rx_ba_activity);
1781 pm_runtime_put_noidle(wl->dev);
1782 mutex_unlock(&wl->mutex);
1785 wl1271_warning("couldn't prepare device to suspend");
1789 /* flush any remaining work */
1790 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1792 flush_work(&wl->tx_work);
1795 * Cancel the watchdog even if above tx_flush failed. We will detect
1796 * it on resume anyway.
1798 cancel_delayed_work(&wl->tx_watchdog_work);
1801 * set suspended flag to avoid triggering a new threaded_irq
1804 spin_lock_irqsave(&wl->wl_lock, flags);
1805 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1806 spin_unlock_irqrestore(&wl->wl_lock, flags);
1808 return pm_runtime_force_suspend(wl->dev);
/*
 * mac80211 resume callback — mirror image of wl1271_op_suspend().
 *
 * Forces runtime resume, clears the SUSPENDED flag and runs any IRQ
 * work that was postponed while suspended (unless a recovery is
 * pending, in which case the recovery is queued instead), restores
 * each vif's runtime configuration, re-enables fast-link
 * notifications and RX BA sessions, and re-arms the TX watchdog on
 * the next TX.
 */
1811 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1813 struct wl1271 *wl = hw->priv;
1814 struct wl12xx_vif *wlvif;
1815 unsigned long flags;
1816 bool run_irq_work = false, pending_recovery;
1819 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1821 WARN_ON(!wl->wow_enabled);
1823 ret = pm_runtime_force_resume(wl->dev);
1825 wl1271_error("ELP wakeup failure!");
1830 * re-enable irq_work enqueuing, and call irq_work directly if
1831 * there is a pending work.
1833 spin_lock_irqsave(&wl->wl_lock, flags);
1834 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1835 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1836 run_irq_work = true;
1837 spin_unlock_irqrestore(&wl->wl_lock, flags);
1839 mutex_lock(&wl->mutex);
1841 /* test the recovery flag before calling any SDIO functions */
1842 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1846 wl1271_debug(DEBUG_MAC80211,
1847 "run postponed irq_work directly");
1849 /* don't talk to the HW if recovery is pending */
1850 if (!pending_recovery) {
1851 ret = wlcore_irq_locked(wl);
1853 wl12xx_queue_recovery_work(wl);
1856 wlcore_enable_interrupts(wl);
1859 if (pending_recovery) {
1860 wl1271_warning("queuing forgotten recovery on resume");
1861 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1865 ret = pm_runtime_get_sync(wl->dev);
1867 pm_runtime_put_noidle(wl->dev);
1871 wl12xx_for_each_wlvif(wl, wlvif) {
1872 if (wlcore_is_p2p_mgmt(wlvif))
1875 wl1271_configure_resume(wl, wlvif);
1878 ret = wlcore_hw_interrupt_notify(wl, true);
1882 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1883 ret = wlcore_hw_rx_ba_filter(wl, false);
1888 pm_runtime_mark_last_busy(wl->dev);
1889 pm_runtime_put_autosuspend(wl->dev);
1892 wl->wow_enabled = false;
1895 * Set a flag to re-init the watchdog on the first Tx after resume.
1896 * That way we avoid possible conditions where Tx-complete interrupts
1897 * fail to arrive and we perform a spurious recovery.
1899 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1900 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback. Intentionally does nothing: HW boot is
 * deferred to add_interface, since the firmware needs the MAC address
 * before download (see comment below).
 */
1905 static int wl1271_op_start(struct ieee80211_hw *hw)
1907 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1910 * We have to delay the booting of the hardware because
1911 * we need to know the local MAC address before downloading and
1912 * initializing the firmware. The MAC address cannot be changed
1913 * after boot, and without the proper MAC address, the firmware
1914 * will not function properly.
1916 * The MAC address is first known when the corresponding interface
1917 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held.
 *
 * Moves the state to OFF first (so work functions become no-ops),
 * disables and synchronizes interrupts (dropping the mutex around the
 * sync to avoid deadlock), cancels outstanding work, resets TX state,
 * powers the chip off, and re-initializes all runtime bookkeeping so
 * a later add_interface starts from a clean slate.
 */
1923 static void wlcore_op_stop_locked(struct wl1271 *wl)
1927 if (wl->state == WLCORE_STATE_OFF) {
/* already off: just balance the interrupt-disable done by recovery */
1928 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1930 wlcore_enable_interrupts(wl);
1936 * this must be before the cancel_work calls below, so that the work
1937 * functions don't perform further work.
1939 wl->state = WLCORE_STATE_OFF;
1942 * Use the nosync variant to disable interrupts, so the mutex could be
1943 * held while doing so without deadlocking.
1945 wlcore_disable_interrupts_nosync(wl);
1947 mutex_unlock(&wl->mutex);
1949 wlcore_synchronize_interrupts(wl);
1950 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1951 cancel_work_sync(&wl->recovery_work);
1952 wl1271_flush_deferred_work(wl);
1953 cancel_delayed_work_sync(&wl->scan_complete_work);
1954 cancel_work_sync(&wl->netstack_work);
1955 cancel_work_sync(&wl->tx_work);
1956 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1958 /* let's notify MAC80211 about the remaining pending TX frames */
1959 mutex_lock(&wl->mutex);
1960 wl12xx_tx_reset(wl);
1962 wl1271_power_off(wl);
1964 * In case a recovery was scheduled, interrupts were disabled to avoid
1965 * an interrupt storm. Now that the power is down, it is safe to
1966 * re-enable interrupts to balance the disable depth
1968 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1969 wlcore_enable_interrupts(wl);
/* reset all runtime state to defaults */
1971 wl->band = NL80211_BAND_2GHZ;
1974 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1975 wl->channel_type = NL80211_CHAN_NO_HT;
1976 wl->tx_blocks_available = 0;
1977 wl->tx_allocated_blocks = 0;
1978 wl->tx_results_count = 0;
1979 wl->tx_packets_count = 0;
1980 wl->time_offset = 0;
1981 wl->ap_fw_ps_map = 0;
1983 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1984 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1985 memset(wl->links_map, 0, sizeof(wl->links_map));
1986 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1987 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1988 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1989 wl->active_sta_count = 0;
1990 wl->active_link_count = 0;
1992 /* The system link is always allocated */
1993 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1994 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1995 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1998 * this is performed after the cancel_work calls and the associated
1999 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2000 * get executed before all these vars have been reset.
2004 wl->tx_blocks_freed = 0;
2006 for (i = 0; i < NUM_TX_QUEUES; i++) {
2007 wl->tx_pkts_freed[i] = 0;
2008 wl->tx_allocated_pkts[i] = 0;
2011 wl1271_debugfs_reset(wl);
/* release FW status / TX result buffers; reallocated on next boot */
2013 kfree(wl->raw_fw_status);
2014 wl->raw_fw_status = NULL;
2015 kfree(wl->fw_status);
2016 wl->fw_status = NULL;
2017 kfree(wl->tx_res_if);
2018 wl->tx_res_if = NULL;
2019 kfree(wl->target_mem_map);
2020 wl->target_mem_map = NULL;
2023 * FW channels must be re-calibrated after recovery,
2024 * save current Reg-Domain channel configuration and clear it.
2026 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2027 sizeof(wl->reg_ch_conf_pending));
2028 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the mutex and run the locked stop path. */
2031 static void wlcore_op_stop(struct ieee80211_hw *hw)
2033 struct wl1271 *wl = hw->priv;
2035 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2037 mutex_lock(&wl->mutex);
2039 wlcore_op_stop_locked(wl);
2041 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time.
 * Reports the failure to mac80211 (ieee80211_chswitch_done(..., false))
 * and tells the FW to abort the switch, waking the device for the
 * command via runtime PM.
 */
2044 static void wlcore_channel_switch_work(struct work_struct *work)
2046 struct delayed_work *dwork;
2048 struct ieee80211_vif *vif;
2049 struct wl12xx_vif *wlvif;
2052 dwork = to_delayed_work(work);
2053 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2056 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2058 mutex_lock(&wl->mutex);
2060 if (unlikely(wl->state != WLCORE_STATE_ON))
2063 /* check the channel switch is still ongoing */
2064 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2067 vif = wl12xx_wlvif_to_vif(wlvif);
2068 ieee80211_chswitch_done(vif, false);
2070 ret = pm_runtime_get_sync(wl->dev);
2072 pm_runtime_put_noidle(wl->dev);
2076 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2078 pm_runtime_mark_last_busy(wl->dev);
2079 pm_runtime_put_autosuspend(wl->dev);
2081 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a lost connection to mac80211 for a STA
 * vif which is still marked associated. No-op when the device is not
 * ON or the vif is no longer associated.
 */
2084 static void wlcore_connection_loss_work(struct work_struct *work)
2086 struct delayed_work *dwork;
2088 struct ieee80211_vif *vif;
2089 struct wl12xx_vif *wlvif;
2091 dwork = to_delayed_work(work);
2092 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2095 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2097 mutex_lock(&wl->mutex);
2099 if (unlikely(wl->state != WLCORE_STATE_ON))
2102 /* Call mac80211 connection loss */
2103 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2106 vif = wl12xx_wlvif_to_vif(wlvif);
2107 ieee80211_connection_loss(vif);
2109 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels the pending-auth ROC (remain-on-channel)
 * once the auth-reply timeout has genuinely elapsed. If a newer auth
 * reply arrived while this work waited on the mutex, the ROC is kept.
 */
2112 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2114 struct delayed_work *dwork;
2116 struct wl12xx_vif *wlvif;
2117 unsigned long time_spare;
2120 dwork = to_delayed_work(work);
2121 wlvif = container_of(dwork, struct wl12xx_vif,
2122 pending_auth_complete_work);
2125 mutex_lock(&wl->mutex);
2127 if (unlikely(wl->state != WLCORE_STATE_ON))
2131 * Make sure a second really passed since the last auth reply. Maybe
2132 * a second auth reply arrived while we were stuck on the mutex.
2133 * Check for a little less than the timeout to protect from scheduler
2136 time_spare = jiffies +
2137 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2138 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2141 ret = pm_runtime_get_sync(wl->dev);
2143 pm_runtime_put_noidle(wl->dev);
2147 /* cancel the ROC if active */
2148 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2150 pm_runtime_mark_last_busy(wl->dev);
2151 pm_runtime_put_autosuspend(wl->dev);
2153 mutex_unlock(&wl->mutex);
/*
 * Claim the first free rate-policy slot in wl->rate_policies_map and
 * return its index through *idx; fails when all slots are taken.
 */
2156 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2158 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2159 WL12XX_MAX_RATE_POLICIES);
2160 if (policy >= WL12XX_MAX_RATE_POLICIES)
2163 __set_bit(policy, wl->rate_policies_map);
/* Release a rate-policy slot and mark *idx invalid (== max). */
2168 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2170 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2173 __clear_bit(*idx, wl->rate_policies_map);
2174 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free keep-alive (KLV) template slot and return its
 * index through *idx; fails when all slots are taken.
 */
2177 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2179 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2180 WLCORE_MAX_KLV_TEMPLATES);
2181 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2184 __set_bit(policy, wl->klv_templates_map);
/* Release a KLV template slot and mark *idx invalid (== max). */
2189 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2191 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2194 __clear_bit(*idx, wl->klv_templates_map);
2195 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (and P2P/mesh variants) to the FW role type.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
2198 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2200 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2202 switch (wlvif->bss_type) {
2203 case BSS_TYPE_AP_BSS:
2205 return WL1271_ROLE_P2P_GO;
2206 else if (ieee80211_vif_is_mesh(vif))
2207 return WL1271_ROLE_MESH_POINT;
2209 return WL1271_ROLE_AP;
2211 case BSS_TYPE_STA_BSS:
2213 return WL1271_ROLE_P2P_CL;
2215 return WL1271_ROLE_STA;
2218 return WL1271_ROLE_IBSS;
2221 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2223 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver state for a newly added interface.
 *
 * Clears all non-persistent vif data, derives the BSS type from the
 * mac80211 interface type, allocates rate policies and (for STA/IBSS)
 * the keep-alive template, copies the global band/channel/power config
 * into the vif, and sets up the vif's work items and RX-streaming timer.
 */
2226 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2228 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2231 /* clear everything but the persistent data */
2232 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2234 switch (ieee80211_vif_type_p2p(vif)) {
2235 case NL80211_IFTYPE_P2P_CLIENT:
2238 case NL80211_IFTYPE_STATION:
2239 case NL80211_IFTYPE_P2P_DEVICE:
2240 wlvif->bss_type = BSS_TYPE_STA_BSS;
2242 case NL80211_IFTYPE_ADHOC:
2243 wlvif->bss_type = BSS_TYPE_IBSS;
2245 case NL80211_IFTYPE_P2P_GO:
2248 case NL80211_IFTYPE_AP:
2249 case NL80211_IFTYPE_MESH_POINT:
2250 wlvif->bss_type = BSS_TYPE_AP_BSS;
2253 wlvif->bss_type = MAX_BSS_TYPE;
/* roles/links start out invalid until the FW enables them */
2257 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2258 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2259 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2261 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2262 wlvif->bss_type == BSS_TYPE_IBSS) {
2263 /* init sta/ibss data */
2264 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2265 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2266 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2267 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2268 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2269 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2270 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2271 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2274 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2275 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2276 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2277 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2278 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2279 wl12xx_allocate_rate_policy(wl,
2280 &wlvif->ap.ucast_rate_idx[i]);
2281 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2283 * TODO: check if basic_rate shouldn't be
2284 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2285 * instead (the same thing for STA above).
2287 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2288 /* TODO: this seems to be used only for STA, check it */
2289 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2292 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2293 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2294 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2297 * mac80211 configures some values globally, while we treat them
2298 * per-interface. thus, on init, we have to copy them from wl
2300 wlvif->band = wl->band;
2301 wlvif->channel = wl->channel;
2302 wlvif->power_level = wl->power_level;
2303 wlvif->channel_type = wl->channel_type;
2305 INIT_WORK(&wlvif->rx_streaming_enable_work,
2306 wl1271_rx_streaming_enable_work);
2307 INIT_WORK(&wlvif->rx_streaming_disable_work,
2308 wl1271_rx_streaming_disable_work);
2309 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2310 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2311 wlcore_channel_switch_work);
2312 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2313 wlcore_connection_loss_work);
2314 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2315 wlcore_pending_auth_complete_work);
2316 INIT_LIST_HEAD(&wlvif->list);
2318 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES times.
 *
 * On each failed attempt the chip is powered off (with interrupts
 * flushed first — see inline comment) before retrying. On success the
 * wiphy hw/fw version info is filled in, 5 GHz channels are disabled
 * when the NVS says 11a is unsupported, and the state moves to ON.
 */
2322 static int wl12xx_init_fw(struct wl1271 *wl)
2324 int retries = WL1271_BOOT_RETRIES;
2325 bool booted = false;
2326 struct wiphy *wiphy = wl->hw->wiphy;
2331 ret = wl12xx_chip_wakeup(wl, false);
2335 ret = wl->ops->boot(wl);
2339 ret = wl1271_hw_init(wl);
2347 mutex_unlock(&wl->mutex);
2348 /* Unlocking the mutex in the middle of handling is
2349 inherently unsafe. In this case we deem it safe to do,
2350 because we need to let any possibly pending IRQ out of
2351 the system (and while we are WLCORE_STATE_OFF the IRQ
2352 work function will not do anything.) Also, any other
2353 possible concurrent operations will fail due to the
2354 current state, hence the wl1271 struct should be safe. */
2355 wlcore_disable_interrupts(wl);
2356 wl1271_flush_deferred_work(wl);
2357 cancel_work_sync(&wl->netstack_work);
2358 mutex_lock(&wl->mutex);
2360 wl1271_power_off(wl);
2364 wl1271_error("firmware boot failed despite %d retries",
2365 WL1271_BOOT_RETRIES);
2369 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2371 /* update hw/fw version info in wiphy struct */
2372 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy does not guarantee NUL-termination if
 * fw_ver_str fills fw_version exactly — confirm buffer sizes */
2373 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2374 sizeof(wiphy->fw_version));
2377 * Now we know if 11a is supported (info from the NVS), so disable
2378 * 11a channels if not supported
2380 if (!wl->enable_11a)
2381 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2383 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2384 wl->enable_11a ? "" : "not ");
2386 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid (started) device link. */
2391 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2393 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2397 * Check whether a fw switch (i.e. moving from one loaded
2398 * fw to another) is needed. This function is also responsible
2399 * for updating wl->last_vif_count, so it must be called before
2400 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2403 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2404 struct vif_counter_data vif_counter_data,
2407 enum wl12xx_fw_type current_fw = wl->fw_type;
2408 u8 vif_count = vif_counter_data.counter;
2410 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2413 /* increase the vif count if this is a new vif */
2414 if (add && !vif_counter_data.cur_vif_running)
2417 wl->last_vif_count = vif_count;
2419 /* no need for fw change if the device is OFF */
2420 if (wl->state == WLCORE_STATE_OFF)
2423 /* no need for fw change if a single fw is used */
2424 if (!wl->mr_fw_name)
/* switch to multi-role fw for >1 vif, back to single-role for <=1 */
2427 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2429 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2436 * Enter "forced psm". Make sure the sta is in psm against the ap,
2437 * to make the fw switch a bit more disconnection-persistent.
2439 static void wl12xx_force_active_psm(struct wl1271 *wl)
2441 struct wl12xx_vif *wlvif;
/* apply power-save mode to every STA vif before the fw switch */
2443 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2444 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for wlcore_hw_queue_iter(): which hw-queue bases are taken,
 * the vif being added, and whether that vif is already running. */
2448 struct wlcore_hw_queue_iter_data {
2449 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2451 struct ieee80211_vif *vif;
2452 /* is the current vif among those iterated */
/*
 * Interface iterator: record each active vif's hw-queue base in the
 * bitmap, skipping P2P_DEVICE vifs (they own no data queues) and
 * noting when the vif being added is itself already running.
 */
2456 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2457 struct ieee80211_vif *vif)
2459 struct wlcore_hw_queue_iter_data *iter_data = data;
2461 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2462 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2465 if (iter_data->cur_running || vif == iter_data->vif) {
2466 iter_data->cur_running = true;
2470 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to a vif.
 *
 * P2P_DEVICE vifs get no queues. If the vif already runs in mac80211
 * (resume/recovery) its existing base is reused; otherwise the first
 * free base is claimed from the bitmap built by wlcore_hw_queue_iter().
 * AP vifs additionally get a per-interface CAB (multicast) queue from
 * the reserved range past the unicast queues.
 */
2473 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2474 struct wl12xx_vif *wlvif)
2476 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2477 struct wlcore_hw_queue_iter_data iter_data = {};
2480 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2481 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2485 iter_data.vif = vif;
2487 /* mark all bits taken by active interfaces */
2488 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2489 IEEE80211_IFACE_ITER_RESUME_ALL,
2490 wlcore_hw_queue_iter, &iter_data);
2492 /* the current vif is already running in mac80211 (resume/recovery) */
2493 if (iter_data.cur_running) {
2494 wlvif->hw_queue_base = vif->hw_queue[0];
2495 wl1271_debug(DEBUG_MAC80211,
2496 "using pre-allocated hw queue base %d",
2497 wlvif->hw_queue_base);
2499 /* interface type might have changed type */
2500 goto adjust_cab_queue;
2503 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2504 WLCORE_NUM_MAC_ADDRESSES);
2505 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2508 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2509 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2510 wlvif->hw_queue_base);
2512 for (i = 0; i < NUM_TX_QUEUES; i++) {
2513 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2514 /* register hw queues in mac80211 */
2515 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2519 /* the last places are reserved for cab queues per interface */
2520 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2521 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2522 wlvif->hw_queue_base / NUM_TX_QUEUES;
2524 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback.
 *
 * Rejected in PLT mode and during recovery / double-add. Initializes
 * per-vif data, resolves the FW role type, allocates hw queues, and —
 * because the FW needs the MAC address before download — boots the
 * firmware here (copying the vif address into wl->addresses[0]) if the
 * device is still OFF. May trigger an intended recovery to swap
 * between single-role and multi-role firmware, then enables the FW
 * role(s) and runs vif-specific init.
 */
2529 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2530 struct ieee80211_vif *vif)
2532 struct wl1271 *wl = hw->priv;
2533 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2534 struct vif_counter_data vif_count;
2539 wl1271_error("Adding Interface not allowed while in PLT mode");
2543 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2544 IEEE80211_VIF_SUPPORTS_UAPSD |
2545 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2547 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2548 ieee80211_vif_type_p2p(vif), vif->addr);
2550 wl12xx_get_vif_count(hw, vif, &vif_count);
2552 mutex_lock(&wl->mutex);
2555 * in some very corner case HW recovery scenarios its possible to
2556 * get here before __wl1271_op_remove_interface is complete, so
2557 * opt out if that is the case.
2559 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2560 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2566 ret = wl12xx_init_vif_data(wl, vif);
2571 role_type = wl12xx_get_role_type(wl, wlvif);
2572 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2577 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2582 * TODO: after the nvs issue will be solved, move this block
2583 * to start(), and make sure here the driver is ON.
2585 if (wl->state == WLCORE_STATE_OFF) {
2587 * we still need this in order to configure the fw
2588 * while uploading the nvs
2590 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2592 ret = wl12xx_init_fw(wl);
2598 * Call runtime PM only after possible wl12xx_init_fw() above
2599 * is done. Otherwise we do not have interrupts enabled.
2601 ret = pm_runtime_get_sync(wl->dev);
2603 pm_runtime_put_noidle(wl->dev);
2607 if (wl12xx_need_fw_change(wl, vif_count, true)) {
/* swap single-role/multi-role fw via an intended recovery */
2608 wl12xx_force_active_psm(wl);
2609 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2610 mutex_unlock(&wl->mutex);
2611 wl1271_recovery_work(&wl->recovery_work);
2615 if (!wlcore_is_p2p_mgmt(wlvif)) {
2616 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2617 role_type, &wlvif->role_id);
2621 ret = wl1271_init_vif_specific(wl, vif);
2626 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2627 &wlvif->dev_role_id);
2631 /* needed mainly for configuring rate policies */
2632 ret = wl1271_sta_hw_init(wl, wlvif);
2637 list_add(&wlvif->list, &wl->wlvif_list);
2638 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2640 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2645 pm_runtime_mark_last_busy(wl->dev);
2646 pm_runtime_put_autosuspend(wl->dev);
2648 mutex_unlock(&wl->mutex);
2653 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2654 struct ieee80211_vif *vif,
2655 bool reset_tx_queues)
2657 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2659 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2661 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2663 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2666 /* because of hardware recovery, we may get here twice */
2667 if (wl->state == WLCORE_STATE_OFF)
2670 wl1271_info("down");
2672 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2673 wl->scan_wlvif == wlvif) {
2674 struct cfg80211_scan_info info = {
2679 * Rearm the tx watchdog just before idling scan. This
2680 * prevents just-finished scans from triggering the watchdog
2682 wl12xx_rearm_tx_watchdog_locked(wl);
2684 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2685 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2686 wl->scan_wlvif = NULL;
2687 wl->scan.req = NULL;
2688 ieee80211_scan_completed(wl->hw, &info);
2691 if (wl->sched_vif == wlvif)
2692 wl->sched_vif = NULL;
2694 if (wl->roc_vif == vif) {
2696 ieee80211_remain_on_channel_expired(wl->hw);
2699 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2700 /* disable active roles */
2701 ret = pm_runtime_get_sync(wl->dev);
2703 pm_runtime_put_noidle(wl->dev);
2707 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2708 wlvif->bss_type == BSS_TYPE_IBSS) {
2709 if (wl12xx_dev_role_started(wlvif))
2710 wl12xx_stop_dev(wl, wlvif);
2713 if (!wlcore_is_p2p_mgmt(wlvif)) {
2714 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2718 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2723 pm_runtime_mark_last_busy(wl->dev);
2724 pm_runtime_put_autosuspend(wl->dev);
2727 wl12xx_tx_reset_wlvif(wl, wlvif);
2729 /* clear all hlids (except system_hlid) */
2730 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2732 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2733 wlvif->bss_type == BSS_TYPE_IBSS) {
2734 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2735 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2736 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2737 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2738 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2740 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2741 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2742 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2743 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2744 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2745 wl12xx_free_rate_policy(wl,
2746 &wlvif->ap.ucast_rate_idx[i]);
2747 wl1271_free_ap_keys(wl, wlvif);
2750 dev_kfree_skb(wlvif->probereq);
2751 wlvif->probereq = NULL;
2752 if (wl->last_wlvif == wlvif)
2753 wl->last_wlvif = NULL;
2754 list_del(&wlvif->list);
2755 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2756 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2757 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2765 * Last AP, have more stations. Configure sleep auth according to STA.
 * Don't do this on unintended recovery.
2768 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2769 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2772 if (wl->ap_count == 0 && is_ap) {
2773 /* mask ap events */
2774 wl->event_mask &= ~wl->ap_event_mask;
2775 wl1271_event_unmask(wl);
2778 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2779 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2780 /* Configure for power according to debugfs */
2781 if (sta_auth != WL1271_PSM_ILLEGAL)
2782 wl1271_acx_sleep_auth(wl, sta_auth);
2783 /* Configure for ELP power saving */
2785 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2789 mutex_unlock(&wl->mutex);
2791 del_timer_sync(&wlvif->rx_streaming_timer);
2792 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2793 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2794 cancel_work_sync(&wlvif->rc_update_work);
2795 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2796 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2797 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2799 mutex_lock(&wl->mutex);
2802 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2803 struct ieee80211_vif *vif)
2805 struct wl1271 *wl = hw->priv;
2806 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2807 struct wl12xx_vif *iter;
2808 struct vif_counter_data vif_count;
2810 wl12xx_get_vif_count(hw, vif, &vif_count);
2811 mutex_lock(&wl->mutex);
2813 if (wl->state == WLCORE_STATE_OFF ||
2814 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2818 * wl->vif can be null here if someone shuts down the interface
2819 * just when hardware recovery has been started.
2821 wl12xx_for_each_wlvif(wl, iter) {
2825 __wl1271_op_remove_interface(wl, vif, true);
2828 WARN_ON(iter != wlvif);
2829 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2830 wl12xx_force_active_psm(wl);
2831 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2832 wl12xx_queue_recovery_work(wl);
2835 mutex_unlock(&wl->mutex);
2838 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2839 struct ieee80211_vif *vif,
2840 enum nl80211_iftype new_type, bool p2p)
2842 struct wl1271 *wl = hw->priv;
2845 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2846 wl1271_op_remove_interface(hw, vif);
2848 vif->type = new_type;
2850 ret = wl1271_op_add_interface(hw, vif);
2852 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2856 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2859 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2862 * One of the side effects of the JOIN command is that is clears
2863 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2864 * to a WPA/WPA2 access point will therefore kill the data-path.
2865 * Currently the only valid scenario for JOIN during association
2866 * is on roaming, in which case we will also be given new keys.
2867 * Keep the below message for now, unless it starts bothering
2868 * users who really like to roam a lot :)
2870 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2871 wl1271_info("JOIN while associated.");
2873 /* clear encryption type */
2874 wlvif->encryption_type = KEY_NONE;
2877 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2879 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2884 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2888 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2892 wl1271_error("No SSID in IEs!");
2897 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2898 wl1271_error("SSID is too long!");
2902 wlvif->ssid_len = ssid_len;
2903 memcpy(wlvif->ssid, ptr+2, ssid_len);
2907 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2909 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2910 struct sk_buff *skb;
2913 /* we currently only support setting the ssid from the ap probe req */
2914 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2917 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2921 ieoffset = offsetof(struct ieee80211_mgmt,
2922 u.probe_req.variable);
2923 wl1271_ssid_set(wlvif, skb, ieoffset);
2929 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2930 struct ieee80211_bss_conf *bss_conf,
2936 wlvif->aid = bss_conf->aid;
2937 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2938 wlvif->beacon_int = bss_conf->beacon_int;
2939 wlvif->wmm_enabled = bss_conf->qos;
2941 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2944 * with wl1271, we don't need to update the
2945 * beacon_int and dtim_period, because the firmware
2946 * updates it by itself when the first beacon is
2947 * received after a join.
2949 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2954 * Get a template for hardware connection maintenance
2956 dev_kfree_skb(wlvif->probereq);
2957 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2960 ieoffset = offsetof(struct ieee80211_mgmt,
2961 u.probe_req.variable);
2962 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2964 /* enable the connection monitoring feature */
2965 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2970 * The join command disable the keep-alive mode, shut down its process,
2971 * and also clear the template config, so we need to reset it all after
2972 * the join. The acx_aid starts the keep-alive process, and the order
2973 * of the commands below is relevant.
2975 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2979 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2983 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2987 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2988 wlvif->sta.klv_template_id,
2989 ACX_KEEP_ALIVE_TPL_VALID);
2994 * The default fw psm configuration is AUTO, while mac80211 default
2995 * setting is off (ACTIVE), so sync the fw with the correct value.
2997 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3003 wl1271_tx_enabled_rates_get(wl,
3006 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3014 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3017 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3019 /* make sure we are connected (sta) joined */
3021 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3024 /* make sure we are joined (ibss) */
3026 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3030 /* use defaults when not associated */
3033 /* free probe-request template */
3034 dev_kfree_skb(wlvif->probereq);
3035 wlvif->probereq = NULL;
3037 /* disable connection monitor features */
3038 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3042 /* Disable the keep-alive feature */
3043 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3047 /* disable beacon filtering */
3048 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3053 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3054 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3056 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3057 ieee80211_chswitch_done(vif, false);
3058 cancel_delayed_work(&wlvif->channel_switch_work);
3061 /* invalidate keep-alive template */
3062 wl1271_acx_keep_alive_config(wl, wlvif,
3063 wlvif->sta.klv_template_id,
3064 ACX_KEEP_ALIVE_TPL_INVALID);
3069 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3071 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3072 wlvif->rate_set = wlvif->basic_rate_set;
3075 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3078 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3080 if (idle == cur_idle)
3084 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3086 /* The current firmware only supports sched_scan in idle */
3087 if (wl->sched_vif == wlvif)
3088 wl->ops->sched_scan_stop(wl, wlvif);
3090 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3094 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3095 struct ieee80211_conf *conf, u32 changed)
3099 if (wlcore_is_p2p_mgmt(wlvif))
3102 if (conf->power_level != wlvif->power_level) {
3103 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3107 wlvif->power_level = conf->power_level;
3113 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3115 struct wl1271 *wl = hw->priv;
3116 struct wl12xx_vif *wlvif;
3117 struct ieee80211_conf *conf = &hw->conf;
3120 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3122 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3124 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3127 mutex_lock(&wl->mutex);
3129 if (changed & IEEE80211_CONF_CHANGE_POWER)
3130 wl->power_level = conf->power_level;
3132 if (unlikely(wl->state != WLCORE_STATE_ON))
3135 ret = pm_runtime_get_sync(wl->dev);
3137 pm_runtime_put_noidle(wl->dev);
3141 /* configure each interface */
3142 wl12xx_for_each_wlvif(wl, wlvif) {
3143 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3149 pm_runtime_mark_last_busy(wl->dev);
3150 pm_runtime_put_autosuspend(wl->dev);
3153 mutex_unlock(&wl->mutex);
3158 struct wl1271_filter_params {
3161 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3164 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3165 struct netdev_hw_addr_list *mc_list)
3167 struct wl1271_filter_params *fp;
3168 struct netdev_hw_addr *ha;
3170 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3172 wl1271_error("Out of memory setting filters.");
3176 /* update multicast filtering parameters */
3177 fp->mc_list_length = 0;
3178 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3179 fp->enabled = false;
3182 netdev_hw_addr_list_for_each(ha, mc_list) {
3183 memcpy(fp->mc_list[fp->mc_list_length],
3184 ha->addr, ETH_ALEN);
3185 fp->mc_list_length++;
3189 return (u64)(unsigned long)fp;
3192 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3194 FIF_BCN_PRBRESP_PROMISC | \
3198 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3199 unsigned int changed,
3200 unsigned int *total, u64 multicast)
3202 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3203 struct wl1271 *wl = hw->priv;
3204 struct wl12xx_vif *wlvif;
3208 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3209 " total %x", changed, *total);
3211 mutex_lock(&wl->mutex);
3213 *total &= WL1271_SUPPORTED_FILTERS;
3214 changed &= WL1271_SUPPORTED_FILTERS;
3216 if (unlikely(wl->state != WLCORE_STATE_ON))
3219 ret = pm_runtime_get_sync(wl->dev);
3221 pm_runtime_put_noidle(wl->dev);
3225 wl12xx_for_each_wlvif(wl, wlvif) {
3226 if (wlcore_is_p2p_mgmt(wlvif))
3229 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3230 if (*total & FIF_ALLMULTI)
3231 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3238 fp->mc_list_length);
3244 * If interface in AP mode and created with allmulticast then disable
3245 * the firmware filters so that all multicast packets are passed
3246 * This is mandatory for MDNS based discovery protocols
3248 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3249 if (*total & FIF_ALLMULTI) {
3250 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3260 * the fw doesn't provide an api to configure the filters. instead,
3261 * the filters configuration is based on the active roles / ROC
3266 pm_runtime_mark_last_busy(wl->dev);
3267 pm_runtime_put_autosuspend(wl->dev);
3270 mutex_unlock(&wl->mutex);
3274 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3275 u8 id, u8 key_type, u8 key_size,
3276 const u8 *key, u8 hlid, u32 tx_seq_32,
3279 struct wl1271_ap_key *ap_key;
3282 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3284 if (key_size > MAX_KEY_SIZE)
3288 * Find next free entry in ap_keys. Also check we are not replacing
3291 for (i = 0; i < MAX_NUM_KEYS; i++) {
3292 if (wlvif->ap.recorded_keys[i] == NULL)
3295 if (wlvif->ap.recorded_keys[i]->id == id) {
3296 wl1271_warning("trying to record key replacement");
3301 if (i == MAX_NUM_KEYS)
3304 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3309 ap_key->key_type = key_type;
3310 ap_key->key_size = key_size;
3311 memcpy(ap_key->key, key, key_size);
3312 ap_key->hlid = hlid;
3313 ap_key->tx_seq_32 = tx_seq_32;
3314 ap_key->tx_seq_16 = tx_seq_16;
3316 wlvif->ap.recorded_keys[i] = ap_key;
3320 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3324 for (i = 0; i < MAX_NUM_KEYS; i++) {
3325 kfree(wlvif->ap.recorded_keys[i]);
3326 wlvif->ap.recorded_keys[i] = NULL;
3330 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3333 struct wl1271_ap_key *key;
3334 bool wep_key_added = false;
3336 for (i = 0; i < MAX_NUM_KEYS; i++) {
3338 if (wlvif->ap.recorded_keys[i] == NULL)
3341 key = wlvif->ap.recorded_keys[i];
3343 if (hlid == WL12XX_INVALID_LINK_ID)
3344 hlid = wlvif->ap.bcast_hlid;
3346 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3347 key->id, key->key_type,
3348 key->key_size, key->key,
3349 hlid, key->tx_seq_32,
3354 if (key->key_type == KEY_WEP)
3355 wep_key_added = true;
3358 if (wep_key_added) {
3359 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3360 wlvif->ap.bcast_hlid);
3366 wl1271_free_ap_keys(wl, wlvif);
3370 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3371 u16 action, u8 id, u8 key_type,
3372 u8 key_size, const u8 *key, u32 tx_seq_32,
3373 u16 tx_seq_16, struct ieee80211_sta *sta)
3376 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3379 struct wl1271_station *wl_sta;
3383 wl_sta = (struct wl1271_station *)sta->drv_priv;
3384 hlid = wl_sta->hlid;
3386 hlid = wlvif->ap.bcast_hlid;
3389 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3391 * We do not support removing keys after AP shutdown.
3392 * Pretend we do to make mac80211 happy.
3394 if (action != KEY_ADD_OR_REPLACE)
3397 ret = wl1271_record_ap_key(wl, wlvif, id,
3399 key, hlid, tx_seq_32,
3402 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3403 id, key_type, key_size,
3404 key, hlid, tx_seq_32,
3412 static const u8 bcast_addr[ETH_ALEN] = {
3413 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3416 addr = sta ? sta->addr : bcast_addr;
3418 if (is_zero_ether_addr(addr)) {
3419 /* We dont support TX only encryption */
3423 /* The wl1271 does not allow to remove unicast keys - they
3424 will be cleared automatically on next CMD_JOIN. Ignore the
3425 request silently, as we dont want the mac80211 to emit
3426 an error message. */
3427 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3430 /* don't remove key if hlid was already deleted */
3431 if (action == KEY_REMOVE &&
3432 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3435 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3436 id, key_type, key_size,
3437 key, addr, tx_seq_32,
3447 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3448 struct ieee80211_vif *vif,
3449 struct ieee80211_sta *sta,
3450 struct ieee80211_key_conf *key_conf)
3452 struct wl1271 *wl = hw->priv;
3454 bool might_change_spare =
3455 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3456 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3458 if (might_change_spare) {
3460 * stop the queues and flush to ensure the next packets are
3461 * in sync with FW spare block accounting
3463 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3464 wl1271_tx_flush(wl);
3467 mutex_lock(&wl->mutex);
3469 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3471 goto out_wake_queues;
3474 ret = pm_runtime_get_sync(wl->dev);
3476 pm_runtime_put_noidle(wl->dev);
3477 goto out_wake_queues;
3480 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3482 pm_runtime_mark_last_busy(wl->dev);
3483 pm_runtime_put_autosuspend(wl->dev);
3486 if (might_change_spare)
3487 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3489 mutex_unlock(&wl->mutex);
3494 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3495 struct ieee80211_vif *vif,
3496 struct ieee80211_sta *sta,
3497 struct ieee80211_key_conf *key_conf)
3499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3506 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3508 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 key_conf->cipher, key_conf->keyidx,
3511 key_conf->keylen, key_conf->flags);
3512 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3514 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3516 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 hlid = wl_sta->hlid;
3519 hlid = wlvif->ap.bcast_hlid;
3522 hlid = wlvif->sta.hlid;
3524 if (hlid != WL12XX_INVALID_LINK_ID) {
3525 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3530 switch (key_conf->cipher) {
3531 case WLAN_CIPHER_SUITE_WEP40:
3532 case WLAN_CIPHER_SUITE_WEP104:
3535 key_conf->hw_key_idx = key_conf->keyidx;
3537 case WLAN_CIPHER_SUITE_TKIP:
3538 key_type = KEY_TKIP;
3539 key_conf->hw_key_idx = key_conf->keyidx;
3541 case WLAN_CIPHER_SUITE_CCMP:
3543 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3545 case WL1271_CIPHER_SUITE_GEM:
3549 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3556 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3557 key_conf->keyidx, key_type,
3558 key_conf->keylen, key_conf->key,
3559 tx_seq_32, tx_seq_16, sta);
3561 wl1271_error("Could not add or replace key");
3566 * reconfiguring arp response if the unicast (or common)
3567 * encryption key type was changed
3569 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3570 (sta || key_type == KEY_WEP) &&
3571 wlvif->encryption_type != key_type) {
3572 wlvif->encryption_type = key_type;
3573 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3575 wl1271_warning("build arp rsp failed: %d", ret);
3582 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3583 key_conf->keyidx, key_type,
3584 key_conf->keylen, key_conf->key,
3587 wl1271_error("Could not remove key");
3593 wl1271_error("Unsupported key cmd 0x%x", cmd);
3599 EXPORT_SYMBOL_GPL(wlcore_set_key);
3601 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3602 struct ieee80211_vif *vif,
3605 struct wl1271 *wl = hw->priv;
3606 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3612 /* we don't handle unsetting of default key */
3616 mutex_lock(&wl->mutex);
3618 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3623 ret = pm_runtime_get_sync(wl->dev);
3625 pm_runtime_put_noidle(wl->dev);
3629 wlvif->default_key = key_idx;
3631 /* the default WEP key needs to be configured at least once */
3632 if (wlvif->encryption_type == KEY_WEP) {
3633 ret = wl12xx_cmd_set_default_wep_key(wl,
3641 pm_runtime_mark_last_busy(wl->dev);
3642 pm_runtime_put_autosuspend(wl->dev);
3645 mutex_unlock(&wl->mutex);
3648 void wlcore_regdomain_config(struct wl1271 *wl)
3652 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3655 mutex_lock(&wl->mutex);
3657 if (unlikely(wl->state != WLCORE_STATE_ON))
3660 ret = pm_runtime_get_sync(wl->dev);
3662 pm_runtime_put_autosuspend(wl->dev);
3666 ret = wlcore_cmd_regdomain_config_locked(wl);
3668 wl12xx_queue_recovery_work(wl);
3672 pm_runtime_mark_last_busy(wl->dev);
3673 pm_runtime_put_autosuspend(wl->dev);
3675 mutex_unlock(&wl->mutex);
3678 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3679 struct ieee80211_vif *vif,
3680 struct ieee80211_scan_request *hw_req)
3682 struct cfg80211_scan_request *req = &hw_req->req;
3683 struct wl1271 *wl = hw->priv;
3688 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3691 ssid = req->ssids[0].ssid;
3692 len = req->ssids[0].ssid_len;
3695 mutex_lock(&wl->mutex);
3697 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3699 * We cannot return -EBUSY here because cfg80211 will expect
3700 * a call to ieee80211_scan_completed if we do - in this case
3701 * there won't be any call.
3707 ret = pm_runtime_get_sync(wl->dev);
3709 pm_runtime_put_noidle(wl->dev);
3713 /* fail if there is any role in ROC */
3714 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3715 /* don't allow scanning right now */
3720 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3722 pm_runtime_mark_last_busy(wl->dev);
3723 pm_runtime_put_autosuspend(wl->dev);
3725 mutex_unlock(&wl->mutex);
3730 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3731 struct ieee80211_vif *vif)
3733 struct wl1271 *wl = hw->priv;
3734 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3735 struct cfg80211_scan_info info = {
3740 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3742 mutex_lock(&wl->mutex);
3744 if (unlikely(wl->state != WLCORE_STATE_ON))
3747 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3750 ret = pm_runtime_get_sync(wl->dev);
3752 pm_runtime_put_noidle(wl->dev);
3756 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3757 ret = wl->ops->scan_stop(wl, wlvif);
3763 * Rearm the tx watchdog just before idling scan. This
3764 * prevents just-finished scans from triggering the watchdog
3766 wl12xx_rearm_tx_watchdog_locked(wl);
3768 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3769 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3770 wl->scan_wlvif = NULL;
3771 wl->scan.req = NULL;
3772 ieee80211_scan_completed(wl->hw, &info);
3775 pm_runtime_mark_last_busy(wl->dev);
3776 pm_runtime_put_autosuspend(wl->dev);
3778 mutex_unlock(&wl->mutex);
3780 cancel_delayed_work_sync(&wl->scan_complete_work);
3783 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3784 struct ieee80211_vif *vif,
3785 struct cfg80211_sched_scan_request *req,
3786 struct ieee80211_scan_ies *ies)
3788 struct wl1271 *wl = hw->priv;
3789 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3792 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3794 mutex_lock(&wl->mutex);
3796 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3801 ret = pm_runtime_get_sync(wl->dev);
3803 pm_runtime_put_noidle(wl->dev);
3807 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3811 wl->sched_vif = wlvif;
3814 pm_runtime_mark_last_busy(wl->dev);
3815 pm_runtime_put_autosuspend(wl->dev);
3817 mutex_unlock(&wl->mutex);
3821 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3822 struct ieee80211_vif *vif)
3824 struct wl1271 *wl = hw->priv;
3825 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3828 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3830 mutex_lock(&wl->mutex);
3832 if (unlikely(wl->state != WLCORE_STATE_ON))
3835 ret = pm_runtime_get_sync(wl->dev);
3837 pm_runtime_put_noidle(wl->dev);
3841 wl->ops->sched_scan_stop(wl, wlvif);
3843 pm_runtime_mark_last_busy(wl->dev);
3844 pm_runtime_put_autosuspend(wl->dev);
3846 mutex_unlock(&wl->mutex);
3851 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3853 struct wl1271 *wl = hw->priv;
3856 mutex_lock(&wl->mutex);
3858 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3863 ret = pm_runtime_get_sync(wl->dev);
3865 pm_runtime_put_noidle(wl->dev);
3869 ret = wl1271_acx_frag_threshold(wl, value);
3871 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3873 pm_runtime_mark_last_busy(wl->dev);
3874 pm_runtime_put_autosuspend(wl->dev);
3877 mutex_unlock(&wl->mutex);
3882 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3884 struct wl1271 *wl = hw->priv;
3885 struct wl12xx_vif *wlvif;
3888 mutex_lock(&wl->mutex);
3890 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3895 ret = pm_runtime_get_sync(wl->dev);
3897 pm_runtime_put_noidle(wl->dev);
3901 wl12xx_for_each_wlvif(wl, wlvif) {
3902 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3904 wl1271_warning("set rts threshold failed: %d", ret);
3906 pm_runtime_mark_last_busy(wl->dev);
3907 pm_runtime_put_autosuspend(wl->dev);
3910 mutex_unlock(&wl->mutex);
3915 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3918 const u8 *next, *end = skb->data + skb->len;
3919 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3920 skb->len - ieoffset);
3925 memmove(ie, next, end - next);
3926 skb_trim(skb, skb->len - len);
3929 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3930 unsigned int oui, u8 oui_type,
3934 const u8 *next, *end = skb->data + skb->len;
3935 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3936 skb->data + ieoffset,
3937 skb->len - ieoffset);
3942 memmove(ie, next, end - next);
3943 skb_trim(skb, skb->len - len);
3946 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3947 struct ieee80211_vif *vif)
3949 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3950 struct sk_buff *skb;
3953 skb = ieee80211_proberesp_get(wl->hw, vif);
3957 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3958 CMD_TEMPL_AP_PROBE_RESPONSE,
3967 wl1271_debug(DEBUG_AP, "probe response updated");
3968 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3974 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3975 struct ieee80211_vif *vif,
3977 size_t probe_rsp_len,
3980 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3981 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3982 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3983 int ssid_ie_offset, ie_offset, templ_len;
3986 /* no need to change probe response if the SSID is set correctly */
3987 if (wlvif->ssid_len > 0)
3988 return wl1271_cmd_template_set(wl, wlvif->role_id,
3989 CMD_TEMPL_AP_PROBE_RESPONSE,
3994 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3995 wl1271_error("probe_rsp template too big");
3999 /* start searching from IE offset */
4000 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4002 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4003 probe_rsp_len - ie_offset);
4005 wl1271_error("No SSID in beacon!");
4009 ssid_ie_offset = ptr - probe_rsp_data;
4010 ptr += (ptr[1] + 2);
4012 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4014 /* insert SSID from bss_conf */
4015 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4016 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4017 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4018 bss_conf->ssid, bss_conf->ssid_len);
4019 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4021 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4022 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4023 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4025 return wl1271_cmd_template_set(wl, wlvif->role_id,
4026 CMD_TEMPL_AP_PROBE_RESPONSE,
4032 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4033 struct ieee80211_vif *vif,
4034 struct ieee80211_bss_conf *bss_conf,
4037 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4040 if (changed & BSS_CHANGED_ERP_SLOT) {
4041 if (bss_conf->use_short_slot)
4042 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4044 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4046 wl1271_warning("Set slot time failed %d", ret);
4051 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4052 if (bss_conf->use_short_preamble)
4053 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4055 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4058 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4059 if (bss_conf->use_cts_prot)
4060 ret = wl1271_acx_cts_protect(wl, wlvif,
4063 ret = wl1271_acx_cts_protect(wl, wlvif,
4064 CTSPROTECT_DISABLE);
4066 wl1271_warning("Set ctsprotect failed %d", ret);
4075 static int wlcore_set_beacon_template(struct wl1271 *wl,
4076 struct ieee80211_vif *vif,
4079 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4080 struct ieee80211_hdr *hdr;
4083 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4084 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4092 wl1271_debug(DEBUG_MASTER, "beacon updated");
4094 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4096 dev_kfree_skb(beacon);
4099 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4100 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4102 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4107 dev_kfree_skb(beacon);
4111 wlvif->wmm_enabled =
4112 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4113 WLAN_OUI_TYPE_MICROSOFT_WMM,
4114 beacon->data + ieoffset,
4115 beacon->len - ieoffset);
4118 * In case we already have a probe-resp beacon set explicitly
4119 * by usermode, don't use the beacon data.
4121 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4124 /* remove TIM ie from probe response */
4125 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4128 * remove p2p ie from probe response.
4129 * the fw reponds to probe requests that don't include
4130 * the p2p ie. probe requests with p2p ie will be passed,
4131 * and will be responded by the supplicant (the spec
4132 * forbids including the p2p ie when responding to probe
4133 * requests that didn't include it).
4135 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4136 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4138 hdr = (struct ieee80211_hdr *) beacon->data;
4139 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4140 IEEE80211_STYPE_PROBE_RESP);
4142 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4147 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4148 CMD_TEMPL_PROBE_RESPONSE,
4153 dev_kfree_skb(beacon);
4161 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4162 struct ieee80211_vif *vif,
4163 struct ieee80211_bss_conf *bss_conf,
4166 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4167 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4170 if (changed & BSS_CHANGED_BEACON_INT) {
4171 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4172 bss_conf->beacon_int);
4174 wlvif->beacon_int = bss_conf->beacon_int;
4177 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4178 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4180 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4183 if (changed & BSS_CHANGED_BEACON) {
4184 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4188 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4190 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4197 wl1271_error("beacon info change failed: %d", ret);
4201 /* AP mode changes */
/*
 * Apply bss_conf changes for an AP-role vif: basic rates (and the
 * derived FW rate policies/templates), beacon templates, beacon
 * enable/disable (which starts/stops the FW AP role), ERP info and
 * HT operation mode.
 *
 * NOTE(review): this excerpt elides source lines (original line
 * numbering jumps); comments describe only the visible statements.
 * Presumably called with wl->mutex held and the chip awake, as done
 * by wl1271_op_bss_info_changed() — confirm against the full file.
 */
4202 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4203 struct ieee80211_vif *vif,
4204 struct ieee80211_bss_conf *bss_conf,
4207 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4210 if (changed & BSS_CHANGED_BASIC_RATES) {
4211 u32 rates = bss_conf->basic_rates;
/* recompute the enabled-rate bitmap and its minimum rate */
4213 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4215 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4216 wlvif->basic_rate_set);
/* re-program FW rate policies and management templates to match */
4218 ret = wl1271_init_ap_rates(wl, wlvif);
4220 wl1271_error("AP rate policy change failed %d", ret);
4224 ret = wl1271_ap_init_templates(wl, vif);
4228 /* No need to set probe resp template for mesh */
4229 if (!ieee80211_vif_is_mesh(vif)) {
4230 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4237 ret = wlcore_set_beacon_template(wl, vif, true);
4242 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4246 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4247 if (bss_conf->enable_beacon) {
/* start the FW AP role only once; then enable HW encryption */
4248 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4249 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4253 ret = wl1271_ap_init_hwenc(wl, wlvif);
4257 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4258 wl1271_debug(DEBUG_AP, "started AP");
4261 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4263 * AP might be in ROC in case we have just
4264 * sent auth reply. handle it.
4266 if (test_bit(wlvif->role_id, wl->roc_map))
4267 wl12xx_croc(wl, wlvif->role_id);
4269 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4273 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4274 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4276 wl1271_debug(DEBUG_AP, "stopped AP");
4281 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4285 /* Handle HT information change */
4286 if ((changed & BSS_CHANGED_HT) &&
4287 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4288 ret = wl1271_acx_set_ht_information(wl, wlvif,
4289 bss_conf->ht_operation_mode);
4291 wl1271_warning("Set ht information failed %d", ret);
/*
 * Program a new (non-zero) BSSID on a STA/IBSS role.
 *
 * Recomputes the basic/enabled rate sets from bss_conf, stops a
 * scheduled scan owned by this vif (sched_scan is only supported
 * while not connected), refreshes FW rate policies and the
 * (QoS) null-data templates, updates the SSID and finally marks
 * the role WLVIF_FLAG_IN_USE.
 *
 * NOTE(review): excerpt elides lines; error paths between the
 * visible calls are not shown here.
 */
4300 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4301 struct ieee80211_bss_conf *bss_conf,
4307 wl1271_debug(DEBUG_MAC80211,
4308 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4309 bss_conf->bssid, bss_conf->aid,
4310 bss_conf->beacon_int,
4311 bss_conf->basic_rates, sta_rate_set);
4313 wlvif->beacon_int = bss_conf->beacon_int;
4314 rates = bss_conf->basic_rates;
4315 wlvif->basic_rate_set =
4316 wl1271_tx_enabled_rates_get(wl, rates,
4319 wl1271_tx_min_rate_get(wl,
4320 wlvif->basic_rate_set);
4324 wl1271_tx_enabled_rates_get(wl,
4328 /* we only support sched_scan while not connected */
4329 if (wl->sched_vif == wlvif)
4330 wl->ops->sched_scan_stop(wl, wlvif);
4332 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4336 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4340 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4344 wlcore_set_ssid(wl, wlvif);
4346 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates,
 * re-program the FW rate policies, stop the FW STA role if it was
 * in use, and clear WLVIF_FLAG_IN_USE.
 */
4351 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4355 /* revert back to minimum rates for the current band */
4356 wl1271_set_band_rate(wl, wlvif);
4357 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4359 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* only a started STA role needs an explicit FW stop command */
4363 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4364 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4365 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4370 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4373 /* STA/IBSS mode changes */
/*
 * Apply bss_conf changes for a STA or IBSS vif: IBSS join/leave,
 * idle handling, CQM RSSI triggers, BSSID set/clear, association
 * state, power-save mode, HT peer capabilities and ARP filtering.
 *
 * NOTE(review): excerpt elides lines (original numbering jumps), so
 * several conditions/assignments between the visible statements are
 * not shown; comments below cover only what is visible.
 */
4374 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4375 struct ieee80211_vif *vif,
4376 struct ieee80211_bss_conf *bss_conf,
4379 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4380 bool do_join = false;
4381 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4382 bool ibss_joined = false;
4383 u32 sta_rate_set = 0;
4385 struct ieee80211_sta *sta;
4386 bool sta_exists = false;
4387 struct ieee80211_sta_ht_cap sta_ht_cap;
4390 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: on leave, drop assoc state and stop the STA role */
4396 if (changed & BSS_CHANGED_IBSS) {
4397 if (bss_conf->ibss_joined) {
4398 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4401 wlcore_unset_assoc(wl, wlvif);
4402 wl12xx_cmd_role_stop_sta(wl, wlvif);
4406 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4409 /* Need to update the SSID (for filtering etc) */
4410 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4413 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4414 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4415 bss_conf->enable_beacon ? "enabled" : "disabled");
4420 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4421 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* CQM: arm/disarm the FW RSSI trigger and remember the threshold */
4423 if (changed & BSS_CHANGED_CQM) {
4424 bool enable = false;
4425 if (bss_conf->cqm_rssi_thold)
4427 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4428 bss_conf->cqm_rssi_thold,
4429 bss_conf->cqm_rssi_hyst);
4432 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP peer's rates/HT caps under RCU for later use */
4435 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4436 BSS_CHANGED_ASSOC)) {
4438 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4440 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4442 /* save the supp_rates of the ap */
4443 sta_rate_set = sta->supp_rates[wlvif->band];
4444 if (sta->ht_cap.ht_supported)
4446 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4447 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4448 sta_ht_cap = sta->ht_cap;
/* a zeroed BSSID means "forget the BSS" -> clear instead of set */
4455 if (changed & BSS_CHANGED_BSSID) {
4456 if (!is_zero_ether_addr(bss_conf->bssid)) {
4457 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4462 /* Need to update the BSSID (for filtering etc) */
4465 ret = wlcore_clear_bssid(wl, wlvif);
4471 if (changed & BSS_CHANGED_IBSS) {
4472 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4473 bss_conf->ibss_joined);
4475 if (bss_conf->ibss_joined) {
4476 u32 rates = bss_conf->basic_rates;
4477 wlvif->basic_rate_set =
4478 wl1271_tx_enabled_rates_get(wl, rates,
4481 wl1271_tx_min_rate_get(wl,
4482 wlvif->basic_rate_set);
4484 /* by default, use 11b + OFDM rates */
4485 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4486 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4492 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4493 /* enable beacon filtering */
4494 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4499 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4504 ret = wlcore_join(wl, wlvif);
4506 wl1271_warning("cmd join failed %d", ret);
/* association: set/unset assoc state; re-send authorized if needed */
4511 if (changed & BSS_CHANGED_ASSOC) {
4512 if (bss_conf->assoc) {
4513 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4518 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4519 wl12xx_set_authorized(wl, wlvif);
4521 wlcore_unset_assoc(wl, wlvif);
/* power save: enter forced/auto PS only while associated and not
 * already in PS; leave PS when mac80211 clears bss_conf->ps */
4525 if (changed & BSS_CHANGED_PS) {
4526 if ((bss_conf->ps) &&
4527 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4528 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4532 if (wl->conf.conn.forced_ps) {
4533 ps_mode = STATION_POWER_SAVE_MODE;
4534 ps_mode_str = "forced";
4536 ps_mode = STATION_AUTO_PS_MODE;
4537 ps_mode_str = "auto";
4540 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4542 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4544 wl1271_warning("enter %s ps failed %d",
4546 } else if (!bss_conf->ps &&
4547 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4548 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4550 ret = wl1271_ps_set_mode(wl, wlvif,
4551 STATION_ACTIVE_MODE);
4553 wl1271_warning("exit auto ps failed %d", ret);
4557 /* Handle new association with HT. Do this after join. */
4560 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4562 ret = wlcore_hw_set_peer_cap(wl,
4568 wl1271_warning("Set ht cap failed %d", ret);
4574 ret = wl1271_acx_set_ht_information(wl, wlvif,
4575 bss_conf->ht_operation_mode);
4577 wl1271_warning("Set ht information failed %d",
4584 /* Handle arp filtering. Done after join. */
4585 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4586 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4587 __be32 addr = bss_conf->arp_addr_list[0];
4588 wlvif->sta.qos = bss_conf->qos;
4589 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
/* only a single IPv4 address is supported for HW ARP offload */
4591 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4592 wlvif->ip_addr = addr;
4594 * The template should have been configured only upon
4595 * association. however, it seems that the correct ip
4596 * isn't being set (when sending), so we have to
4597 * reconfigure the template upon every ip change.
4599 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4601 wl1271_warning("build arp rsp failed: %d", ret);
4605 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4606 (ACX_ARP_FILTER_ARP_FILTERING |
4607 ACX_ARP_FILTER_AUTO_ARP),
4611 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback. Takes wl->mutex, wakes the
 * chip via runtime PM, handles the role-agnostic TX-power change,
 * then dispatches to the AP- or STA-specific handler above.
 * Pending connection-loss work is cancelled (outside the mutex)
 * when a STA's association state changes; TX is flushed before an
 * AP disables beaconing.
 */
4622 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4623 struct ieee80211_vif *vif,
4624 struct ieee80211_bss_conf *bss_conf,
4627 struct wl1271 *wl = hw->priv;
4628 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4629 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4632 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4633 wlvif->role_id, (int)changed);
4636 * make sure to cancel pending disconnections if our association
4639 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4640 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4642 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4643 !bss_conf->enable_beacon)
4644 wl1271_tx_flush(wl);
4646 mutex_lock(&wl->mutex);
/* bail out if the HW is down or the vif is not initialized yet */
4648 if (unlikely(wl->state != WLCORE_STATE_ON))
4651 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4654 ret = pm_runtime_get_sync(wl->dev);
4656 pm_runtime_put_noidle(wl->dev);
/* only push TX power to FW when it actually changed */
4660 if ((changed & BSS_CHANGED_TXPOWER) &&
4661 bss_conf->txpower != wlvif->power_level) {
4663 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4667 wlvif->power_level = bss_conf->txpower;
4671 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4673 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4675 pm_runtime_mark_last_busy(wl->dev);
4676 pm_runtime_put_autosuspend(wl->dev);
4679 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback. The driver keeps no per-context
 * state here; this only logs the new context.
 */
4682 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4683 struct ieee80211_chanctx_conf *ctx)
4685 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4686 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4687 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx callback. Log-only counterpart of
 * wlcore_op_add_chanctx(); no driver state is torn down here.
 */
4691 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4692 struct ieee80211_chanctx_conf *ctx)
4694 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4695 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4696 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx callback. For every vif bound to this
 * context, start radar detection (CAC) when the RADAR flag was set
 * on an AP vif whose DFS channel is still in the USABLE state.
 * Runs under wl->mutex with the chip held awake via runtime PM.
 */
4699 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4700 struct ieee80211_chanctx_conf *ctx,
4703 struct wl1271 *wl = hw->priv;
4704 struct wl12xx_vif *wlvif;
4706 int channel = ieee80211_frequency_to_channel(
4707 ctx->def.chan->center_freq);
4709 wl1271_debug(DEBUG_MAC80211,
4710 "mac80211 change chanctx %d (type %d) changed 0x%x",
4711 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4713 mutex_lock(&wl->mutex);
4715 ret = pm_runtime_get_sync(wl->dev);
4717 pm_runtime_put_noidle(wl->dev);
4721 wl12xx_for_each_wlvif(wl, wlvif) {
4722 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs that are assigned to a different channel context */
4725 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4731 /* start radar if needed */
4732 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4733 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4734 ctx->radar_enabled && !wlvif->radar_enabled &&
4735 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4736 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4737 wlcore_hw_set_cac(wl, wlvif, true);
4738 wlvif->radar_enabled = true;
4742 pm_runtime_mark_last_busy(wl->dev);
4743 pm_runtime_put_autosuspend(wl->dev);
4745 mutex_unlock(&wl->mutex);
/*
 * mac80211 .assign_vif_chanctx callback. Records the context's
 * band/channel/channel-type on the vif, refreshes the default rates
 * for the new band, and starts radar detection (CAC) when the
 * context has radar enabled on a DFS-usable channel.
 */
4748 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4749 struct ieee80211_vif *vif,
4750 struct ieee80211_chanctx_conf *ctx)
4752 struct wl1271 *wl = hw->priv;
4753 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4754 int channel = ieee80211_frequency_to_channel(
4755 ctx->def.chan->center_freq);
4758 wl1271_debug(DEBUG_MAC80211,
4759 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4760 wlvif->role_id, channel,
4761 cfg80211_get_chandef_type(&ctx->def),
4762 ctx->radar_enabled, ctx->def.chan->dfs_state);
4764 mutex_lock(&wl->mutex);
4766 if (unlikely(wl->state != WLCORE_STATE_ON))
4769 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4772 ret = pm_runtime_get_sync(wl->dev);
4774 pm_runtime_put_noidle(wl->dev);
/* adopt the context's channel definition on this vif */
4778 wlvif->band = ctx->def.chan->band;
4779 wlvif->channel = channel;
4780 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4782 /* update default rates according to the band */
4783 wl1271_set_band_rate(wl, wlvif);
4785 if (ctx->radar_enabled &&
4786 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4787 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4788 wlcore_hw_set_cac(wl, wlvif, true);
4789 wlvif->radar_enabled = true;
4792 pm_runtime_mark_last_busy(wl->dev);
4793 pm_runtime_put_autosuspend(wl->dev);
4795 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback. Flushes TX (outside the
 * mutex), then stops radar detection (CAC) if it was running on
 * this vif.
 */
4800 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4801 struct ieee80211_vif *vif,
4802 struct ieee80211_chanctx_conf *ctx)
4804 struct wl1271 *wl = hw->priv;
4805 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4808 wl1271_debug(DEBUG_MAC80211,
4809 "mac80211 unassign chanctx (role %d) %d (type %d)",
4811 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4812 cfg80211_get_chandef_type(&ctx->def));
/* drain pending frames before detaching from the channel */
4814 wl1271_tx_flush(wl);
4816 mutex_lock(&wl->mutex);
4818 if (unlikely(wl->state != WLCORE_STATE_ON))
4821 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4824 ret = pm_runtime_get_sync(wl->dev);
4826 pm_runtime_put_noidle(wl->dev);
4830 if (wlvif->radar_enabled) {
4831 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4832 wlcore_hw_set_cac(wl, wlvif, false);
4833 wlvif->radar_enabled = false;
4836 pm_runtime_mark_last_busy(wl->dev);
4837 pm_runtime_put_autosuspend(wl->dev);
4839 mutex_unlock(&wl->mutex);
/*
 * Move one AP vif to a new channel context: stop any running CAC,
 * adopt the new band/channel/type, and restart CAC if the new
 * context has radar enabled. AP-only (WARN_ON otherwise); also
 * warns if beaconing was not disabled beforehand.
 * Caller is expected to hold wl->mutex with the chip awake (see
 * wlcore_op_switch_vif_chanctx).
 */
4842 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4843 struct wl12xx_vif *wlvif,
4844 struct ieee80211_chanctx_conf *new_ctx)
4846 int channel = ieee80211_frequency_to_channel(
4847 new_ctx->def.chan->center_freq);
4849 wl1271_debug(DEBUG_MAC80211,
4850 "switch vif (role %d) %d -> %d chan_type: %d",
4851 wlvif->role_id, wlvif->channel, channel,
4852 cfg80211_get_chandef_type(&new_ctx->def));
4854 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4857 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4859 if (wlvif->radar_enabled) {
4860 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4861 wlcore_hw_set_cac(wl, wlvif, false);
4862 wlvif->radar_enabled = false;
4865 wlvif->band = new_ctx->def.chan->band;
4866 wlvif->channel = channel;
4867 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4869 /* start radar if needed */
4870 if (new_ctx->radar_enabled) {
4871 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4872 wlcore_hw_set_cac(wl, wlvif, true);
4873 wlvif->radar_enabled = true;
/*
 * mac80211 .switch_vif_chanctx callback. Applies
 * __wlcore_switch_vif_chan() to every vif in the switch request,
 * under wl->mutex with the chip awake.
 */
4880 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4881 struct ieee80211_vif_chanctx_switch *vifs,
4883 enum ieee80211_chanctx_switch_mode mode)
4885 struct wl1271 *wl = hw->priv;
4888 wl1271_debug(DEBUG_MAC80211,
4889 "mac80211 switch chanctx n_vifs %d mode %d",
4892 mutex_lock(&wl->mutex);
4894 ret = pm_runtime_get_sync(wl->dev);
4896 pm_runtime_put_noidle(wl->dev);
4900 for (i = 0; i < n_vifs; i++) {
4901 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4903 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4908 pm_runtime_mark_last_busy(wl->dev);
4909 pm_runtime_put_autosuspend(wl->dev);
4911 mutex_unlock(&wl->mutex);
/*
 * mac80211 .conf_tx callback: program EDCA AC parameters (cw_min/
 * cw_max/aifs/txop) and the matching TID configuration into FW.
 * Skipped entirely for P2P management vifs. The PS scheme is
 * UPSD-trigger or legacy (selection condition elided in this
 * excerpt).
 */
4916 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4917 struct ieee80211_vif *vif, u16 queue,
4918 const struct ieee80211_tx_queue_params *params)
4920 struct wl1271 *wl = hw->priv;
4921 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4925 if (wlcore_is_p2p_mgmt(wlvif))
4928 mutex_lock(&wl->mutex);
4930 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4933 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4935 ps_scheme = CONF_PS_SCHEME_LEGACY;
4937 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4940 ret = pm_runtime_get_sync(wl->dev);
4942 pm_runtime_put_noidle(wl->dev);
4947 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 below to convert into usec for the FW */
4950 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4951 params->cw_min, params->cw_max,
4952 params->aifs, params->txop << 5);
4956 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4957 CONF_CHANNEL_TYPE_EDCF,
4958 wl1271_tx_get_queue(queue),
4959 ps_scheme, CONF_ACK_POLICY_LEGACY,
4963 pm_runtime_mark_last_busy(wl->dev);
4964 pm_runtime_put_autosuspend(wl->dev);
4967 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: read the TSF from FW via the ACX
 * interface. Returns ULLONG_MAX when the value could not be read
 * (HW off or ACX failure).
 */
4972 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4973 struct ieee80211_vif *vif)
4976 struct wl1271 *wl = hw->priv;
4977 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4978 u64 mactime = ULLONG_MAX;
4981 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4983 mutex_lock(&wl->mutex);
4985 if (unlikely(wl->state != WLCORE_STATE_ON))
4988 ret = pm_runtime_get_sync(wl->dev);
4990 pm_runtime_put_noidle(wl->dev);
4994 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4999 pm_runtime_mark_last_busy(wl->dev);
5000 pm_runtime_put_autosuspend(wl->dev);
5003 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback: report only the currently
 * configured channel (idx handling elided in this excerpt).
 */
5007 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5008 struct survey_info *survey)
5010 struct ieee80211_conf *conf = &hw->conf;
5015 survey->channel = conf->chandef.chan;
/*
 * Allocate a FW link (HLID) for a new AP-mode station. Fails when
 * the AP station limit or the link pool is exhausted. Restores the
 * station's saved security sequence number (recovery/resume case),
 * records its MAC address on the link and bumps active_sta_count.
 */
5020 static int wl1271_allocate_sta(struct wl1271 *wl,
5021 struct wl12xx_vif *wlvif,
5022 struct ieee80211_sta *sta)
5024 struct wl1271_station *wl_sta;
5028 if (wl->active_sta_count >= wl->max_ap_stations) {
5029 wl1271_warning("could not allocate HLID - too much stations");
5033 wl_sta = (struct wl1271_station *)sta->drv_priv;
5034 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5036 wl1271_warning("could not allocate HLID - too many links");
5040 /* use the previous security seq, if this is a recovery/resume */
5041 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5043 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5044 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5045 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the vif's map
 * and the AP PS bookkeeping, save the last-used PN for possible
 * recovery/suspend, free the FW link and decrement
 * active_sta_count. No-op if the HLID is not set in the map.
 */
5049 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5051 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5054 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5055 __clear_bit(hlid, &wl->ap_ps_map);
5056 __clear_bit(hlid, &wl->ap_fw_ps_map);
5059 * save the last used PN in the private part of iee80211_sta,
5060 * in case of recovery/suspend
5062 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5064 wl12xx_free_link(wl, wlvif, &hlid);
5065 wl->active_sta_count--;
5068 * rearm the tx watchdog when the last STA is freed - give the FW a
5069 * chance to return STA-buffered packets before complaining.
5071 if (wl->active_sta_count == 0)
5072 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID, then issue the FW
 * "add peer" command. The HLID is released again if the FW
 * command fails.
 */
5075 static int wl12xx_sta_add(struct wl1271 *wl,
5076 struct wl12xx_vif *wlvif,
5077 struct ieee80211_sta *sta)
5079 struct wl1271_station *wl_sta;
5083 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5085 ret = wl1271_allocate_sta(wl, wlvif, sta);
5089 wl_sta = (struct wl1271_station *)sta->drv_priv;
5090 hlid = wl_sta->hlid;
5092 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the allocation on FW failure */
5094 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: tell the FW to remove it, then free the
 * HLID. NOTE(review): line 5110 tests a variable `id` whose
 * assignment is not visible in this excerpt — presumably it holds
 * wl_sta->hlid; confirm against the full file.
 */
5099 static int wl12xx_sta_remove(struct wl1271 *wl,
5100 struct wl12xx_vif *wlvif,
5101 struct ieee80211_sta *sta)
5103 struct wl1271_station *wl_sta;
5106 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5108 wl_sta = (struct wl1271_station *)sta->drv_priv;
5110 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5113 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5117 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a ROC (remain-on-channel) on this vif's role, but only if
 * no role currently holds a ROC (roc_map empty) and the vif has a
 * valid role id.
 */
5121 static void wlcore_roc_if_possible(struct wl1271 *wl,
5122 struct wl12xx_vif *wlvif)
5124 if (find_first_bit(wl->roc_map,
5125 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5128 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5131 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5135 * when wl_sta is NULL, we treat this call as if coming from a
5136 * pending auth reply.
5137 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection on an AP vif. Entering
 * the in-connection state (in_conn == true) may start a ROC so the
 * AP stays on-channel; leaving it releases the ROC once no station
 * is connecting and no auth reply is pending.
 */
5140 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5141 struct wl1271_station *wl_sta, bool in_conn)
5144 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first connecting entity (station or pending auth) triggers ROC */
5147 if (!wlvif->ap_pending_auth_reply &&
5148 !wlvif->inconn_count)
5149 wlcore_roc_if_possible(wl, wlvif);
5152 wl_sta->in_connection = true;
5153 wlvif->inconn_count++;
5155 wlvif->ap_pending_auth_reply = true;
5158 if (wl_sta && !wl_sta->in_connection)
5161 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5164 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5168 wl_sta->in_connection = false;
5169 wlvif->inconn_count--;
5171 wlvif->ap_pending_auth_reply = false;
/* last connecting entity gone -> cancel the ROC if we hold one */
5174 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5175 test_bit(wlvif->role_id, wl->roc_map))
5176 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the mac80211 sta_state machine for this driver. Handles
 * (visible here): AP add/remove/authorize of peers with
 * in-connection ROC bookkeeping, STA-side authorize/deauthorize,
 * saving/restoring TX security sequence numbers across
 * disassoc/assoc (suspend/resume), and ROC management around
 * authorization. Several guard conditions are elided in this
 * excerpt; comments describe only the visible transitions.
 */
5180 static int wl12xx_update_sta_state(struct wl1271 *wl,
5181 struct wl12xx_vif *wlvif,
5182 struct ieee80211_sta *sta,
5183 enum ieee80211_sta_state old_state,
5184 enum ieee80211_sta_state new_state)
5186 struct wl1271_station *wl_sta;
5187 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5188 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5191 wl_sta = (struct wl1271_station *)sta->drv_priv;
5193 /* Add station (AP mode) */
5195 old_state == IEEE80211_STA_NOTEXIST &&
5196 new_state == IEEE80211_STA_NONE) {
5197 ret = wl12xx_sta_add(wl, wlvif, sta);
5201 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5204 /* Remove station (AP mode) */
5206 old_state == IEEE80211_STA_NONE &&
5207 new_state == IEEE80211_STA_NOTEXIST) {
5209 wl12xx_sta_remove(wl, wlvif, sta);
5211 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5214 /* Authorize station (AP mode) */
5216 new_state == IEEE80211_STA_AUTHORIZED) {
5217 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5221 /* reconfigure rates */
5222 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5226 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5231 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5234 /* Authorize station */
5236 new_state == IEEE80211_STA_AUTHORIZED) {
5237 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5238 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize: forget both the authorized and state-sent flags */
5244 old_state == IEEE80211_STA_AUTHORIZED &&
5245 new_state == IEEE80211_STA_ASSOC) {
5246 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5247 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5250 /* save seq number on disassoc (suspend) */
5252 old_state == IEEE80211_STA_ASSOC &&
5253 new_state == IEEE80211_STA_AUTH) {
5254 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5255 wlvif->total_freed_pkts = 0;
5258 /* restore seq number on assoc (resume) */
5260 old_state == IEEE80211_STA_AUTH &&
5261 new_state == IEEE80211_STA_ASSOC) {
5262 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5265 /* clear ROCs on failure or authorization */
5267 (new_state == IEEE80211_STA_AUTHORIZED ||
5268 new_state == IEEE80211_STA_NOTEXIST)) {
5269 if (test_bit(wlvif->role_id, wl->roc_map))
5270 wl12xx_croc(wl, wlvif->role_id);
/* re-arm a ROC when a new peer appears and none is active */
5274 old_state == IEEE80211_STA_NOTEXIST &&
5275 new_state == IEEE80211_STA_NONE) {
5276 if (find_first_bit(wl->roc_map,
5277 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5278 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5279 wl12xx_roc(wl, wlvif, wlvif->role_id,
5280 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback: lock, wake the chip, delegate to
 * wl12xx_update_sta_state(), release. For downward transitions the
 * error is deliberately not propagated (return value adjusted
 * after unlock — tail elided in this excerpt).
 */
5286 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5287 struct ieee80211_vif *vif,
5288 struct ieee80211_sta *sta,
5289 enum ieee80211_sta_state old_state,
5290 enum ieee80211_sta_state new_state)
5292 struct wl1271 *wl = hw->priv;
5293 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5296 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5297 sta->aid, old_state, new_state);
5299 mutex_lock(&wl->mutex);
5301 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5306 ret = pm_runtime_get_sync(wl->dev);
5308 pm_runtime_put_noidle(wl->dev);
5312 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5314 pm_runtime_mark_last_busy(wl->dev);
5315 pm_runtime_put_autosuspend(wl->dev);
5317 mutex_unlock(&wl->mutex);
5318 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback. Only RX BA sessions are managed
 * by the host: RX_START programs a BA receiver session in FW
 * (bounded by ba_rx_session_count_max and per-link ba_bitmap),
 * RX_STOP tears it down. All TX A-MPDU actions are no-ops because
 * the FW manages TX BA sessions autonomously.
 *
 * NOTE(review): "¶ms" on line 5334 is an encoding artifact of
 * this excerpt — the original reads "&params->ssn".
 */
5323 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5324 struct ieee80211_vif *vif,
5325 struct ieee80211_ampdu_params *params)
5327 struct wl1271 *wl = hw->priv;
5328 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5330 u8 hlid, *ba_bitmap;
5331 struct ieee80211_sta *sta = params->sta;
5332 enum ieee80211_ampdu_mlme_action action = params->action;
5333 u16 tid = params->tid;
5334 u16 *ssn = ¶ms->ssn;
5336 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5339 /* sanity check - the fields in FW are only 8bits wide */
5340 if (WARN_ON(tid > 0xFF))
5343 mutex_lock(&wl->mutex);
5345 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the FW link (HLID) this BA session belongs to */
5350 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5351 hlid = wlvif->sta.hlid;
5352 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5353 struct wl1271_station *wl_sta;
5355 wl_sta = (struct wl1271_station *)sta->drv_priv;
5356 hlid = wl_sta->hlid;
5362 ba_bitmap = &wl->links[hlid].ba_bitmap;
5364 ret = pm_runtime_get_sync(wl->dev);
5366 pm_runtime_put_noidle(wl->dev);
5370 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5374 case IEEE80211_AMPDU_RX_START:
5375 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5380 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5382 wl1271_error("exceeded max RX BA sessions");
5386 if (*ba_bitmap & BIT(tid)) {
5388 wl1271_error("cannot enable RX BA session on active "
5393 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5398 *ba_bitmap |= BIT(tid);
5399 wl->ba_rx_session_count++;
5403 case IEEE80211_AMPDU_RX_STOP:
5404 if (!(*ba_bitmap & BIT(tid))) {
5406 * this happens on reconfig - so only output a debug
5407 * message for now, and don't fail the function.
5409 wl1271_debug(DEBUG_MAC80211,
5410 "no active RX BA session on tid: %d",
5416 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5419 *ba_bitmap &= ~BIT(tid);
5420 wl->ba_rx_session_count--;
5425 * The BA initiator (TX) session management is handled by the
5426 * FW independently; deliberate fallthrough for all TX A-MPDU
 * actions below.
5428 case IEEE80211_AMPDU_TX_START:
5429 case IEEE80211_AMPDU_TX_STOP_CONT:
5430 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5431 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5432 case IEEE80211_AMPDU_TX_OPERATIONAL:
5437 wl1271_error("Incorrect ampdu action id=%x\n", action);
5441 pm_runtime_mark_last_busy(wl->dev);
5442 pm_runtime_put_autosuspend(wl->dev);
5445 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback. Always records the per-band
 * legacy masks on the vif; additionally, for an unassociated STA,
 * wakes the chip and re-programs band default rates and FW rate
 * policies immediately (for an associated STA the mask takes
 * effect on the next (re)association instead).
 */
5450 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5451 struct ieee80211_vif *vif,
5452 const struct cfg80211_bitrate_mask *mask)
5454 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5455 struct wl1271 *wl = hw->priv;
5458 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5459 mask->control[NL80211_BAND_2GHZ].legacy,
5460 mask->control[NL80211_BAND_5GHZ].legacy);
5462 mutex_lock(&wl->mutex);
5464 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5465 wlvif->bitrate_masks[i] =
5466 wl1271_tx_enabled_rates_get(wl,
5467 mask->control[i].legacy,
5470 if (unlikely(wl->state != WLCORE_STATE_ON))
5473 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5474 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5476 ret = pm_runtime_get_sync(wl->dev);
5478 pm_runtime_put_noidle(wl->dev);
5482 wl1271_set_band_rate(wl, wlvif);
5484 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5485 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5487 pm_runtime_mark_last_busy(wl->dev);
5488 pm_runtime_put_autosuspend(wl->dev);
5491 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback (STA side). Flushes TX, then —
 * if associated — asks the lower driver to perform the CSA and
 * arms channel_switch_work as a watchdog that reports failure
 * ~5 seconds after the expected switch time. If the HW is already
 * off, completes the switch back to mac80211 as failed.
 */
5496 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5497 struct ieee80211_vif *vif,
5498 struct ieee80211_channel_switch *ch_switch)
5500 struct wl1271 *wl = hw->priv;
5501 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5504 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5506 wl1271_tx_flush(wl);
5508 mutex_lock(&wl->mutex);
5510 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5511 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5512 ieee80211_chswitch_done(vif, false);
5514 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5518 ret = pm_runtime_get_sync(wl->dev);
5520 pm_runtime_put_noidle(wl->dev);
5524 /* TODO: change mac80211 to pass vif as param */
5526 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5527 unsigned long delay_usec;
5529 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5533 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5535 /* indicate failure 5 seconds after channel switch time */
5536 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5538 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5539 usecs_to_jiffies(delay_usec) +
5540 msecs_to_jiffies(5000));
5544 pm_runtime_mark_last_busy(wl->dev);
5545 pm_runtime_put_autosuspend(wl->dev);
5548 mutex_unlock(&wl->mutex);
/*
 * Fetch the vif's current beacon from mac80211 and locate the IE
 * with the given EID in its variable-length part. Returns a
 * pointer into the beacon skb's data, or NULL if not found.
 * NOTE(review): freeing of the beacon skb is not visible in this
 * excerpt — confirm the caller/elided lines release it.
 */
5551 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5552 struct wl12xx_vif *wlvif,
5555 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5556 struct sk_buff *beacon =
5557 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5562 return cfg80211_find_ie(eid,
5563 beacon->data + ieoffset,
5564 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the Channel Switch Announcement
 * IE of the vif's current beacon. The IE payload starts after the
 * 2-byte EID/length header, hence &ie[2].
 */
5567 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5571 const struct ieee80211_channel_sw_ie *ie_csa;
5573 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5577 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5578 *csa_count = ie_csa->count;
/*
 * mac80211 .channel_switch_beacon callback (AP/CSA-in-beacon).
 * Builds an ieee80211_channel_switch from the target chandef plus
 * the CSA count read out of our own beacon, then hands it to the
 * lower driver's channel_switch op.
 */
5583 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5584 struct ieee80211_vif *vif,
5585 struct cfg80211_chan_def *chandef)
5587 struct wl1271 *wl = hw->priv;
5588 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5589 struct ieee80211_channel_switch ch_switch = {
5591 .chandef = *chandef,
5595 wl1271_debug(DEBUG_MAC80211,
5596 "mac80211 channel switch beacon (role %d)",
5599 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5601 wl1271_error("error getting beacon (for CSA counter)");
5605 mutex_lock(&wl->mutex);
5607 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5612 ret = pm_runtime_get_sync(wl->dev);
5614 pm_runtime_put_noidle(wl->dev);
5618 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5622 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5625 pm_runtime_mark_last_busy(wl->dev);
5626 pm_runtime_put_autosuspend(wl->dev);
5628 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback: drain all pending TX. The queues/drop
 * arguments are ignored — the driver always performs a full flush.
 */
5631 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5632 u32 queues, bool drop)
5634 struct wl1271 *wl = hw->priv;
5636 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback. Refuses (visible warning
 * path) when a ROC is already active or wl->roc_vif is set;
 * otherwise starts the device on the requested channel and queues
 * roc_complete_work to fire after `duration` ms.
 */
5639 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5640 struct ieee80211_vif *vif,
5641 struct ieee80211_channel *chan,
5643 enum ieee80211_roc_type type)
5645 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5646 struct wl1271 *wl = hw->priv;
5647 int channel, active_roc, ret = 0;
5649 channel = ieee80211_frequency_to_channel(chan->center_freq);
5651 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5652 channel, wlvif->role_id);
5654 mutex_lock(&wl->mutex);
5656 if (unlikely(wl->state != WLCORE_STATE_ON))
5659 /* return EBUSY if we can't ROC right now */
5660 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5661 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5662 wl1271_warning("active roc on role %d", active_roc);
5667 ret = pm_runtime_get_sync(wl->dev);
5669 pm_runtime_put_noidle(wl->dev);
5673 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* schedule automatic ROC completion after the requested duration */
5678 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5679 msecs_to_jiffies(duration));
5681 pm_runtime_mark_last_busy(wl->dev);
5682 pm_runtime_put_autosuspend(wl->dev);
5684 mutex_unlock(&wl->mutex);
/*
 * Finish a ROC if one is pending: no-op when wl->roc_vif is
 * already clear, otherwise stop the device for the ROC vif
 * (provided it is still initialized). Caller holds wl->mutex with
 * the chip awake.
 */
5688 static int __wlcore_roc_completed(struct wl1271 *wl)
5690 struct wl12xx_vif *wlvif;
5693 /* already completed */
5694 if (unlikely(!wl->roc_vif))
5697 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5699 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5702 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked/awake wrapper around __wlcore_roc_completed(): takes
 * wl->mutex, wakes the chip, completes the ROC, and releases both.
 */
5711 static int wlcore_roc_completed(struct wl1271 *wl)
5715 wl1271_debug(DEBUG_MAC80211, "roc complete");
5717 mutex_lock(&wl->mutex);
5719 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5724 ret = pm_runtime_get_sync(wl->dev);
5726 pm_runtime_put_noidle(wl->dev);
5730 ret = __wlcore_roc_completed(wl);
5732 pm_runtime_mark_last_busy(wl->dev);
5733 pm_runtime_put_autosuspend(wl->dev);
5735 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler queued by wlcore_op_remain_on_channel():
 * completes the ROC and notifies mac80211 that the
 * remain-on-channel period expired.
 */
5740 static void wlcore_roc_complete_work(struct work_struct *work)
5742 struct delayed_work *dwork;
5746 dwork = to_delayed_work(work);
5747 wl = container_of(dwork, struct wl1271, roc_complete_work);
5749 ret = wlcore_roc_completed(wl);
5751 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work and complete the ROC synchronously.
 */
5754 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5756 struct wl1271 *wl = hw->priv;
5758 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5761 wl1271_tx_flush(wl);
5764 * we can't just flush_work here, because it might deadlock
5765 * (as we might get called from the same workqueue)
5767 cancel_delayed_work_sync(&wl->roc_complete_work);
5768 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback. Only bandwidth changes are of
 * interest; since this callback runs in atomic context, the new
 * bandwidth and HT capabilities are stashed on the vif and the actual
 * reconfiguration is deferred to wlvif->rc_update_work.
 */
5773 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5774 struct ieee80211_vif *vif,
5775 struct ieee80211_sta *sta,
5778 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5780 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5782 if (!(changed & IEEE80211_RC_BW_CHANGED))
/* this callback is atomic, so schedule a new work */
5786 wlvif->rc_update_bw = sta->bandwidth;
5787 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5788 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 sta_statistics callback: query the firmware for the average
 * RSSI of the link and report it as NL80211_STA_INFO_SIGNAL. Wakes the
 * chip via runtime PM for the ACX query; silently skips the query when
 * the chip is not ON or cannot be woken.
 */
5791 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5792 struct ieee80211_vif *vif,
5793 struct ieee80211_sta *sta,
5794 struct station_info *sinfo)
5796 struct wl1271 *wl = hw->priv;
5797 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5801 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5803 mutex_lock(&wl->mutex);
5805 if (unlikely(wl->state != WLCORE_STATE_ON))
5808 ret = pm_runtime_get_sync(wl->dev);
/* drop the reference taken by the failed pm_runtime_get_sync() */
5810 pm_runtime_put_noidle(wl->dev);
5814 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5818 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5819 sinfo->signal = rssi_dbm;
5822 pm_runtime_mark_last_busy(wl->dev);
5823 pm_runtime_put_autosuspend(wl->dev);
5826 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_expected_throughput callback: report the firmware's rate
 * estimate for the station's link, converted from Mbps to Kbps.
 */
5829 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5830 struct ieee80211_sta *sta)
5832 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5833 struct wl1271 *wl = hw->priv;
5834 u8 hlid = wl_sta->hlid;
/* return in units of Kbps */
5837 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 tx_frames_pending callback: true if any frames are queued in
 * the driver's TX queues or still outstanding in the firmware.
 * Returns false (via the elided default) when the chip is not ON.
 */
5840 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5842 struct wl1271 *wl = hw->priv;
5845 mutex_lock(&wl->mutex);
5847 if (unlikely(wl->state != WLCORE_STATE_ON))
/* packets are considered pending if in the TX queue or the FW */
5851 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5853 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz legacy rate table (CCK 1/2/5.5/11 Mbps + OFDM 6..54 Mbps).
 * hw_value / hw_value_short map each rate to the firmware's CONF_HW_BIT_*
 * rate bits; CCK rates above 1 Mbps also advertise short preamble.
 * (The .bitrate fields are elided in this dump.)
 */
/* can't be const, mac80211 writes to this */
5859 static struct ieee80211_rate wl1271_rates[] = {
5861 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5862 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5864 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5865 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5866 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5868 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5869 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5870 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5872 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5873 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5874 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5876 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5877 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5879 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5880 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5882 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5883 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5885 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5886 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5888 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5889 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5891 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5892 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5894 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5895 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5897 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5898 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 2.4 GHz channel table: channels 1-14 (2412-2484 MHz), all initialized
 * to the chip-wide maximum TX power; regulatory flags are applied later.
 */
/* can't be const, mac80211 writes to this */
5902 static struct ieee80211_channel wl1271_channels[] = {
5903 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5904 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5905 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5906 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5907 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5908 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5909 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5910 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5911 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5912 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5913 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5914 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5915 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz supported-band descriptor tying the channel and rate tables
 * above together; its ht_cap is filled in per-device later. */
/* can't be const, mac80211 writes to this */
5920 static struct ieee80211_supported_band wl1271_band_2ghz = {
5921 .channels = wl1271_channels,
5922 .n_channels = ARRAY_SIZE(wl1271_channels),
5923 .bitrates = wl1271_rates,
5924 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz legacy rate table: OFDM only (6..54 Mbps), no CCK and therefore
 * no short-preamble flags. (The .bitrate fields are elided in this dump.)
 */
/* 5 GHz data rates for WL1273 */
5928 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5930 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5931 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5933 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5934 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5936 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5937 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5939 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5940 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5942 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5943 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5945 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5946 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5948 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5949 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5951 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5952 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 5 GHz channel table (UNII-1/2/2e/3 plus a few Japanese channels);
 * all initialized to maximum TX power, regulatory-trimmed later.
 */
/* 5 GHz band channels for WL1273 */
5956 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5957 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5958 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5959 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5960 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5961 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5962 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5963 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5964 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5965 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5966 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5967 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5968 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor; ht_cap is filled in per-device later. */
5990 static struct ieee80211_supported_band wl1271_band_5ghz = {
5991 .channels = wl1271_channels_5ghz,
5992 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5993 .bitrates = wl1271_rates_5ghz,
5994 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for the wlcore family. Everything the stack may
 * invoke on this driver is routed through here; CFG80211_TESTMODE_CMD adds
 * the testmode hook only when testmode support is compiled in.
 */
5997 static const struct ieee80211_ops wl1271_ops = {
5998 .start = wl1271_op_start,
5999 .stop = wlcore_op_stop,
6000 .add_interface = wl1271_op_add_interface,
6001 .remove_interface = wl1271_op_remove_interface,
6002 .change_interface = wl12xx_op_change_interface,
6004 .suspend = wl1271_op_suspend,
6005 .resume = wl1271_op_resume,
6007 .config = wl1271_op_config,
6008 .prepare_multicast = wl1271_op_prepare_multicast,
6009 .configure_filter = wl1271_op_configure_filter,
6011 .set_key = wlcore_op_set_key,
6012 .hw_scan = wl1271_op_hw_scan,
6013 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6014 .sched_scan_start = wl1271_op_sched_scan_start,
6015 .sched_scan_stop = wl1271_op_sched_scan_stop,
6016 .bss_info_changed = wl1271_op_bss_info_changed,
6017 .set_frag_threshold = wl1271_op_set_frag_threshold,
6018 .set_rts_threshold = wl1271_op_set_rts_threshold,
6019 .conf_tx = wl1271_op_conf_tx,
6020 .get_tsf = wl1271_op_get_tsf,
6021 .get_survey = wl1271_op_get_survey,
6022 .sta_state = wl12xx_op_sta_state,
6023 .ampdu_action = wl1271_op_ampdu_action,
6024 .tx_frames_pending = wl1271_tx_frames_pending,
6025 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6026 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6027 .channel_switch = wl12xx_op_channel_switch,
6028 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6029 .flush = wlcore_op_flush,
6030 .remain_on_channel = wlcore_op_remain_on_channel,
6031 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6032 .add_chanctx = wlcore_op_add_chanctx,
6033 .remove_chanctx = wlcore_op_remove_chanctx,
6034 .change_chanctx = wlcore_op_change_chanctx,
6035 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6036 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6037 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6038 .sta_rc_update = wlcore_op_sta_rc_update,
6039 .sta_statistics = wlcore_op_sta_statistics,
6040 .get_expected_throughput = wlcore_op_get_expected_throughput,
6041 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a raw HW RX rate value into a mac80211 rate index for the
 * given band, using the per-chip band_rate_to_idx lookup table.
 * Out-of-range or unsupported rates are logged as errors (the fallback
 * return value is elided in this dump — TODO confirm against full source).
 */
6045 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6051 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6052 wl1271_error("Illegal RX rate from HW: %d", rate);
6056 idx = wl->band_rate_to_idx[band][rate];
6057 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6058 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the set of MAC addresses the device advertises from a base
 * OUI (upper 3 bytes) and NIC (lower 3 bytes) pair: each address uses the
 * same OUI with, presumably, consecutive NIC values (the increment is in
 * elided lines — TODO confirm). If the hardware provides fewer addresses
 * than WLCORE_NUM_MAC_ADDRESSES, the last slot reuses the first address
 * with the locally-administered (LAA) bit set. The result is published to
 * the wiphy.
 */
6065 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6069 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* a 24-bit NIC field can overflow when consecutive addresses are derived */
6072 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6073 wl1271_warning("NIC part of the MAC address wraps around!");
6075 for (i = 0; i < wl->num_mac_addr; i++) {
6076 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6077 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6078 wl->addresses[i].addr[2] = (u8) oui;
6079 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6080 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6081 wl->addresses[i].addr[5] = (u8) nic;
/* we may be one address short at the most */
6086 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
/*
 * turn on the LAA bit in the first address and use it as
 * the last (synthesized) address
 */
6092 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6093 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6094 memcpy(&wl->addresses[idx], &wl->addresses[0],
6095 sizeof(wl->addresses[0]));
/* LAA bit (locally administered) in the first octet */
6097 wl->addresses[idx].addr[0] |= BIT(1);
6100 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6101 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read basic hardware identity: chip ID register, PG (production grade)
 * version, and — if the chip-specific ops provide it — the fused MAC
 * address. Fuse address fields are zeroed first so a missing get_mac op
 * leaves them at a well-defined "unset" value.
 */
6104 static int wl12xx_get_hw_info(struct wl1271 *wl)
6108 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6112 wl->fuse_oui_addr = 0;
6113 wl->fuse_nic_addr = 0;
6115 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6119 if (wl->ops->get_mac)
6120 ret = wl->ops->get_mac(wl);
/*
 * Register the device with mac80211. Determines the base MAC address in
 * priority order: (1) address stored in the NVS calibration file,
 * (2) fused address when the NVS address is zero or the well-known
 * "deadbeef" placeholder from the default NVS, (3) TI OUI + random NIC
 * when even the fuse is empty. Then derives the full address set,
 * registers the hw and initializes debugfs. Idempotent: returns early if
 * already registered.
 */
6126 static int wl1271_register_hw(struct wl1271 *wl)
6129 u32 oui_addr = 0, nic_addr = 0;
6130 struct platform_device *pdev = wl->pdev;
6131 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6133 if (wl->mac80211_registered)
6136 if (wl->nvs_len >= 12) {
/* NOTE: The wl->nvs->nvs element must be first, in
 * order to simplify the casting, we assume it is at
 * the beginning of the wl->nvs structure.
 */
6141 u8 *nvs_ptr = (u8 *)wl->nvs;
/* OUI and NIC bytes live at fixed offsets in the NVS blob */
6144 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6146 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
/* if the MAC address is zeroed in the NVS derive from fuse */
6150 if (oui_addr == 0 && nic_addr == 0) {
6151 oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6153 nic_addr = wl->fuse_nic_addr + 1;
/* de:ad:be:ef:00:00 is the placeholder shipped in the default NVS */
6156 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6157 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6158 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6159 wl1271_warning("This default nvs file can be removed from the file system");
6161 wl1271_warning("Your device performance is not optimized.");
6162 wl1271_warning("Please use the calibrator tool to configure your device.");
6165 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6166 wl1271_warning("Fuse mac address is zero. using random mac")
6167 /* Use TI oui and a random nic */
6168 oui_addr = WLCORE_TI_OUI_ADDRESS;
6169 nic_addr = get_random_int();
6171 oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6173 nic_addr = wl->fuse_nic_addr + 1;
6177 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6179 ret = ieee80211_register_hw(wl->hw);
6181 wl1271_error("unable to register mac80211 hw: %d", ret);
6185 wl->mac80211_registered = true;
6187 wl1271_debugfs_init(wl);
6189 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop PLT mode (presumably only if active —
 * the guard is elided here), unregister from mac80211 and clear the
 * registered flag.
 */
6195 static void wl1271_unregister_hw(struct wl1271 *wl)
6198 wl1271_plt_stop(wl);
6200 ieee80211_unregister_hw(wl->hw);
6201 wl->mac80211_registered = false;
/*
 * Populate the ieee80211_hw / wiphy with everything mac80211 needs to
 * know about this device before registration: headroom, capability
 * flags, cipher suites, interface modes, scan limits, band tables
 * (per-device copies of the static 2.4/5 GHz tables), queue layout,
 * probe-response offload, interface combinations and vendor commands.
 */
6205 static int wl1271_init_ieee80211(struct wl1271 *wl)
6208 static const u32 cipher_suites[] = {
6209 WLAN_CIPHER_SUITE_WEP40,
6210 WLAN_CIPHER_SUITE_WEP104,
6211 WLAN_CIPHER_SUITE_TKIP,
6212 WLAN_CIPHER_SUITE_CCMP,
/* TI proprietary GEM cipher */
6213 WL1271_CIPHER_SUITE_GEM,
/* The tx descriptor buffer */
6217 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra room for the TKIP header in the TX path */
6219 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6220 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
/* FIXME: find a proper value */
6224 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6226 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6227 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6228 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6229 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6230 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6231 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6232 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6233 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6234 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6235 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6236 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6237 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6238 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6239 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6241 wl->hw->wiphy->cipher_suites = cipher_suites;
6242 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6244 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6245 BIT(NL80211_IFTYPE_AP) |
6246 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6247 BIT(NL80211_IFTYPE_P2P_CLIENT) |
#ifdef CONFIG_MAC80211_MESH
6249 BIT(NL80211_IFTYPE_MESH_POINT) |
6251 BIT(NL80211_IFTYPE_P2P_GO);
6253 wl->hw->wiphy->max_scan_ssids = 1;
6254 wl->hw->wiphy->max_sched_scan_ssids = 16;
6255 wl->hw->wiphy->max_match_sets = 16;
/*
 * Maximum length of elements in scanning probe request templates
 * should be the maximum length possible for a template, without
 * the IEEE80211 header of the template
 */
6261 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6262 sizeof(struct ieee80211_header);
6264 wl->hw->wiphy->max_sched_scan_reqs = 1;
6265 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6266 sizeof(struct ieee80211_header);
/* milliseconds */
6268 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6270 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6271 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6272 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6274 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
/* make sure all our channels fit in the scanned_ch bitmask */
6277 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6278 ARRAY_SIZE(wl1271_channels_5ghz) >
6279 WL1271_MAX_CHANNELS);
/*
 * clear channel flags from the previous usage
 * and restore max_power & max_antenna_gain values.
 */
6284 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6285 wl1271_band_2ghz.channels[i].flags = 0;
6286 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6287 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6290 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6291 wl1271_band_5ghz.channels[i].flags = 0;
6292 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6293 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
/*
 * We keep local copies of the band structs because we need to
 * modify them on a per-device basis.
 */
6300 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6301 sizeof(wl1271_band_2ghz));
6302 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6303 &wl->ht_cap[NL80211_BAND_2GHZ],
6304 sizeof(*wl->ht_cap));
6305 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6306 sizeof(wl1271_band_5ghz));
6307 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6308 &wl->ht_cap[NL80211_BAND_5GHZ],
6309 sizeof(*wl->ht_cap));
6311 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6312 &wl->bands[NL80211_BAND_2GHZ];
6313 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6314 &wl->bands[NL80211_BAND_5GHZ];
/*
 * allow 4 queues per mac address we support +
 * 1 cab queue per mac + one global offchannel Tx queue
 */
6320 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
/* the last queue is the offchannel queue */
6323 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6324 wl->hw->max_rates = 1;
6326 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
/* the FW answers probe-requests in AP-mode */
6329 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6330 wl->hw->wiphy->probe_resp_offload =
6331 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6332 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6333 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
6336 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6337 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
/* register vendor commands */
6340 wlcore_set_vendor_commands(wl->hw->wiphy);
6342 SET_IEEE80211_DEV(wl->hw, wl->dev);
6344 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6345 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6347 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and minimally initialize a wl1271 device instance and its
 * ieee80211_hw. Sets up queues, work items, locks, default state and the
 * various buffers (aggregation buffer, dummy packet, FW log page, mailbox,
 * 32-bit scratch buffer). On failure the elided goto-cleanup chain at the
 * bottom unwinds each allocation in reverse order and an ERR_PTR is
 * returned. Ownership of everything allocated here passes to
 * wlcore_free_hw().
 */
6352 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6355 struct ieee80211_hw *hw;
6360 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6362 wl1271_error("could not alloc ieee80211_hw");
6368 memset(wl, 0, sizeof(*wl));
/* chip-specific private area, sized by the caller */
6370 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6372 wl1271_error("could not alloc wl priv");
6374 goto err_priv_alloc;
6377 INIT_LIST_HEAD(&wl->wlvif_list);
/*
 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
 * we don't allocate any additional resource here, so that's fine.
 */
6385 for (i = 0; i < NUM_TX_QUEUES; i++)
6386 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6387 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6389 skb_queue_head_init(&wl->deferred_rx_queue);
6390 skb_queue_head_init(&wl->deferred_tx_queue);
6392 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6393 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6394 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6395 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6396 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6397 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work doesn't race system suspend */
6399 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6400 if (!wl->freezable_wq) {
6407 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6408 wl->band = NL80211_BAND_2GHZ;
6409 wl->channel_type = NL80211_CHAN_NO_HT;
6411 wl->sg_enabled = true;
6412 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6413 wl->recovery_count = 0;
6416 wl->ap_fw_ps_map = 0;
6418 wl->system_hlid = WL12XX_SYSTEM_HLID;
6419 wl->active_sta_count = 0;
6420 wl->active_link_count = 0;
/* The system link is always allocated */
6424 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6426 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6427 for (i = 0; i < wl->num_tx_desc; i++)
6428 wl->tx_frames[i] = NULL;
6430 spin_lock_init(&wl->wl_lock);
6432 wl->state = WLCORE_STATE_OFF;
6433 wl->fw_type = WL12XX_FW_TYPE_NONE;
6434 mutex_init(&wl->mutex);
6435 mutex_init(&wl->flush_mutex);
6436 init_completion(&wl->nvs_loading_complete);
/* page-aligned buffer used to aggregate bus transfers */
6438 order = get_order(aggr_buf_size);
6439 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6440 if (!wl->aggr_buf) {
6444 wl->aggr_buf_size = aggr_buf_size;
6446 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6447 if (!wl->dummy_packet) {
/* Allocate one page for the FW log */
6453 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6456 goto err_dummy_packet;
/* GFP_DMA: mailbox may be the target of DMA from the bus driver */
6459 wl->mbox_size = mbox_size;
6460 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6466 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6467 if (!wl->buffer_32) {
/* error unwinding: free in reverse order of allocation */
6478 free_page((unsigned long)wl->fwlog);
6481 dev_kfree_skb(wl->dummy_packet);
6484 free_pages((unsigned long)wl->aggr_buf, order);
6487 destroy_workqueue(wl->freezable_wq);
6490 wl1271_debugfs_exit(wl);
6494 ieee80211_free_hw(hw);
6498 return ERR_PTR(ret);
6500 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Tear down everything wlcore_alloc_hw() created: unblock fwlog readers
 * (fwlog_size = -1 is the sentinel they wait on), free sysfs entries,
 * buffers, debugfs, firmware-status structures, the workqueue and finally
 * the ieee80211_hw itself.
 */
6502 int wlcore_free_hw(struct wl1271 *wl)
/* Unblock any fwlog readers */
6505 mutex_lock(&wl->mutex);
6506 wl->fwlog_size = -1;
6507 mutex_unlock(&wl->mutex);
6509 wlcore_sysfs_free(wl);
6511 kfree(wl->buffer_32);
6513 free_page((unsigned long)wl->fwlog);
6514 dev_kfree_skb(wl->dummy_packet);
6515 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6517 wl1271_debugfs_exit(wl);
6521 wl->fw_type = WL12XX_FW_TYPE_NONE;
6525 kfree(wl->raw_fw_status);
6526 kfree(wl->fw_status);
6527 kfree(wl->tx_res_if);
6528 destroy_workqueue(wl->freezable_wq);
6531 ieee80211_free_hw(wl->hw);
6535 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform can
 * keep the chip powered in suspend: wake on "any" trigger plus pattern
 * matching bounded by the RX-filter limits.
 */
6538 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6539 .flags = WIPHY_WOWLAN_ANY,
6540 .n_patterns = WL1271_MAX_RX_FILTERS,
6541 .pattern_min_len = 1,
6542 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard-IRQ half for edge-triggered interrupt lines: do no work in hard
 * IRQ context, just kick the threaded handler (wlcore_irq).
 */
6546 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6548 return IRQ_WAKE_THREAD;
/*
 * Completion callback for the asynchronous NVS (calibration data)
 * firmware request — the second half of device probe. Copies the NVS
 * blob (if one was delivered), runs chip-specific setup, claims the IRQ
 * resource, powers the chip on to read HW identity, installs the
 * (threaded) interrupt handler, optionally enables IRQ wake / WoWLAN,
 * then powers back off and finishes mac80211 + sysfs registration.
 * Elided goto labels unwind hw-unregister and free_irq on failure.
 * Always releases the firmware and completes nvs_loading_complete so
 * wlcore_remove() can't block forever.
 */
6551 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6553 struct wl1271 *wl = context;
6554 struct platform_device *pdev = wl->pdev;
6555 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6556 struct resource *res;
6559 irq_handler_t hardirq_fn = NULL;
6562 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6564 wl1271_error("Could not allocate nvs data");
6567 wl->nvs_len = fw->size;
6568 } else if (pdev_data->family->nvs_name) {
/* NVS file is optional for some families; just note its absence */
6569 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6570 pdev_data->family->nvs_name);
6578 ret = wl->ops->setup(wl);
6582 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
/* adjust some runtime configuration parameters */
6585 wlcore_adjust_conf(wl);
6587 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6589 wl1271_error("Could not get IRQ resource");
6593 wl->irq = res->start;
6594 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6595 wl->if_ops = pdev_data->if_ops;
/* edge-triggered lines need a hard-IRQ half to wake the thread */
6597 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6598 hardirq_fn = wlcore_hardirq;
6600 wl->irq_flags |= IRQF_ONESHOT;
6602 ret = wl12xx_set_power_on(wl);
6606 ret = wl12xx_get_hw_info(wl);
6608 wl1271_error("couldn't get hw info");
6609 wl1271_power_off(wl);
6613 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6614 wl->irq_flags, pdev->name, wl);
6616 wl1271_error("interrupt configuration failed");
6617 wl1271_power_off(wl);
/* advertise WoWLAN only if the IRQ can wake and power persists */
6622 ret = enable_irq_wake(wl->irq);
6624 wl->irq_wake_enabled = true;
6625 device_init_wakeup(wl->dev, 1);
6626 if (pdev_data->pwr_in_suspend)
6627 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6630 disable_irq(wl->irq);
6631 wl1271_power_off(wl);
6633 ret = wl->ops->identify_chip(wl);
6637 ret = wl1271_init_ieee80211(wl);
6641 ret = wl1271_register_hw(wl);
6645 ret = wlcore_sysfs_init(wl);
6649 wl->initialized = true;
/* error unwinding labels (bodies partially elided in this dump) */
6653 wl1271_unregister_hw(wl);
6656 free_irq(wl->irq, wl);
6662 release_firmware(fw);
6663 complete_all(&wl->nvs_loading_complete);
/*
 * Runtime-PM suspend: put the chip into ELP (extremely low power) sleep.
 * Skipped in PLT mode, when ELP isn't the configured sleep policy, or
 * (presumably — guard body elided) when some vif is in use but not yet
 * in power-save. A failed ELP write triggers firmware recovery.
 */
6666 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6668 struct wl1271 *wl = dev_get_drvdata(dev);
6669 struct wl12xx_vif *wlvif;
/* We do not enter elp sleep in PLT mode */
/* Nothing to do if no ELP mode requested */
6677 if (wl->sleep_auth != WL1271_PSM_ELP)
6680 wl12xx_for_each_wlvif(wl, wlvif) {
6681 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6682 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6686 wl1271_debug(DEBUG_PSM, "chip to elp");
/* one register write puts the chip to sleep */
6687 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6689 wl12xx_queue_recovery_work(wl);
6694 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
/*
 * Runtime-PM resume: wake the chip from ELP sleep. Registers an on-stack
 * completion (wl->elp_compl) that the IRQ path signals when the chip
 * acknowledges wakeup, writes the WAKE_UP command, then waits up to
 * WL1271_WAKEUP_TIMEOUT ms. On timeout, recovery is queued but 0 is
 * still returned so runtime PM itself does not fail. The completion
 * pointer is cleared under wl_lock before recovery to avoid the IRQ
 * path signalling a dead stack frame.
 */
6699 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6701 struct wl1271 *wl = dev_get_drvdata(dev);
6702 DECLARE_COMPLETION_ONSTACK(compl);
6703 unsigned long flags;
6705 unsigned long start_time = jiffies;
6706 bool pending = false;
6707 bool recovery = false;
/* Nothing to do if no ELP mode requested */
6710 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6713 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6715 spin_lock_irqsave(&wl->wl_lock, flags);
/* an IRQ already in flight may deliver the wakeup ack itself */
6716 if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6719 wl->elp_compl = &compl;
6720 spin_unlock_irqrestore(&wl->wl_lock, flags);
6722 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6729 ret = wait_for_completion_timeout(&compl,
6730 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6732 wl1271_warning("ELP wakeup timeout!");
/* Return no error for runtime PM for recovery */
6741 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6743 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6744 jiffies_to_msecs(jiffies - start_time));
/* detach the on-stack completion before it goes out of scope */
6749 spin_lock_irqsave(&wl->wl_lock, flags);
6750 wl->elp_compl = NULL;
6751 spin_unlock_irqrestore(&wl->wl_lock, flags);
6754 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6755 wl12xx_queue_recovery_work(wl);
/* Runtime-PM operations: ELP sleep on suspend, ELP wakeup on resume. */
6761 static const struct dev_pm_ops wlcore_pm_ops = {
6762 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6763 wlcore_runtime_resume,
/*
 * Common probe entry point called by the chip-specific drivers
 * (wl12xx/wl18xx). Validates the ops/ptable/platform data, then either
 * kicks off an asynchronous NVS firmware request (probe continues in
 * wlcore_nvs_cb) or calls wlcore_nvs_cb(NULL, wl) directly when the
 * family has no NVS file. Finally wires up runtime PM with a 50 ms
 * autosuspend delay. On a failed firmware request the completion is
 * still signalled so wlcore_remove() won't block.
 */
6767 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6769 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6770 const char *nvs_name;
6773 if (!wl->ops || !wl->ptable || !pdev_data)
6776 wl->dev = &pdev->dev;
6778 platform_set_drvdata(pdev, wl);
6780 if (pdev_data->family && pdev_data->family->nvs_name) {
6781 nvs_name = pdev_data->family->nvs_name;
6782 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6783 nvs_name, &pdev->dev, GFP_KERNEL,
6786 wl1271_error("request_firmware_nowait failed for %s: %d",
6788 complete_all(&wl->nvs_loading_complete);
/* no NVS file for this family — finish probe synchronously */
6791 wlcore_nvs_cb(NULL, wl);
6794 wl->dev->driver->pm = &wlcore_pm_ops;
6795 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6796 pm_runtime_use_autosuspend(wl->dev);
6797 pm_runtime_enable(wl->dev);
6801 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Common remove path: wake the device (warn but continue on failure),
 * detach the PM ops, wait for any in-flight NVS-callback probe to finish,
 * then — only if probe completed — disable IRQ wake, unregister from
 * mac80211, tear down runtime PM and release the interrupt.
 */
6803 int wlcore_remove(struct platform_device *pdev)
6805 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6806 struct wl1271 *wl = platform_get_drvdata(pdev);
6809 error = pm_runtime_get_sync(wl->dev);
6811 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6813 wl->dev->driver->pm = NULL;
/* probe may still be running in the firmware callback — wait for it */
6815 if (pdev_data->family && pdev_data->family->nvs_name)
6816 wait_for_completion(&wl->nvs_loading_complete);
6817 if (!wl->initialized)
6820 if (wl->irq_wake_enabled) {
6821 device_init_wakeup(wl->dev, 0);
6822 disable_irq_wake(wl->irq);
6824 wl1271_unregister_hw(wl);
6826 pm_runtime_put_sync(wl->dev);
6827 pm_runtime_dont_use_autosuspend(wl->dev);
6828 pm_runtime_disable(wl->dev);
6830 free_irq(wl->irq, wl);
6835 EXPORT_SYMBOL_GPL(wlcore_remove);
/*
 * Module parameters and metadata. debug_level is a runtime-writable
 * bitmask of DEBUG_* flags; fwlog selects the firmware logger mode;
 * the remaining knobs tune firmware-recovery behavior for debugging.
 */
6837 u32 wl12xx_debug_level = DEBUG_NONE;
6838 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6839 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6840 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6842 module_param_named(fwlog, fwlog_param, charp, 0);
6843 MODULE_PARM_DESC(fwlog,
6844 "FW logger options: continuous, dbgpins or disable");
6846 module_param(fwlog_mem_blocks, int, 0600);
6847 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6849 module_param(bug_on_recovery, int, 0600);
6850 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6852 module_param(no_recovery, int, 0600);
6853 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6855 MODULE_LICENSE("GPL");
6856 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6857 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");